Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/vfs_inode.c | 4
-rw-r--r--  fs/adfs/super.c | 8
-rw-r--r--  fs/affs/amigaffs.c | 4
-rw-r--r--  fs/affs/bitmap.c | 3
-rw-r--r--  fs/affs/super.c | 6
-rw-r--r--  fs/afs/kafsasyncd.c | 1
-rw-r--r--  fs/afs/kafstimod.c | 1
-rw-r--r--  fs/afs/server.c | 3
-rw-r--r--  fs/afs/super.c | 12
-rw-r--r--  fs/aio.c | 20
-rw-r--r--  fs/autofs/inode.c | 7
-rw-r--r--  fs/autofs4/inode.c | 7
-rw-r--r--  fs/befs/linuxvfs.c | 6
-rw-r--r--  fs/bfs/inode.c | 6
-rw-r--r--  fs/binfmt_elf.c | 26
-rw-r--r--  fs/binfmt_elf_fdpic.c | 3
-rw-r--r--  fs/bio.c | 4
-rw-r--r--  fs/block_dev.c | 6
-rw-r--r--  fs/buffer.c | 6
-rw-r--r--  fs/cifs/cifsfs.c | 17
-rw-r--r--  fs/cifs/connect.c | 1
-rw-r--r--  fs/cifs/misc.c | 4
-rw-r--r--  fs/cifs/transport.c | 6
-rw-r--r--  fs/coda/inode.c | 6
-rw-r--r--  fs/compat.c | 37
-rw-r--r--  fs/compat_ioctl.c | 34
-rw-r--r--  fs/configfs/configfs_internal.h | 2
-rw-r--r--  fs/configfs/mount.c | 2
-rw-r--r--  fs/cramfs/inode.c | 2
-rw-r--r--  fs/dcache.c | 22
-rw-r--r--  fs/dcookies.c | 2
-rw-r--r--  fs/dlm/Kconfig | 20
-rw-r--r--  fs/dlm/Makefile | 4
-rw-r--r--  fs/dlm/dlm_internal.h | 4
-rw-r--r--  fs/dlm/lock.c | 16
-rw-r--r--  fs/dlm/lockspace.c | 4
-rw-r--r--  fs/dlm/lowcomms-sctp.c (renamed from fs/dlm/lowcomms.c) | 264
-rw-r--r--  fs/dlm/lowcomms-tcp.c | 1189
-rw-r--r--  fs/dlm/lowcomms.h | 2
-rw-r--r--  fs/dlm/main.c | 10
-rw-r--r--  fs/dlm/member.c | 8
-rw-r--r--  fs/dlm/memory.c | 2
-rw-r--r--  fs/dlm/rcom.c | 58
-rw-r--r--  fs/dlm/recover.c | 1
-rw-r--r--  fs/dlm/recoverd.c | 44
-rw-r--r--  fs/dlm/requestqueue.c | 26
-rw-r--r--  fs/dlm/requestqueue.h | 2
-rw-r--r--  fs/dnotify.c | 4
-rw-r--r--  fs/dquot.c | 4
-rw-r--r--  fs/ecryptfs/crypto.c | 6
-rw-r--r--  fs/ecryptfs/file.c | 2
-rw-r--r--  fs/ecryptfs/inode.c | 6
-rw-r--r--  fs/ecryptfs/keystore.c | 2
-rw-r--r--  fs/ecryptfs/main.c | 8
-rw-r--r--  fs/ecryptfs/super.c | 2
-rw-r--r--  fs/efs/super.c | 6
-rw-r--r--  fs/eventpoll.c | 8
-rw-r--r--  fs/exec.c | 5
-rw-r--r--  fs/ext2/ioctl.c | 6
-rw-r--r--  fs/ext2/super.c | 24
-rw-r--r--  fs/ext2/xattr.c | 5
-rw-r--r--  fs/ext3/Makefile | 2
-rw-r--r--  fs/ext3/balloc.c | 31
-rw-r--r--  fs/ext3/dir.c | 3
-rw-r--r--  fs/ext3/ext3_jbd.c | 59
-rw-r--r--  fs/ext3/inode.c | 85
-rw-r--r--  fs/ext3/namei.c | 9
-rw-r--r--  fs/ext3/super.c | 17
-rw-r--r--  fs/ext3/xattr.c | 5
-rw-r--r--  fs/ext4/Makefile | 3
-rw-r--r--  fs/ext4/balloc.c | 31
-rw-r--r--  fs/ext4/dir.c | 3
-rw-r--r--  fs/ext4/ext4_jbd2.c | 59
-rw-r--r--  fs/ext4/extents.c | 110
-rw-r--r--  fs/ext4/inode.c | 85
-rw-r--r--  fs/ext4/namei.c | 9
-rw-r--r--  fs/ext4/super.c | 17
-rw-r--r--  fs/ext4/xattr.c | 5
-rw-r--r--  fs/fat/cache.c | 6
-rw-r--r--  fs/fat/inode.c | 6
-rw-r--r--  fs/fcntl.c | 4
-rw-r--r--  fs/file.c | 29
-rw-r--r--  fs/freevxfs/vxfs_inode.c | 6
-rw-r--r--  fs/fuse/dev.c | 4
-rw-r--r--  fs/fuse/dir.c | 5
-rw-r--r--  fs/fuse/file.c | 37
-rw-r--r--  fs/fuse/fuse_i.h | 9
-rw-r--r--  fs/fuse/inode.c | 123
-rw-r--r--  fs/gfs2/Kconfig | 1
-rw-r--r--  fs/gfs2/acl.c | 39
-rw-r--r--  fs/gfs2/acl.h | 1
-rw-r--r--  fs/gfs2/bmap.c | 179
-rw-r--r--  fs/gfs2/daemon.c | 7
-rw-r--r--  fs/gfs2/dir.c | 93
-rw-r--r--  fs/gfs2/dir.h | 8
-rw-r--r--  fs/gfs2/eaops.c | 2
-rw-r--r--  fs/gfs2/eattr.c | 66
-rw-r--r--  fs/gfs2/eattr.h | 6
-rw-r--r--  fs/gfs2/glock.c | 36
-rw-r--r--  fs/gfs2/glock.h | 3
-rw-r--r--  fs/gfs2/glops.c | 138
-rw-r--r--  fs/gfs2/incore.h | 43
-rw-r--r--  fs/gfs2/inode.c | 406
-rw-r--r--  fs/gfs2/inode.h | 20
-rw-r--r--  fs/gfs2/log.c | 41
-rw-r--r--  fs/gfs2/log.h | 2
-rw-r--r--  fs/gfs2/lops.c | 40
-rw-r--r--  fs/gfs2/lops.h | 2
-rw-r--r--  fs/gfs2/main.c | 4
-rw-r--r--  fs/gfs2/meta_io.c | 46
-rw-r--r--  fs/gfs2/meta_io.h | 1
-rw-r--r--  fs/gfs2/ondisk.c | 138
-rw-r--r--  fs/gfs2/ops_address.c | 52
-rw-r--r--  fs/gfs2/ops_dentry.c | 4
-rw-r--r--  fs/gfs2/ops_export.c | 38
-rw-r--r--  fs/gfs2/ops_export.h | 2
-rw-r--r--  fs/gfs2/ops_file.c | 66
-rw-r--r--  fs/gfs2/ops_file.h | 2
-rw-r--r--  fs/gfs2/ops_fstype.c | 4
-rw-r--r--  fs/gfs2/ops_inode.c | 134
-rw-r--r--  fs/gfs2/ops_super.c | 11
-rw-r--r--  fs/gfs2/ops_vm.c | 2
-rw-r--r--  fs/gfs2/quota.c | 15
-rw-r--r--  fs/gfs2/recovery.c | 29
-rw-r--r--  fs/gfs2/recovery.h | 2
-rw-r--r--  fs/gfs2/rgrp.c | 13
-rw-r--r--  fs/gfs2/super.c | 50
-rw-r--r--  fs/gfs2/super.h | 6
-rw-r--r--  fs/gfs2/sys.c | 8
-rw-r--r--  fs/gfs2/util.c | 6
-rw-r--r--  fs/gfs2/util.h | 12
-rw-r--r--  fs/hfs/super.c | 6
-rw-r--r--  fs/hfsplus/super.c | 6
-rw-r--r--  fs/hpfs/dir.c | 10
-rw-r--r--  fs/hpfs/dnode.c | 13
-rw-r--r--  fs/hpfs/ea.c | 5
-rw-r--r--  fs/hpfs/hpfs_fn.h | 3
-rw-r--r--  fs/hpfs/inode.c | 5
-rw-r--r--  fs/hpfs/map.c | 20
-rw-r--r--  fs/hpfs/super.c | 29
-rw-r--r--  fs/hugetlbfs/inode.c | 6
-rw-r--r--  fs/inode.c | 9
-rw-r--r--  fs/inotify_user.c | 4
-rw-r--r--  fs/isofs/inode.c | 6
-rw-r--r--  fs/jbd/journal.c | 8
-rw-r--r--  fs/jbd/revoke.c | 4
-rw-r--r--  fs/jbd/transaction.c | 4
-rw-r--r--  fs/jbd2/commit.c | 8
-rw-r--r--  fs/jbd2/journal.c | 8
-rw-r--r--  fs/jbd2/revoke.c | 4
-rw-r--r--  fs/jbd2/transaction.c | 2
-rw-r--r--  fs/jffs/inode-v23.c | 4
-rw-r--r--  fs/jffs/intrep.c | 4
-rw-r--r--  fs/jffs/jffs_fm.c | 4
-rw-r--r--  fs/jffs2/background.c | 1
-rw-r--r--  fs/jffs2/malloc.c | 18
-rw-r--r--  fs/jffs2/super.c | 6
-rw-r--r--  fs/jfs/jfs_logmgr.c | 2
-rw-r--r--  fs/jfs/jfs_metapage.c | 4
-rw-r--r--  fs/jfs/jfs_txnmgr.c | 2
-rw-r--r--  fs/jfs/super.c | 6
-rw-r--r--  fs/lockd/clntproc.c | 1
-rw-r--r--  fs/lockd/host.c | 53
-rw-r--r--  fs/locks.c | 6
-rw-r--r--  fs/mbcache.c | 2
-rw-r--r--  fs/minix/inode.c | 6
-rw-r--r--  fs/namei.c | 9
-rw-r--r--  fs/namespace.c | 2
-rw-r--r--  fs/ncpfs/inode.c | 6
-rw-r--r--  fs/nfs/direct.c | 4
-rw-r--r--  fs/nfs/inode.c | 6
-rw-r--r--  fs/nfs/pagelist.c | 4
-rw-r--r--  fs/nfs/read.c | 4
-rw-r--r--  fs/nfs/write.c | 6
-rw-r--r--  fs/nfsd/nfs3xdr.c | 24
-rw-r--r--  fs/nfsd/nfs4state.c | 10
-rw-r--r--  fs/nfsd/nfsxdr.c | 13
-rw-r--r--  fs/nls/nls_cp936.c | 113
-rw-r--r--  fs/ntfs/attrib.c | 2
-rw-r--r--  fs/ntfs/index.c | 2
-rw-r--r--  fs/ntfs/inode.c | 4
-rw-r--r--  fs/ntfs/unistr.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmfs.c | 6
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 2
-rw-r--r--  fs/ocfs2/extent_map.c | 2
-rw-r--r--  fs/ocfs2/inode.h | 2
-rw-r--r--  fs/ocfs2/super.c | 10
-rw-r--r--  fs/ocfs2/uptodate.c | 2
-rw-r--r--  fs/openpromfs/inode.c | 6
-rw-r--r--  fs/partitions/amiga.c | 2
-rw-r--r--  fs/partitions/atari.c | 2
-rw-r--r--  fs/partitions/check.c | 19
-rw-r--r--  fs/partitions/ibm.c | 29
-rw-r--r--  fs/pipe.c | 22
-rw-r--r--  fs/proc/Makefile | 3
-rw-r--r--  fs/proc/base.c | 11
-rw-r--r--  fs/proc/inode.c | 6
-rw-r--r--  fs/proc/kcore.c | 16
-rw-r--r--  fs/proc/proc_misc.c | 2
-rw-r--r--  fs/qnx4/inode.c | 6
-rw-r--r--  fs/reiserfs/file.c | 61
-rw-r--r--  fs/reiserfs/inode.c | 10
-rw-r--r--  fs/reiserfs/super.c | 9
-rw-r--r--  fs/romfs/inode.c | 6
-rw-r--r--  fs/seq_file.c | 4
-rw-r--r--  fs/smbfs/inode.c | 6
-rw-r--r--  fs/smbfs/request.c | 4
-rw-r--r--  fs/stat.c | 7
-rw-r--r--  fs/sysfs/mount.c | 2
-rw-r--r--  fs/sysfs/sysfs.h | 2
-rw-r--r--  fs/sysv/CHANGES | 60
-rw-r--r--  fs/sysv/ChangeLog | 106
-rw-r--r--  fs/sysv/INTRO | 182
-rw-r--r--  fs/sysv/inode.c | 6
-rw-r--r--  fs/udf/super.c | 10
-rw-r--r--  fs/ufs/super.c | 12
-rw-r--r--  fs/ufs/util.h | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 5
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 1
219 files changed, 3451 insertions, 2232 deletions
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 5241c600ce28..18f26cdfd882 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -256,7 +256,7 @@ static int
 v9fs_create(struct v9fs_session_info *v9ses, u32 pfid, char *name, u32 perm,
 		u8 mode, char *extension, u32 *fidp, struct v9fs_qid *qid, u32 *iounit)
 {
-	u32 fid;
+	int fid;
 	int err;
 	struct v9fs_fcall *fcall;
 
@@ -310,7 +310,7 @@ static struct v9fs_fid*
 v9fs_clone_walk(struct v9fs_session_info *v9ses, u32 fid, struct dentry *dentry)
 {
 	int err;
-	u32 nfid;
+	int nfid;
 	struct v9fs_fid *ret;
 	struct v9fs_fcall *fcall;
 
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 9ade139086fc..5023351a7afe 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -36,7 +36,7 @@ void __adfs_error(struct super_block *sb, const char *function, const char *fmt,
 	va_list args;
 
 	va_start(args, fmt);
-	vsprintf(error_buf, fmt, args);
+	vsnprintf(error_buf, sizeof(error_buf), fmt, args);
 	va_end(args);
 
 	printk(KERN_CRIT "ADFS-fs error (device %s)%s%s: %s\n",
@@ -212,12 +212,12 @@ static int adfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	return 0;
 }
 
-static kmem_cache_t *adfs_inode_cachep;
+static struct kmem_cache *adfs_inode_cachep;
 
 static struct inode *adfs_alloc_inode(struct super_block *sb)
 {
 	struct adfs_inode_info *ei;
-	ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, SLAB_KERNEL);
+	ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -228,7 +228,7 @@ static void adfs_destroy_inode(struct inode *inode)
 	kmem_cache_free(adfs_inode_cachep, ADFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct adfs_inode_info *ei = (struct adfs_inode_info *) foo;
 
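The adfs hunks above show the two mechanical conversions this patch repeats across most of the filesystems that follow: error/warning printing is bounded with vsnprintf(), and the old kmem_cache_t typedef plus the SLAB_KERNEL alias give way to struct kmem_cache and GFP_KERNEL. A minimal sketch of the resulting inode-cache allocation pattern, using hypothetical foo_* names rather than any one filesystem:

	/* Post-patch slab usage; the foo_* identifiers are illustrative only. */
	static struct kmem_cache *foo_inode_cachep;

	static struct inode *foo_alloc_inode(struct super_block *sb)
	{
		struct foo_inode_info *ei;

		/* GFP_KERNEL replaces the old SLAB_KERNEL alias */
		ei = kmem_cache_alloc(foo_inode_cachep, GFP_KERNEL);
		if (!ei)
			return NULL;
		return &ei->vfs_inode;
	}
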
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index ccd624ef4272..f4de4b98004f 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -445,7 +445,7 @@ affs_error(struct super_block *sb, const char *function, const char *fmt, ...)
 	va_list args;
 
 	va_start(args,fmt);
-	vsprintf(ErrorBuffer,fmt,args);
+	vsnprintf(ErrorBuffer,sizeof(ErrorBuffer),fmt,args);
 	va_end(args);
 
 	printk(KERN_CRIT "AFFS error (device %s): %s(): %s\n", sb->s_id,
@@ -461,7 +461,7 @@ affs_warning(struct super_block *sb, const char *function, const char *fmt, ...)
 	va_list args;
 
 	va_start(args,fmt);
-	vsprintf(ErrorBuffer,fmt,args);
+	vsnprintf(ErrorBuffer,sizeof(ErrorBuffer),fmt,args);
 	va_end(args);
 
 	printk(KERN_WARNING "AFFS warning (device %s): %s(): %s\n", sb->s_id,
diff --git a/fs/affs/bitmap.c b/fs/affs/bitmap.c
index b0b953683c1a..b330009fe42d 100644
--- a/fs/affs/bitmap.c
+++ b/fs/affs/bitmap.c
@@ -289,12 +289,11 @@ int affs_init_bitmap(struct super_block *sb, int *flags)
 	sbi->s_bmap_count = (sbi->s_partition_size - sbi->s_reserved +
 			     sbi->s_bmap_bits - 1) / sbi->s_bmap_bits;
 	size = sbi->s_bmap_count * sizeof(*bm);
-	bm = sbi->s_bitmap = kmalloc(size, GFP_KERNEL);
+	bm = sbi->s_bitmap = kzalloc(size, GFP_KERNEL);
 	if (!sbi->s_bitmap) {
 		printk(KERN_ERR "AFFS: Bitmap allocation failed\n");
 		return -ENOMEM;
 	}
-	memset(sbi->s_bitmap, 0, size);
 
 	bmap_blk = (__be32 *)sbi->s_root_bh->b_data;
 	blk = sb->s_blocksize / 4 - 49;
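The bitmap hunk is an instance of another simplification that recurs in this patch (see also the afs hunks below): a kmalloc() immediately followed by a memset(..., 0, size) collapses into a single kzalloc() call, which returns already-zeroed memory. The general shape, with buf and size as placeholders:

	/* before */
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memset(buf, 0, size);

	/* after: one call, memory comes back zeroed */
	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
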
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 5ea72c3a16c3..3de93e799949 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -66,12 +66,12 @@ affs_write_super(struct super_block *sb)
 	pr_debug("AFFS: write_super() at %lu, clean=%d\n", get_seconds(), clean);
 }
 
-static kmem_cache_t * affs_inode_cachep;
+static struct kmem_cache * affs_inode_cachep;
 
 static struct inode *affs_alloc_inode(struct super_block *sb)
 {
 	struct affs_inode_info *ei;
-	ei = (struct affs_inode_info *)kmem_cache_alloc(affs_inode_cachep, SLAB_KERNEL);
+	ei = (struct affs_inode_info *)kmem_cache_alloc(affs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	ei->vfs_inode.i_version = 1;
@@ -83,7 +83,7 @@ static void affs_destroy_inode(struct inode *inode)
 	kmem_cache_free(affs_inode_cachep, AFFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct affs_inode_info *ei = (struct affs_inode_info *) foo;
 
diff --git a/fs/afs/kafsasyncd.c b/fs/afs/kafsasyncd.c
index f09a794f248e..615df2407cb2 100644
--- a/fs/afs/kafsasyncd.c
+++ b/fs/afs/kafsasyncd.c
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/completion.h>
+#include <linux/freezer.h>
 #include "cell.h"
 #include "server.h"
 #include "volume.h"
diff --git a/fs/afs/kafstimod.c b/fs/afs/kafstimod.c
index 65bc05ab8182..694344e4d3c7 100644
--- a/fs/afs/kafstimod.c
+++ b/fs/afs/kafstimod.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/completion.h>
+#include <linux/freezer.h>
 #include "cell.h"
 #include "volume.h"
 #include "kafstimod.h"
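The one-line afs (and, further down, cifs) changes reflect the freezer helpers moving out of <linux/sched.h> into the new <linux/freezer.h>: kernel threads that participate in suspend keep calling try_to_freeze(), but now need the explicit include. A hedged sketch of such a daemon loop, not the actual kafsasyncd/kafstimod body:

	#include <linux/sched.h>
	#include <linux/kthread.h>
	#include <linux/freezer.h>

	static int foo_daemon(void *data)	/* hypothetical daemon */
	{
		while (!kthread_should_stop()) {
			try_to_freeze();	/* park here during suspend */
			/* ... service queued work ... */
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}
		return 0;
	}
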
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 22afaae1a4ce..44aff81dc6a7 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -55,13 +55,12 @@ int afs_server_lookup(struct afs_cell *cell, const struct in_addr *addr,
 	_enter("%p,%08x,", cell, ntohl(addr->s_addr));
 
 	/* allocate and initialise a server record */
-	server = kmalloc(sizeof(struct afs_server), GFP_KERNEL);
+	server = kzalloc(sizeof(struct afs_server), GFP_KERNEL);
 	if (!server) {
 		_leave(" = -ENOMEM");
 		return -ENOMEM;
 	}
 
-	memset(server, 0, sizeof(struct afs_server));
 	atomic_set(&server->usage, 1);
 
 	INIT_LIST_HEAD(&server->link);
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 67d1f5c819ec..18d9b77ba40f 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -35,7 +35,7 @@ struct afs_mount_params {
35 struct afs_volume *volume; 35 struct afs_volume *volume;
36}; 36};
37 37
38static void afs_i_init_once(void *foo, kmem_cache_t *cachep, 38static void afs_i_init_once(void *foo, struct kmem_cache *cachep,
39 unsigned long flags); 39 unsigned long flags);
40 40
41static int afs_get_sb(struct file_system_type *fs_type, 41static int afs_get_sb(struct file_system_type *fs_type,
@@ -65,7 +65,7 @@ static struct super_operations afs_super_ops = {
 	.put_super = afs_put_super,
 };
 
-static kmem_cache_t *afs_inode_cachep;
+static struct kmem_cache *afs_inode_cachep;
 static atomic_t afs_count_active_inodes;
 
 /*****************************************************************************/
@@ -242,14 +242,12 @@ static int afs_fill_super(struct super_block *sb, void *data, int silent)
 	kenter("");
 
 	/* allocate a superblock info record */
-	as = kmalloc(sizeof(struct afs_super_info), GFP_KERNEL);
+	as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
 	if (!as) {
 		_leave(" = -ENOMEM");
 		return -ENOMEM;
 	}
 
-	memset(as, 0, sizeof(struct afs_super_info));
-
 	afs_get_volume(params->volume);
 	as->volume = params->volume;
 
@@ -384,7 +382,7 @@ static void afs_put_super(struct super_block *sb)
 /*
  * initialise an inode cache slab element prior to any use
  */
-static void afs_i_init_once(void *_vnode, kmem_cache_t *cachep,
+static void afs_i_init_once(void *_vnode, struct kmem_cache *cachep,
 			    unsigned long flags)
 {
 	struct afs_vnode *vnode = (struct afs_vnode *) _vnode;
@@ -412,7 +410,7 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
 	struct afs_vnode *vnode;
 
 	vnode = (struct afs_vnode *)
-		kmem_cache_alloc(afs_inode_cachep, SLAB_KERNEL);
+		kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL);
 	if (!vnode)
 		return NULL;
 
diff --git a/fs/aio.c b/fs/aio.c
index 287a1bc7a182..d3a6ec2c9627 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -47,8 +47,8 @@ unsigned long aio_nr; /* current system wide number of aio requests */
 unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
 /*----end sysctl variables---*/
 
-static kmem_cache_t *kiocb_cachep;
-static kmem_cache_t *kioctx_cachep;
+static struct kmem_cache *kiocb_cachep;
+static struct kmem_cache *kioctx_cachep;
 
 static struct workqueue_struct *aio_wq;
 
@@ -666,17 +666,6 @@ static ssize_t aio_run_iocb(struct kiocb *iocb)
 	ssize_t (*retry)(struct kiocb *);
 	ssize_t ret;
 
-	if (iocb->ki_retried++ > 1024*1024) {
-		printk("Maximal retry count. Bytes done %Zd\n",
-			iocb->ki_nbytes - iocb->ki_left);
-		return -EAGAIN;
-	}
-
-	if (!(iocb->ki_retried & 0xff)) {
-		pr_debug("%ld retry: %zd of %zd\n", iocb->ki_retried,
-			iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes);
-	}
-
 	if (!(retry = iocb->ki_retry)) {
 		printk("aio_run_iocb: iocb->ki_retry = NULL\n");
 		return 0;
@@ -1005,9 +994,6 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
 	kunmap_atomic(ring, KM_IRQ1);
 
 	pr_debug("added to ring %p at [%lu]\n", iocb, tail);
-
-	pr_debug("%ld retries: %zd of %zd\n", iocb->ki_retried,
-		iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes);
 put_rq:
 	/* everything turned out well, dispose of the aiocb. */
 	ret = __aio_put_req(ctx, iocb);
@@ -1413,7 +1399,6 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
 	kiocb->ki_iovec->iov_len = kiocb->ki_left;
 	kiocb->ki_nr_segs = 1;
 	kiocb->ki_cur_seg = 0;
-	kiocb->ki_nbytes = kiocb->ki_left;
 	return 0;
 }
 
@@ -1591,7 +1576,6 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 	req->ki_opcode = iocb->aio_lio_opcode;
 	init_waitqueue_func_entry(&req->ki_wait, aio_wake_function);
 	INIT_LIST_HEAD(&req->ki_wait.task_list);
-	req->ki_retried = 0;
 
 	ret = aio_setup_iocb(req);
 
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index 38ede5c9d6fd..f968d1342808 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -28,10 +28,11 @@ void autofs_kill_sb(struct super_block *sb)
 	/*
 	 * In the event of a failure in get_sb_nodev the superblock
 	 * info is not present so nothing else has been setup, so
-	 * just exit when we are called from deactivate_super.
+	 * just call kill_anon_super when we are called from
+	 * deactivate_super.
 	 */
 	if (!sbi)
-		return;
+		goto out_kill_sb;
 
 	if ( !sbi->catatonic )
 		autofs_catatonic_mode(sbi); /* Free wait queues, close pipe */
@@ -44,6 +45,7 @@ void autofs_kill_sb(struct super_block *sb)
 
 	kfree(sb->s_fs_info);
 
+out_kill_sb:
 	DPRINTK(("autofs: shutting down\n"));
 	kill_anon_super(sb);
 }
@@ -209,7 +211,6 @@ fail_iput:
 fail_free:
 	kfree(sbi);
 	s->s_fs_info = NULL;
-	kill_anon_super(s);
 fail_unlock:
 	return -EINVAL;
 }
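Both the autofs and the following autofs4 hunks restructure the same failure path: kill_anon_super() must run even when fill_super failed before s_fs_info was set up, and it must no longer be called a second time from the fail_free label. A condensed sketch of the resulting shape, with the filesystem-specific teardown elided:

	void autofs_kill_sb(struct super_block *sb)
	{
		struct autofs_sb_info *sbi = autofs_sbi(sb);

		if (!sbi)
			goto out_kill_sb;	/* fill_super failed early */

		/* ... normal teardown: wait queues, pipe, s_fs_info ... */

	out_kill_sb:
		kill_anon_super(sb);		/* generic teardown always runs */
	}
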
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index ce7c0f1dd529..9c48250fd726 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -152,10 +152,11 @@ void autofs4_kill_sb(struct super_block *sb)
 	/*
 	 * In the event of a failure in get_sb_nodev the superblock
 	 * info is not present so nothing else has been setup, so
-	 * just exit when we are called from deactivate_super.
+	 * just call kill_anon_super when we are called from
+	 * deactivate_super.
 	 */
 	if (!sbi)
-		return;
+		goto out_kill_sb;
 
 	sb->s_fs_info = NULL;
 
@@ -167,6 +168,7 @@ void autofs4_kill_sb(struct super_block *sb)
 
 	kfree(sbi);
 
+out_kill_sb:
 	DPRINTK("shutting down");
 	kill_anon_super(sb);
 }
@@ -426,7 +428,6 @@ fail_ino:
 fail_free:
 	kfree(sbi);
 	s->s_fs_info = NULL;
-	kill_anon_super(s);
 fail_unlock:
 	return -EINVAL;
 }
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 07f7144f0e2e..bce402eee554 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -61,7 +61,7 @@ static const struct super_operations befs_sops = {
 };
 
 /* slab cache for befs_inode_info objects */
-static kmem_cache_t *befs_inode_cachep;
+static struct kmem_cache *befs_inode_cachep;
 
 static const struct file_operations befs_dir_operations = {
 	.read = generic_read_dir,
@@ -277,7 +277,7 @@ befs_alloc_inode(struct super_block *sb)
 {
 	struct befs_inode_info *bi;
 	bi = (struct befs_inode_info *)kmem_cache_alloc(befs_inode_cachep,
-							SLAB_KERNEL);
+							GFP_KERNEL);
 	if (!bi)
 		return NULL;
 	return &bi->vfs_inode;
@@ -289,7 +289,7 @@ befs_destroy_inode(struct inode *inode)
 	kmem_cache_free(befs_inode_cachep, BEFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct befs_inode_info *bi = (struct befs_inode_info *) foo;
 
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index ed27ffb3459e..eac175ed9f44 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -228,12 +228,12 @@ static void bfs_write_super(struct super_block *s)
 	unlock_kernel();
 }
 
-static kmem_cache_t * bfs_inode_cachep;
+static struct kmem_cache * bfs_inode_cachep;
 
 static struct inode *bfs_alloc_inode(struct super_block *sb)
 {
 	struct bfs_inode_info *bi;
-	bi = kmem_cache_alloc(bfs_inode_cachep, SLAB_KERNEL);
+	bi = kmem_cache_alloc(bfs_inode_cachep, GFP_KERNEL);
 	if (!bi)
 		return NULL;
 	return &bi->vfs_inode;
@@ -244,7 +244,7 @@ static void bfs_destroy_inode(struct inode *inode)
 	kmem_cache_free(bfs_inode_cachep, BFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct bfs_inode_info *bi = foo;
 
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index cc72bb43061d..be5869d34999 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -47,10 +47,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
 static int load_elf_library(struct file *);
 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
 
-#ifndef elf_addr_t
-#define elf_addr_t unsigned long
-#endif
-
 /*
  * If we don't support core dumping, then supply a NULL so we
  * don't even try.
@@ -243,8 +239,9 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 	if (interp_aout) {
 		argv = sp + 2;
 		envp = argv + argc + 1;
-		__put_user((elf_addr_t)(unsigned long)argv, sp++);
-		__put_user((elf_addr_t)(unsigned long)envp, sp++);
+		if (__put_user((elf_addr_t)(unsigned long)argv, sp++) ||
+		    __put_user((elf_addr_t)(unsigned long)envp, sp++))
+			return -EFAULT;
 	} else {
 		argv = sp;
 		envp = argv + argc + 1;
@@ -254,7 +251,8 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 	p = current->mm->arg_end = current->mm->arg_start;
 	while (argc-- > 0) {
 		size_t len;
-		__put_user((elf_addr_t)p, argv++);
+		if (__put_user((elf_addr_t)p, argv++))
+			return -EFAULT;
 		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
 		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
 			return 0;
@@ -265,7 +263,8 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 	current->mm->arg_end = current->mm->env_start = p;
 	while (envc-- > 0) {
 		size_t len;
-		__put_user((elf_addr_t)p, envp++);
+		if (__put_user((elf_addr_t)p, envp++))
+			return -EFAULT;
 		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
 		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
 			return 0;
@@ -545,7 +544,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 	unsigned long reloc_func_desc = 0;
 	char passed_fileno[6];
 	struct files_struct *files;
-	int have_pt_gnu_stack, executable_stack = EXSTACK_DEFAULT;
+	int executable_stack = EXSTACK_DEFAULT;
 	unsigned long def_flags = 0;
 	struct {
 		struct elfhdr elf_ex;
@@ -708,7 +707,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 			executable_stack = EXSTACK_DISABLE_X;
 			break;
 		}
-	have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
 
 	/* Some simple consistency checks for the interpreter */
 	if (elf_interpreter) {
@@ -856,7 +854,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 			 * default mmap base, as well as whatever program they
 			 * might try to exec. This is because the brk will
 			 * follow the loader, and is not movable. */
-			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+			if (current->flags & PF_RANDOMIZE)
+				load_bias = randomize_range(0x10000,
+							    ELF_ET_DYN_BASE,
+							    0);
+			else
+				load_bias = ELF_ET_DYN_BASE;
+			load_bias = ELF_PAGESTART(load_bias - vaddr);
 		}
 
 		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
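The binfmt_elf hunks tighten two things: stores into the new process stack via __put_user() are now checked (the call returns 0 on success or -EFAULT, and a fault while building the argv/envp arrays aborts the exec), and ET_DYN binaries get a randomized load_bias when PF_RANDOMIZE is set instead of always loading at ELF_ET_DYN_BASE. The error-checking idiom, shown in isolation:

	/* __put_user() returns 0 on success, -EFAULT on a faulting store */
	if (__put_user((elf_addr_t)p, argv++))
		return -EFAULT;
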
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index f86d5c9ce5eb..ed9a61c6beb3 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -40,9 +40,6 @@
 #include <asm/pgalloc.h>
 
 typedef char *elf_caddr_t;
-#ifndef elf_addr_t
-#define elf_addr_t unsigned long
-#endif
 
 #if 0
 #define kdebug(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ )
diff --git a/fs/bio.c b/fs/bio.c
index 50c40ce2cead..7ec737eda72b 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -30,7 +30,7 @@
 
 #define BIO_POOL_SIZE 256
 
-static kmem_cache_t *bio_slab __read_mostly;
+static struct kmem_cache *bio_slab __read_mostly;
 
 #define BIOVEC_NR_POOLS 6
 
@@ -44,7 +44,7 @@ mempool_t *bio_split_pool __read_mostly;
 struct biovec_slab {
 	int nr_vecs;
 	char *name;
-	kmem_cache_t *slab;
+	struct kmem_cache *slab;
 };
 
 /*
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 36c0e7af9d0f..13816b4d76f6 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -235,11 +235,11 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
  */
 
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
-static kmem_cache_t * bdev_cachep __read_mostly;
+static struct kmem_cache * bdev_cachep __read_mostly;
 
 static struct inode *bdev_alloc_inode(struct super_block *sb)
 {
-	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, SLAB_KERNEL);
+	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -253,7 +253,7 @@ static void bdev_destroy_inode(struct inode *inode)
 	kmem_cache_free(bdev_cachep, bdi);
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct bdev_inode *ei = (struct bdev_inode *) foo;
 	struct block_device *bdev = &ei->bdev;
diff --git a/fs/buffer.c b/fs/buffer.c
index 35527dca1dbc..517860f2d75b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2908,7 +2908,7 @@ asmlinkage long sys_bdflush(int func, long data)
 /*
  * Buffer-head allocation
  */
-static kmem_cache_t *bh_cachep;
+static struct kmem_cache *bh_cachep;
 
 /*
  * Once the number of bh's in the machine exceeds this level, we start
@@ -2961,7 +2961,7 @@ void free_buffer_head(struct buffer_head *bh)
 EXPORT_SYMBOL(free_buffer_head);
 
 static void
-init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
+init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
 {
 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
 			SLAB_CTOR_CONSTRUCTOR) {
@@ -2972,7 +2972,6 @@ init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
 	}
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static void buffer_exit_cpu(int cpu)
 {
 	int i;
@@ -2994,7 +2993,6 @@ static int buffer_cpu_notify(struct notifier_block *self,
 		buffer_exit_cpu((unsigned long)hcpu);
 	return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 void __init buffer_init(void)
 {
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 84976cdbe713..71bc87a37fc1 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -34,6 +34,7 @@
 #include <linux/mempool.h>
 #include <linux/delay.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 #include "cifsfs.h"
 #include "cifspdu.h"
 #define DECLARE_GLOBALS_HERE
@@ -81,7 +82,7 @@ extern mempool_t *cifs_sm_req_poolp;
 extern mempool_t *cifs_req_poolp;
 extern mempool_t *cifs_mid_poolp;
 
-extern kmem_cache_t *cifs_oplock_cachep;
+extern struct kmem_cache *cifs_oplock_cachep;
 
 static int
 cifs_read_super(struct super_block *sb, void *data,
@@ -232,11 +233,11 @@ static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd)
 		return generic_permission(inode, mask, NULL);
 }
 
-static kmem_cache_t *cifs_inode_cachep;
-static kmem_cache_t *cifs_req_cachep;
-static kmem_cache_t *cifs_mid_cachep;
-kmem_cache_t *cifs_oplock_cachep;
-static kmem_cache_t *cifs_sm_req_cachep;
+static struct kmem_cache *cifs_inode_cachep;
+static struct kmem_cache *cifs_req_cachep;
+static struct kmem_cache *cifs_mid_cachep;
+struct kmem_cache *cifs_oplock_cachep;
+static struct kmem_cache *cifs_sm_req_cachep;
 mempool_t *cifs_sm_req_poolp;
 mempool_t *cifs_req_poolp;
 mempool_t *cifs_mid_poolp;
@@ -245,7 +246,7 @@ static struct inode *
 cifs_alloc_inode(struct super_block *sb)
 {
 	struct cifsInodeInfo *cifs_inode;
-	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, SLAB_KERNEL);
+	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
 	if (!cifs_inode)
 		return NULL;
 	cifs_inode->cifsAttrs = 0x20; /* default */
@@ -668,7 +669,7 @@ const struct file_operations cifs_dir_ops = {
 };
 
 static void
-cifs_init_once(void *inode, kmem_cache_t * cachep, unsigned long flags)
+cifs_init_once(void *inode, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct cifsInodeInfo *cifsi = inode;
 
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 71f77914ce93..2caca06b4bae 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -31,6 +31,7 @@
 #include <linux/delay.h>
 #include <linux/completion.h>
 #include <linux/pagevec.h>
+#include <linux/freezer.h>
 #include <asm/uaccess.h>
 #include <asm/processor.h>
 #include "cifspdu.h"
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index bbc9cd34b6ea..aedf683f011f 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -153,7 +153,7 @@ cifs_buf_get(void)
 	   albeit slightly larger than necessary and maxbuffersize
 	   defaults to this and can not be bigger */
 	ret_buf =
-	    (struct smb_hdr *) mempool_alloc(cifs_req_poolp, SLAB_KERNEL | SLAB_NOFS);
+	    (struct smb_hdr *) mempool_alloc(cifs_req_poolp, GFP_KERNEL | GFP_NOFS);
 
 	/* clear the first few header bytes */
 	/* for most paths, more is cleared in header_assemble */
@@ -192,7 +192,7 @@ cifs_small_buf_get(void)
 	   albeit slightly larger than necessary and maxbuffersize
 	   defaults to this and can not be bigger */
 	ret_buf =
-	    (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, SLAB_KERNEL | SLAB_NOFS);
+	    (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, GFP_KERNEL | GFP_NOFS);
 	if (ret_buf) {
 		/* No need to clear memory here, cleared in header assemble */
 		/* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 48d47b46b1fb..f80007eaebf4 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -34,7 +34,7 @@
 #include "cifs_debug.h"
 
 extern mempool_t *cifs_mid_poolp;
-extern kmem_cache_t *cifs_oplock_cachep;
+extern struct kmem_cache *cifs_oplock_cachep;
 
 static struct mid_q_entry *
 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses)
@@ -51,7 +51,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses)
 	}
 
 	temp = (struct mid_q_entry *) mempool_alloc(cifs_mid_poolp,
-						    SLAB_KERNEL | SLAB_NOFS);
+						    GFP_KERNEL | GFP_NOFS);
 	if (temp == NULL)
 		return temp;
 	else {
@@ -118,7 +118,7 @@ AllocOplockQEntry(struct inode * pinode, __u16 fid, struct cifsTconInfo * tcon)
 		return NULL;
 	}
 	temp = (struct oplock_q_entry *) kmem_cache_alloc(cifs_oplock_cachep,
-							  SLAB_KERNEL);
+							  GFP_KERNEL);
 	if (temp == NULL)
 		return temp;
 	else {
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 88d123321164..b64659fa82d0 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -38,12 +38,12 @@ static void coda_clear_inode(struct inode *);
 static void coda_put_super(struct super_block *);
 static int coda_statfs(struct dentry *dentry, struct kstatfs *buf);
 
-static kmem_cache_t * coda_inode_cachep;
+static struct kmem_cache * coda_inode_cachep;
 
 static struct inode *coda_alloc_inode(struct super_block *sb)
 {
 	struct coda_inode_info *ei;
-	ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, SLAB_KERNEL);
+	ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	memset(&ei->c_fid, 0, sizeof(struct CodaFid));
@@ -58,7 +58,7 @@ static void coda_destroy_inode(struct inode *inode)
 	kmem_cache_free(coda_inode_cachep, ITOC(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct coda_inode_info *ei = (struct coda_inode_info *) foo;
 
diff --git a/fs/compat.c b/fs/compat.c
index 06dad665b88f..a7e3f162fb15 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -871,7 +871,7 @@ asmlinkage long compat_sys_mount(char __user * dev_name, char __user * dir_name,
 
 	retval = -EINVAL;
 
-	if (type_page) {
+	if (type_page && data_page) {
 		if (!strcmp((char *)type_page, SMBFS_NAME)) {
 			do_smb_super_data_conv((void *)data_page);
 		} else if (!strcmp((char *)type_page, NCPFS_NAME)) {
@@ -1144,7 +1144,9 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
 	lastdirent = buf.previous;
 	if (lastdirent) {
 		typeof(lastdirent->d_off) d_off = file->f_pos;
-		__put_user_unaligned(d_off, &lastdirent->d_off);
+		error = -EFAULT;
+		if (__put_user_unaligned(d_off, &lastdirent->d_off))
+			goto out_putf;
 		error = count - buf.count;
 	}
 
@@ -1611,14 +1613,14 @@ int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
 		nr &= ~1UL;
 		while (nr) {
 			unsigned long h, l;
-			__get_user(l, ufdset);
-			__get_user(h, ufdset+1);
+			if (__get_user(l, ufdset) || __get_user(h, ufdset+1))
+				return -EFAULT;
 			ufdset += 2;
 			*fdset++ = h << 32 | l;
 			nr -= 2;
 		}
-		if (odd)
-			__get_user(*fdset, ufdset);
+		if (odd && __get_user(*fdset, ufdset))
+			return -EFAULT;
 	} else {
 		/* Tricky, must clear full unsigned long in the
 		 * kernel fdset at the end, this makes sure that
@@ -1630,14 +1632,14 @@ int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
 }
 
 static
-void compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
+int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
 		      unsigned long *fdset)
 {
 	unsigned long odd;
 	nr = ROUND_UP(nr, __COMPAT_NFDBITS);
 
 	if (!ufdset)
-		return;
+		return 0;
 
 	odd = nr & 1UL;
 	nr &= ~1UL;
@@ -1645,13 +1647,14 @@ void compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
 		unsigned long h, l;
 		l = *fdset++;
 		h = l >> 32;
-		__put_user(l, ufdset);
-		__put_user(h, ufdset+1);
+		if (__put_user(l, ufdset) || __put_user(h, ufdset+1))
+			return -EFAULT;
 		ufdset += 2;
 		nr -= 2;
 	}
-	if (odd)
-		__put_user(*fdset, ufdset);
+	if (odd && __put_user(*fdset, ufdset))
+		return -EFAULT;
+	return 0;
 }
 
 
@@ -1726,10 +1729,10 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
 		ret = 0;
 	}
 
-	compat_set_fd_set(n, inp, fds.res_in);
-	compat_set_fd_set(n, outp, fds.res_out);
-	compat_set_fd_set(n, exp, fds.res_ex);
-
+	if (compat_set_fd_set(n, inp, fds.res_in) ||
+	    compat_set_fd_set(n, outp, fds.res_out) ||
+	    compat_set_fd_set(n, exp, fds.res_ex))
+		ret = -EFAULT;
 out:
 	kfree(bits);
 out_nofds:
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index a91f2628c981..bcc3caf5d820 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -211,8 +211,10 @@ static int do_video_stillpicture(unsigned int fd, unsigned int cmd, unsigned lon
 	up_native =
 		compat_alloc_user_space(sizeof(struct video_still_picture));
 
-	put_user(compat_ptr(fp), &up_native->iFrame);
-	put_user(size, &up_native->size);
+	err = put_user(compat_ptr(fp), &up_native->iFrame);
+	err |= put_user(size, &up_native->size);
+	if (err)
+		return -EFAULT;
 
 	err = sys_ioctl(fd, cmd, (unsigned long) up_native);
 
@@ -236,8 +238,10 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
 	err |= get_user(length, &up->length);
 
 	up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
-	put_user(compat_ptr(palp), &up_native->palette);
-	put_user(length, &up_native->length);
+	err = put_user(compat_ptr(palp), &up_native->palette);
+	err |= put_user(length, &up_native->length);
+	if (err)
+		return -EFAULT;
 
 	err = sys_ioctl(fd, cmd, (unsigned long) up_native);
 
@@ -2043,16 +2047,19 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
 	struct serial_struct ss;
 	mm_segment_t oldseg = get_fs();
 	__u32 udata;
+	unsigned int base;
 
 	if (cmd == TIOCSSERIAL) {
 		if (!access_ok(VERIFY_READ, ss32, sizeof(SS32)))
 			return -EFAULT;
 		if (__copy_from_user(&ss, ss32, offsetof(SS32, iomem_base)))
 			return -EFAULT;
-		__get_user(udata, &ss32->iomem_base);
+		if (__get_user(udata, &ss32->iomem_base))
+			return -EFAULT;
 		ss.iomem_base = compat_ptr(udata);
-		__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift);
-		__get_user(ss.port_high, &ss32->port_high);
+		if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
+		    __get_user(ss.port_high, &ss32->port_high))
+			return -EFAULT;
 		ss.iomap_base = 0UL;
 	}
 	set_fs(KERNEL_DS);
@@ -2063,12 +2070,12 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
 			return -EFAULT;
 		if (__copy_to_user(ss32,&ss,offsetof(SS32,iomem_base)))
 			return -EFAULT;
-		__put_user((unsigned long)ss.iomem_base >> 32 ?
-			    0xffffffff : (unsigned)(unsigned long)ss.iomem_base,
-			    &ss32->iomem_base);
-		__put_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift);
-		__put_user(ss.port_high, &ss32->port_high);
-
+		base = (unsigned long)ss.iomem_base >> 32 ?
+			0xffffffff : (unsigned)(unsigned long)ss.iomem_base;
+		if (__put_user(base, &ss32->iomem_base) ||
+		    __put_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
+		    __put_user(ss.port_high, &ss32->port_high))
+			return -EFAULT;
 	}
 	return err;
 }
@@ -2397,6 +2404,7 @@ HANDLE_IOCTL(SIOCGIFMAP, dev_ifsioc)
 HANDLE_IOCTL(SIOCSIFMAP, dev_ifsioc)
 HANDLE_IOCTL(SIOCGIFADDR, dev_ifsioc)
 HANDLE_IOCTL(SIOCSIFADDR, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFHWBROADCAST, dev_ifsioc)
 
 /* ioctls used by appletalk ddp.c */
 HANDLE_IOCTL(SIOCATALKDIFADDR, dev_ifsioc)
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index 3f4ff7a242b9..f92cd303d2c9 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -49,7 +49,7 @@ struct configfs_dirent {
 #define CONFIGFS_NOT_PINNED (CONFIGFS_ITEM_ATTR)
 
 extern struct vfsmount * configfs_mount;
-extern kmem_cache_t *configfs_dir_cachep;
+extern struct kmem_cache *configfs_dir_cachep;
 
 extern int configfs_is_root(struct config_item *item);
 
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index 68bd5c93ca52..ed678529ebb2 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -38,7 +38,7 @@
 
 struct vfsmount * configfs_mount = NULL;
 struct super_block * configfs_sb = NULL;
-kmem_cache_t *configfs_dir_cachep;
+struct kmem_cache *configfs_dir_cachep;
 static int configfs_mnt_count = 0;
 
 static struct super_operations configfs_ops = {
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index a624c3ec8189..0509cedd415c 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -481,6 +481,8 @@ static int cramfs_readpage(struct file *file, struct page * page)
 	pgdata = kmap(page);
 	if (compr_len == 0)
 		; /* hole */
+	else if (compr_len > (PAGE_CACHE_SIZE << 1))
+		printk(KERN_ERR "cramfs: bad compressed blocksize %u\n", compr_len);
 	else {
 		mutex_lock(&read_mutex);
 		bytes_filled = cramfs_uncompress_block(pgdata,
diff --git a/fs/dcache.c b/fs/dcache.c
index fd4a428998ef..d68631f18df1 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -43,7 +43,7 @@ static __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
 
 EXPORT_SYMBOL(dcache_lock);
 
-static kmem_cache_t *dentry_cache __read_mostly;
+static struct kmem_cache *dentry_cache __read_mostly;
 
 #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
 
@@ -68,15 +68,19 @@ struct dentry_stat_t dentry_stat = {
 	.age_limit = 45,
 };
 
-static void d_callback(struct rcu_head *head)
+static void __d_free(struct dentry *dentry)
 {
-	struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
-
 	if (dname_external(dentry))
 		kfree(dentry->d_name.name);
 	kmem_cache_free(dentry_cache, dentry);
 }
 
+static void d_callback(struct rcu_head *head)
+{
+	struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
+	__d_free(dentry);
+}
+
 /*
  * no dcache_lock, please. The caller must decrement dentry_stat.nr_dentry
  * inside dcache_lock.
@@ -85,7 +89,11 @@ static void d_free(struct dentry *dentry)
 {
 	if (dentry->d_op && dentry->d_op->d_release)
 		dentry->d_op->d_release(dentry);
-	call_rcu(&dentry->d_u.d_rcu, d_callback);
+	/* if dentry was never inserted into hash, immediate free is OK */
+	if (dentry->d_hash.pprev == NULL)
+		__d_free(dentry);
+	else
+		call_rcu(&dentry->d_u.d_rcu, d_callback);
 }
 
 /*
@@ -2072,10 +2080,10 @@ static void __init dcache_init(unsigned long mempages)
 }
 
 /* SLAB cache for __getname() consumers */
-kmem_cache_t *names_cachep __read_mostly;
+struct kmem_cache *names_cachep __read_mostly;
 
 /* SLAB cache for file structures */
-kmem_cache_t *filp_cachep __read_mostly;
+struct kmem_cache *filp_cachep __read_mostly;
 
 EXPORT_SYMBOL(d_genocide);
 
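The dcache change splits the actual freeing out of the RCU callback so that d_free() can release a dentry immediately when it was never hashed: such a dentry was never visible to lockless hash lookups under RCU, so no grace period is needed. The resulting logic, condensed from the hunks above:

	if (dentry->d_hash.pprev == NULL)	/* never hashed, no RCU readers */
		__d_free(dentry);		/* free right away */
	else
		call_rcu(&dentry->d_u.d_rcu, d_callback);	/* defer past readers */
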
diff --git a/fs/dcookies.c b/fs/dcookies.c
index 0c4b0674854b..21af1629f9bc 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -37,7 +37,7 @@ struct dcookie_struct {
37 37
38static LIST_HEAD(dcookie_users); 38static LIST_HEAD(dcookie_users);
39static DEFINE_MUTEX(dcookie_mutex); 39static DEFINE_MUTEX(dcookie_mutex);
40static kmem_cache_t *dcookie_cache __read_mostly; 40static struct kmem_cache *dcookie_cache __read_mostly;
41static struct list_head *dcookie_hashtable __read_mostly; 41static struct list_head *dcookie_hashtable __read_mostly;
42static size_t hash_size __read_mostly; 42static size_t hash_size __read_mostly;
43 43
diff --git a/fs/dlm/Kconfig b/fs/dlm/Kconfig
index 81b2c6465eeb..b5654a284fef 100644
--- a/fs/dlm/Kconfig
+++ b/fs/dlm/Kconfig
@@ -1,14 +1,32 @@
 menu "Distributed Lock Manager"
-	depends on INET && IP_SCTP && EXPERIMENTAL
+	depends on EXPERIMENTAL && INET
 
 config DLM
 	tristate "Distributed Lock Manager (DLM)"
 	depends on IPV6 || IPV6=n
 	select CONFIGFS_FS
+	select IP_SCTP if DLM_SCTP
 	help
 	  A general purpose distributed lock manager for kernel or userspace
 	  applications.
 
+choice
+	prompt "Select DLM communications protocol"
+	depends on DLM
+	default DLM_TCP
+	help
+	  The DLM Can use TCP or SCTP for it's network communications.
+	  SCTP supports multi-homed operations whereas TCP doesn't.
+	  However, SCTP seems to have stability problems at the moment.
+
+config DLM_TCP
+	bool "TCP/IP"
+
+config DLM_SCTP
+	bool "SCTP"
+
+endchoice
+
 config DLM_DEBUG
 	bool "DLM debugging"
 	depends on DLM
diff --git a/fs/dlm/Makefile b/fs/dlm/Makefile
index 1832e0297f7d..65388944eba0 100644
--- a/fs/dlm/Makefile
+++ b/fs/dlm/Makefile
@@ -4,7 +4,6 @@ dlm-y := ast.o \
 	dir.o \
 	lock.o \
 	lockspace.o \
-	lowcomms.o \
 	main.o \
 	member.o \
 	memory.o \
@@ -17,3 +16,6 @@ dlm-y := ast.o \
 	util.o
 dlm-$(CONFIG_DLM_DEBUG) += debug_fs.o
 
+dlm-$(CONFIG_DLM_TCP) += lowcomms-tcp.o
+
+dlm-$(CONFIG_DLM_SCTP) += lowcomms-sctp.o
\ No newline at end of file
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 1e5cd67e1b7a..1ee8195e6fc0 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -471,6 +471,7 @@ struct dlm_ls {
471 char *ls_recover_buf; 471 char *ls_recover_buf;
472 int ls_recover_nodeid; /* for debugging */ 472 int ls_recover_nodeid; /* for debugging */
473 uint64_t ls_rcom_seq; 473 uint64_t ls_rcom_seq;
474 spinlock_t ls_rcom_spin;
474 struct list_head ls_recover_list; 475 struct list_head ls_recover_list;
475 spinlock_t ls_recover_list_lock; 476 spinlock_t ls_recover_list_lock;
476 int ls_recover_list_count; 477 int ls_recover_list_count;
@@ -488,7 +489,8 @@ struct dlm_ls {
488#define LSFL_RUNNING 1 489#define LSFL_RUNNING 1
489#define LSFL_RECOVERY_STOP 2 490#define LSFL_RECOVERY_STOP 2
490#define LSFL_RCOM_READY 3 491#define LSFL_RCOM_READY 3
491#define LSFL_UEVENT_WAIT 4 492#define LSFL_RCOM_WAIT 4
493#define LSFL_UEVENT_WAIT 5
492 494
493/* much of this is just saving user space pointers associated with the 495/* much of this is just saving user space pointers associated with the
494 lock that we pass back to the user lib with an ast */ 496 lock that we pass back to the user lib with an ast */
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 3f2befa4797b..30878defaeb6 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -2372,6 +2372,7 @@ static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
2372static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms) 2372static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
2373{ 2373{
2374 lkb->lkb_exflags = ms->m_exflags; 2374 lkb->lkb_exflags = ms->m_exflags;
2375 lkb->lkb_sbflags = ms->m_sbflags;
2375 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) | 2376 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
2376 (ms->m_flags & 0x0000FFFF); 2377 (ms->m_flags & 0x0000FFFF);
2377} 2378}
@@ -3028,10 +3029,17 @@ int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery)
3028 3029
3029 while (1) { 3030 while (1) {
3030 if (dlm_locking_stopped(ls)) { 3031 if (dlm_locking_stopped(ls)) {
3031 if (!recovery) 3032 if (recovery) {
3032 dlm_add_requestqueue(ls, nodeid, hd); 3033 error = -EINTR;
3033 error = -EINTR; 3034 goto out;
3034 goto out; 3035 }
3036 error = dlm_add_requestqueue(ls, nodeid, hd);
3037 if (error == -EAGAIN)
3038 continue;
3039 else {
3040 error = -EINTR;
3041 goto out;
3042 }
3035 } 3043 }
3036 3044
3037 if (lock_recovery_try(ls)) 3045 if (lock_recovery_try(ls))
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index f8842ca443c2..59012b089e8d 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -22,6 +22,7 @@
22#include "memory.h" 22#include "memory.h"
23#include "lock.h" 23#include "lock.h"
24#include "recover.h" 24#include "recover.h"
25#include "requestqueue.h"
25 26
26#ifdef CONFIG_DLM_DEBUG 27#ifdef CONFIG_DLM_DEBUG
27int dlm_create_debug_file(struct dlm_ls *ls); 28int dlm_create_debug_file(struct dlm_ls *ls);
@@ -478,6 +479,8 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
478 ls->ls_recoverd_task = NULL; 479 ls->ls_recoverd_task = NULL;
479 mutex_init(&ls->ls_recoverd_active); 480 mutex_init(&ls->ls_recoverd_active);
480 spin_lock_init(&ls->ls_recover_lock); 481 spin_lock_init(&ls->ls_recover_lock);
482 spin_lock_init(&ls->ls_rcom_spin);
483 get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
481 ls->ls_recover_status = 0; 484 ls->ls_recover_status = 0;
482 ls->ls_recover_seq = 0; 485 ls->ls_recover_seq = 0;
483 ls->ls_recover_args = NULL; 486 ls->ls_recover_args = NULL;
@@ -684,6 +687,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
684 * Free structures on any other lists 687 * Free structures on any other lists
685 */ 688 */
686 689
690 dlm_purge_requestqueue(ls);
687 kfree(ls->ls_recover_args); 691 kfree(ls->ls_recover_args);
688 dlm_clear_free_entries(ls); 692 dlm_clear_free_entries(ls);
689 dlm_clear_members(ls); 693 dlm_clear_members(ls);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms-sctp.c
index 6da6b14d5a61..fe158d7a9285 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms-sctp.c
@@ -2,7 +2,7 @@
2******************************************************************************* 2*******************************************************************************
3** 3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. 5** Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
6** 6**
7** This copyrighted material is made available to anyone wishing to use, 7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions 8** modify, copy, or redistribute it subject to the terms and conditions
@@ -75,13 +75,13 @@ struct nodeinfo {
75}; 75};
76 76
77static DEFINE_IDR(nodeinfo_idr); 77static DEFINE_IDR(nodeinfo_idr);
78static struct rw_semaphore nodeinfo_lock; 78static DECLARE_RWSEM(nodeinfo_lock);
79static int max_nodeid; 79static int max_nodeid;
80 80
81struct cbuf { 81struct cbuf {
82 unsigned base; 82 unsigned int base;
83 unsigned len; 83 unsigned int len;
84 unsigned mask; 84 unsigned int mask;
85}; 85};
86 86
87/* Just the one of these, now. But this struct keeps 87/* Just the one of these, now. But this struct keeps
@@ -90,9 +90,9 @@ struct cbuf {
90#define CF_READ_PENDING 1 90#define CF_READ_PENDING 1
91 91
92struct connection { 92struct connection {
93 struct socket *sock; 93 struct socket *sock;
94 unsigned long flags; 94 unsigned long flags;
95 struct page *rx_page; 95 struct page *rx_page;
96 atomic_t waiting_requests; 96 atomic_t waiting_requests;
97 struct cbuf cb; 97 struct cbuf cb;
98 int eagain_flag; 98 int eagain_flag;
@@ -102,36 +102,40 @@ struct connection {
102 102
103struct writequeue_entry { 103struct writequeue_entry {
104 struct list_head list; 104 struct list_head list;
105 struct page *page; 105 struct page *page;
106 int offset; 106 int offset;
107 int len; 107 int len;
108 int end; 108 int end;
109 int users; 109 int users;
110 struct nodeinfo *ni; 110 struct nodeinfo *ni;
111}; 111};
112 112
113#define CBUF_ADD(cb, n) do { (cb)->len += n; } while(0) 113static void cbuf_add(struct cbuf *cb, int n)
114#define CBUF_EMPTY(cb) ((cb)->len == 0) 114{
115#define CBUF_MAY_ADD(cb, n) (((cb)->len + (n)) < ((cb)->mask + 1)) 115 cb->len += n;
116#define CBUF_DATA(cb) (((cb)->base + (cb)->len) & (cb)->mask) 116}
117 117
118#define CBUF_INIT(cb, size) \ 118static int cbuf_data(struct cbuf *cb)
119do { \ 119{
120 (cb)->base = (cb)->len = 0; \ 120 return ((cb->base + cb->len) & cb->mask);
121 (cb)->mask = ((size)-1); \ 121}
122} while(0)
123 122
124#define CBUF_EAT(cb, n) \ 123static void cbuf_init(struct cbuf *cb, int size)
125do { \ 124{
126 (cb)->len -= (n); \ 125 cb->base = cb->len = 0;
127 (cb)->base += (n); \ 126 cb->mask = size-1;
128 (cb)->base &= (cb)->mask; \ 127}
129} while(0)
130 128
129static void cbuf_eat(struct cbuf *cb, int n)
130{
131 cb->len -= n;
132 cb->base += n;
133 cb->base &= cb->mask;
134}
131 135
132/* List of nodes which have writes pending */ 136/* List of nodes which have writes pending */
133static struct list_head write_nodes; 137static LIST_HEAD(write_nodes);
134static spinlock_t write_nodes_lock; 138static DEFINE_SPINLOCK(write_nodes_lock);
135 139
136/* Maximum number of incoming messages to process before 140/* Maximum number of incoming messages to process before
137 * doing a schedule() 141 * doing a schedule()
@@ -141,8 +145,7 @@ static spinlock_t write_nodes_lock;
141/* Manage daemons */ 145/* Manage daemons */
142static struct task_struct *recv_task; 146static struct task_struct *recv_task;
143static struct task_struct *send_task; 147static struct task_struct *send_task;
144static wait_queue_head_t lowcomms_recv_wait; 148static DECLARE_WAIT_QUEUE_HEAD(lowcomms_recv_wait);
145static atomic_t accepting;
146 149
147/* The SCTP connection */ 150/* The SCTP connection */
148static struct connection sctp_con; 151static struct connection sctp_con;
@@ -161,11 +164,11 @@ static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
161 return error; 164 return error;
162 165
163 if (dlm_local_addr[0]->ss_family == AF_INET) { 166 if (dlm_local_addr[0]->ss_family == AF_INET) {
164 struct sockaddr_in *in4 = (struct sockaddr_in *) &addr; 167 struct sockaddr_in *in4 = (struct sockaddr_in *) &addr;
165 struct sockaddr_in *ret4 = (struct sockaddr_in *) retaddr; 168 struct sockaddr_in *ret4 = (struct sockaddr_in *) retaddr;
166 ret4->sin_addr.s_addr = in4->sin_addr.s_addr; 169 ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
167 } else { 170 } else {
168 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr; 171 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr;
169 struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr; 172 struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
170 memcpy(&ret6->sin6_addr, &in6->sin6_addr, 173 memcpy(&ret6->sin6_addr, &in6->sin6_addr,
171 sizeof(in6->sin6_addr)); 174 sizeof(in6->sin6_addr));
@@ -174,6 +177,8 @@ static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
174 return 0; 177 return 0;
175} 178}
176 179
180/* If alloc is 0 here we will not attempt to allocate a new
181 nodeinfo struct */
177static struct nodeinfo *nodeid2nodeinfo(int nodeid, gfp_t alloc) 182static struct nodeinfo *nodeid2nodeinfo(int nodeid, gfp_t alloc)
178{ 183{
179 struct nodeinfo *ni; 184 struct nodeinfo *ni;
@@ -184,44 +189,45 @@ static struct nodeinfo *nodeid2nodeinfo(int nodeid, gfp_t alloc)
184 ni = idr_find(&nodeinfo_idr, nodeid); 189 ni = idr_find(&nodeinfo_idr, nodeid);
185 up_read(&nodeinfo_lock); 190 up_read(&nodeinfo_lock);
186 191
187 if (!ni && alloc) { 192 if (ni || !alloc)
188 down_write(&nodeinfo_lock); 193 return ni;
189 194
190 ni = idr_find(&nodeinfo_idr, nodeid); 195 down_write(&nodeinfo_lock);
191 if (ni)
192 goto out_up;
193 196
194 r = idr_pre_get(&nodeinfo_idr, alloc); 197 ni = idr_find(&nodeinfo_idr, nodeid);
195 if (!r) 198 if (ni)
196 goto out_up; 199 goto out_up;
197 200
198 ni = kmalloc(sizeof(struct nodeinfo), alloc); 201 r = idr_pre_get(&nodeinfo_idr, alloc);
199 if (!ni) 202 if (!r)
200 goto out_up; 203 goto out_up;
201 204
202 r = idr_get_new_above(&nodeinfo_idr, ni, nodeid, &n); 205 ni = kmalloc(sizeof(struct nodeinfo), alloc);
203 if (r) { 206 if (!ni)
204 kfree(ni); 207 goto out_up;
205 ni = NULL; 208
206 goto out_up; 209 r = idr_get_new_above(&nodeinfo_idr, ni, nodeid, &n);
207 } 210 if (r) {
208 if (n != nodeid) { 211 kfree(ni);
209 idr_remove(&nodeinfo_idr, n); 212 ni = NULL;
210 kfree(ni); 213 goto out_up;
211 ni = NULL;
212 goto out_up;
213 }
214 memset(ni, 0, sizeof(struct nodeinfo));
215 spin_lock_init(&ni->lock);
216 INIT_LIST_HEAD(&ni->writequeue);
217 spin_lock_init(&ni->writequeue_lock);
218 ni->nodeid = nodeid;
219
220 if (nodeid > max_nodeid)
221 max_nodeid = nodeid;
222 out_up:
223 up_write(&nodeinfo_lock);
224 } 214 }
215 if (n != nodeid) {
216 idr_remove(&nodeinfo_idr, n);
217 kfree(ni);
218 ni = NULL;
219 goto out_up;
220 }
221 memset(ni, 0, sizeof(struct nodeinfo));
222 spin_lock_init(&ni->lock);
223 INIT_LIST_HEAD(&ni->writequeue);
224 spin_lock_init(&ni->writequeue_lock);
225 ni->nodeid = nodeid;
226
227 if (nodeid > max_nodeid)
228 max_nodeid = nodeid;
229out_up:
230 up_write(&nodeinfo_lock);
225 231
226 return ni; 232 return ni;
227} 233}
@@ -279,13 +285,13 @@ static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
279 in4_addr->sin_port = cpu_to_be16(port); 285 in4_addr->sin_port = cpu_to_be16(port);
280 memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero)); 286 memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
281 memset(in4_addr+1, 0, sizeof(struct sockaddr_storage) - 287 memset(in4_addr+1, 0, sizeof(struct sockaddr_storage) -
282 sizeof(struct sockaddr_in)); 288 sizeof(struct sockaddr_in));
283 *addr_len = sizeof(struct sockaddr_in); 289 *addr_len = sizeof(struct sockaddr_in);
284 } else { 290 } else {
285 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr; 291 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
286 in6_addr->sin6_port = cpu_to_be16(port); 292 in6_addr->sin6_port = cpu_to_be16(port);
287 memset(in6_addr+1, 0, sizeof(struct sockaddr_storage) - 293 memset(in6_addr+1, 0, sizeof(struct sockaddr_storage) -
288 sizeof(struct sockaddr_in6)); 294 sizeof(struct sockaddr_in6));
289 *addr_len = sizeof(struct sockaddr_in6); 295 *addr_len = sizeof(struct sockaddr_in6);
290 } 296 }
291} 297}
@@ -324,7 +330,7 @@ static void send_shutdown(sctp_assoc_t associd)
324 cmsg->cmsg_type = SCTP_SNDRCV; 330 cmsg->cmsg_type = SCTP_SNDRCV;
325 cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); 331 cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
326 outmessage.msg_controllen = cmsg->cmsg_len; 332 outmessage.msg_controllen = cmsg->cmsg_len;
327 sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg); 333 sinfo = CMSG_DATA(cmsg);
328 memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo)); 334 memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
329 335
330 sinfo->sinfo_flags |= MSG_EOF; 336 sinfo->sinfo_flags |= MSG_EOF;
@@ -387,7 +393,7 @@ static void process_sctp_notification(struct msghdr *msg, char *buf)
387 393
388 if ((int)sn->sn_assoc_change.sac_assoc_id <= 0) { 394 if ((int)sn->sn_assoc_change.sac_assoc_id <= 0) {
389 log_print("COMM_UP for invalid assoc ID %d", 395 log_print("COMM_UP for invalid assoc ID %d",
390 (int)sn->sn_assoc_change.sac_assoc_id); 396 (int)sn->sn_assoc_change.sac_assoc_id);
391 init_failed(); 397 init_failed();
392 return; 398 return;
393 } 399 }
@@ -398,15 +404,18 @@ static void process_sctp_notification(struct msghdr *msg, char *buf)
398 fs = get_fs(); 404 fs = get_fs();
399 set_fs(get_ds()); 405 set_fs(get_ds());
400 ret = sctp_con.sock->ops->getsockopt(sctp_con.sock, 406 ret = sctp_con.sock->ops->getsockopt(sctp_con.sock,
401 IPPROTO_SCTP, SCTP_PRIMARY_ADDR, 407 IPPROTO_SCTP,
402 (char*)&prim, &prim_len); 408 SCTP_PRIMARY_ADDR,
409 (char*)&prim,
410 &prim_len);
403 set_fs(fs); 411 set_fs(fs);
404 if (ret < 0) { 412 if (ret < 0) {
405 struct nodeinfo *ni; 413 struct nodeinfo *ni;
406 414
407 log_print("getsockopt/sctp_primary_addr on " 415 log_print("getsockopt/sctp_primary_addr on "
408 "new assoc %d failed : %d", 416 "new assoc %d failed : %d",
409 (int)sn->sn_assoc_change.sac_assoc_id, ret); 417 (int)sn->sn_assoc_change.sac_assoc_id,
418 ret);
410 419
411 /* Retry INIT later */ 420 /* Retry INIT later */
412 ni = assoc2nodeinfo(sn->sn_assoc_change.sac_assoc_id); 421 ni = assoc2nodeinfo(sn->sn_assoc_change.sac_assoc_id);
@@ -426,12 +435,10 @@ static void process_sctp_notification(struct msghdr *msg, char *buf)
426 return; 435 return;
427 436
428 /* Save the assoc ID */ 437 /* Save the assoc ID */
429 spin_lock(&ni->lock);
430 ni->assoc_id = sn->sn_assoc_change.sac_assoc_id; 438 ni->assoc_id = sn->sn_assoc_change.sac_assoc_id;
431 spin_unlock(&ni->lock);
432 439
433 log_print("got new/restarted association %d nodeid %d", 440 log_print("got new/restarted association %d nodeid %d",
434 (int)sn->sn_assoc_change.sac_assoc_id, nodeid); 441 (int)sn->sn_assoc_change.sac_assoc_id, nodeid);
435 442
436 /* Send any pending writes */ 443 /* Send any pending writes */
437 clear_bit(NI_INIT_PENDING, &ni->flags); 444 clear_bit(NI_INIT_PENDING, &ni->flags);
@@ -507,13 +514,12 @@ static int receive_from_sock(void)
507 sctp_con.rx_page = alloc_page(GFP_ATOMIC); 514 sctp_con.rx_page = alloc_page(GFP_ATOMIC);
508 if (sctp_con.rx_page == NULL) 515 if (sctp_con.rx_page == NULL)
509 goto out_resched; 516 goto out_resched;
510 CBUF_INIT(&sctp_con.cb, PAGE_CACHE_SIZE); 517 cbuf_init(&sctp_con.cb, PAGE_CACHE_SIZE);
511 } 518 }
512 519
513 memset(&incmsg, 0, sizeof(incmsg)); 520 memset(&incmsg, 0, sizeof(incmsg));
514 memset(&msgname, 0, sizeof(msgname)); 521 memset(&msgname, 0, sizeof(msgname));
515 522
516 memset(incmsg, 0, sizeof(incmsg));
517 msg.msg_name = &msgname; 523 msg.msg_name = &msgname;
518 msg.msg_namelen = sizeof(msgname); 524 msg.msg_namelen = sizeof(msgname);
519 msg.msg_flags = 0; 525 msg.msg_flags = 0;
@@ -532,17 +538,17 @@ static int receive_from_sock(void)
532 * iov[0] is the bit of the circular buffer between the current end 538 * iov[0] is the bit of the circular buffer between the current end
533 * point (cb.base + cb.len) and the end of the buffer. 539 * point (cb.base + cb.len) and the end of the buffer.
534 */ 540 */
535 iov[0].iov_len = sctp_con.cb.base - CBUF_DATA(&sctp_con.cb); 541 iov[0].iov_len = sctp_con.cb.base - cbuf_data(&sctp_con.cb);
536 iov[0].iov_base = page_address(sctp_con.rx_page) + 542 iov[0].iov_base = page_address(sctp_con.rx_page) +
537 CBUF_DATA(&sctp_con.cb); 543 cbuf_data(&sctp_con.cb);
538 iov[1].iov_len = 0; 544 iov[1].iov_len = 0;
539 545
540 /* 546 /*
541 * iov[1] is the bit of the circular buffer between the start of the 547 * iov[1] is the bit of the circular buffer between the start of the
542 * buffer and the start of the currently used section (cb.base) 548 * buffer and the start of the currently used section (cb.base)
543 */ 549 */
544 if (CBUF_DATA(&sctp_con.cb) >= sctp_con.cb.base) { 550 if (cbuf_data(&sctp_con.cb) >= sctp_con.cb.base) {
545 iov[0].iov_len = PAGE_CACHE_SIZE - CBUF_DATA(&sctp_con.cb); 551 iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&sctp_con.cb);
546 iov[1].iov_len = sctp_con.cb.base; 552 iov[1].iov_len = sctp_con.cb.base;
547 iov[1].iov_base = page_address(sctp_con.rx_page); 553 iov[1].iov_base = page_address(sctp_con.rx_page);
548 msg.msg_iovlen = 2; 554 msg.msg_iovlen = 2;
@@ -557,7 +563,7 @@ static int receive_from_sock(void)
557 msg.msg_control = incmsg; 563 msg.msg_control = incmsg;
558 msg.msg_controllen = sizeof(incmsg); 564 msg.msg_controllen = sizeof(incmsg);
559 cmsg = CMSG_FIRSTHDR(&msg); 565 cmsg = CMSG_FIRSTHDR(&msg);
560 sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg); 566 sinfo = CMSG_DATA(cmsg);
561 567
562 if (msg.msg_flags & MSG_NOTIFICATION) { 568 if (msg.msg_flags & MSG_NOTIFICATION) {
563 process_sctp_notification(&msg, page_address(sctp_con.rx_page)); 569 process_sctp_notification(&msg, page_address(sctp_con.rx_page));
@@ -583,29 +589,29 @@ static int receive_from_sock(void)
583 if (r == 1) 589 if (r == 1)
584 return 0; 590 return 0;
585 591
586 CBUF_ADD(&sctp_con.cb, ret); 592 cbuf_add(&sctp_con.cb, ret);
587 ret = dlm_process_incoming_buffer(cpu_to_le32(sinfo->sinfo_ppid), 593 ret = dlm_process_incoming_buffer(cpu_to_le32(sinfo->sinfo_ppid),
588 page_address(sctp_con.rx_page), 594 page_address(sctp_con.rx_page),
589 sctp_con.cb.base, sctp_con.cb.len, 595 sctp_con.cb.base, sctp_con.cb.len,
590 PAGE_CACHE_SIZE); 596 PAGE_CACHE_SIZE);
591 if (ret < 0) 597 if (ret < 0)
592 goto out_close; 598 goto out_close;
593 CBUF_EAT(&sctp_con.cb, ret); 599 cbuf_eat(&sctp_con.cb, ret);
594 600
595 out: 601out:
596 ret = 0; 602 ret = 0;
597 goto out_ret; 603 goto out_ret;
598 604
599 out_resched: 605out_resched:
600 lowcomms_data_ready(sctp_con.sock->sk, 0); 606 lowcomms_data_ready(sctp_con.sock->sk, 0);
601 ret = 0; 607 ret = 0;
602 schedule(); 608 cond_resched();
603 goto out_ret; 609 goto out_ret;
604 610
605 out_close: 611out_close:
606 if (ret != -EAGAIN) 612 if (ret != -EAGAIN)
607 log_print("error reading from sctp socket: %d", ret); 613 log_print("error reading from sctp socket: %d", ret);
608 out_ret: 614out_ret:
609 return ret; 615 return ret;
610} 616}
611 617
@@ -619,10 +625,12 @@ static int add_bind_addr(struct sockaddr_storage *addr, int addr_len, int num)
619 set_fs(get_ds()); 625 set_fs(get_ds());
620 if (num == 1) 626 if (num == 1)
621 result = sctp_con.sock->ops->bind(sctp_con.sock, 627 result = sctp_con.sock->ops->bind(sctp_con.sock,
622 (struct sockaddr *) addr, addr_len); 628 (struct sockaddr *) addr,
629 addr_len);
623 else 630 else
624 result = sctp_con.sock->ops->setsockopt(sctp_con.sock, SOL_SCTP, 631 result = sctp_con.sock->ops->setsockopt(sctp_con.sock, SOL_SCTP,
625 SCTP_SOCKOPT_BINDX_ADD, (char *)addr, addr_len); 632 SCTP_SOCKOPT_BINDX_ADD,
633 (char *)addr, addr_len);
626 set_fs(fs); 634 set_fs(fs);
627 635
628 if (result < 0) 636 if (result < 0)
@@ -719,10 +727,10 @@ static int init_sock(void)
719 727
720 return 0; 728 return 0;
721 729
722 create_delsock: 730create_delsock:
723 sock_release(sock); 731 sock_release(sock);
724 sctp_con.sock = NULL; 732 sctp_con.sock = NULL;
725 out: 733out:
726 return result; 734 return result;
727} 735}
728 736
@@ -756,16 +764,13 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
756 int users = 0; 764 int users = 0;
757 struct nodeinfo *ni; 765 struct nodeinfo *ni;
758 766
759 if (!atomic_read(&accepting))
760 return NULL;
761
762 ni = nodeid2nodeinfo(nodeid, allocation); 767 ni = nodeid2nodeinfo(nodeid, allocation);
763 if (!ni) 768 if (!ni)
764 return NULL; 769 return NULL;
765 770
766 spin_lock(&ni->writequeue_lock); 771 spin_lock(&ni->writequeue_lock);
767 e = list_entry(ni->writequeue.prev, struct writequeue_entry, list); 772 e = list_entry(ni->writequeue.prev, struct writequeue_entry, list);
768 if (((struct list_head *) e == &ni->writequeue) || 773 if ((&e->list == &ni->writequeue) ||
769 (PAGE_CACHE_SIZE - e->end < len)) { 774 (PAGE_CACHE_SIZE - e->end < len)) {
770 e = NULL; 775 e = NULL;
771 } else { 776 } else {
@@ -776,7 +781,7 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
776 spin_unlock(&ni->writequeue_lock); 781 spin_unlock(&ni->writequeue_lock);
777 782
778 if (e) { 783 if (e) {
779 got_one: 784 got_one:
780 if (users == 0) 785 if (users == 0)
781 kmap(e->page); 786 kmap(e->page);
782 *ppc = page_address(e->page) + offset; 787 *ppc = page_address(e->page) + offset;
@@ -803,9 +808,6 @@ void dlm_lowcomms_commit_buffer(void *arg)
803 int users; 808 int users;
804 struct nodeinfo *ni = e->ni; 809 struct nodeinfo *ni = e->ni;
805 810
806 if (!atomic_read(&accepting))
807 return;
808
809 spin_lock(&ni->writequeue_lock); 811 spin_lock(&ni->writequeue_lock);
810 users = --e->users; 812 users = --e->users;
811 if (users) 813 if (users)
@@ -822,7 +824,7 @@ void dlm_lowcomms_commit_buffer(void *arg)
822 } 824 }
823 return; 825 return;
824 826
825 out: 827out:
826 spin_unlock(&ni->writequeue_lock); 828 spin_unlock(&ni->writequeue_lock);
827 return; 829 return;
828} 830}
@@ -878,7 +880,7 @@ static void initiate_association(int nodeid)
878 cmsg->cmsg_level = IPPROTO_SCTP; 880 cmsg->cmsg_level = IPPROTO_SCTP;
879 cmsg->cmsg_type = SCTP_SNDRCV; 881 cmsg->cmsg_type = SCTP_SNDRCV;
880 cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); 882 cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
881 sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg); 883 sinfo = CMSG_DATA(cmsg);
882 memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo)); 884 memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
883 sinfo->sinfo_ppid = cpu_to_le32(dlm_local_nodeid); 885 sinfo->sinfo_ppid = cpu_to_le32(dlm_local_nodeid);
884 886
@@ -892,7 +894,7 @@ static void initiate_association(int nodeid)
892} 894}
893 895
894/* Send a message */ 896/* Send a message */
895static int send_to_sock(struct nodeinfo *ni) 897static void send_to_sock(struct nodeinfo *ni)
896{ 898{
897 int ret = 0; 899 int ret = 0;
898 struct writequeue_entry *e; 900 struct writequeue_entry *e;
@@ -903,13 +905,13 @@ static int send_to_sock(struct nodeinfo *ni)
903 struct sctp_sndrcvinfo *sinfo; 905 struct sctp_sndrcvinfo *sinfo;
904 struct kvec iov; 906 struct kvec iov;
905 907
906 /* See if we need to init an association before we start 908 /* See if we need to init an association before we start
907 sending precious messages */ 909 sending precious messages */
908 spin_lock(&ni->lock); 910 spin_lock(&ni->lock);
909 if (!ni->assoc_id && !test_and_set_bit(NI_INIT_PENDING, &ni->flags)) { 911 if (!ni->assoc_id && !test_and_set_bit(NI_INIT_PENDING, &ni->flags)) {
910 spin_unlock(&ni->lock); 912 spin_unlock(&ni->lock);
911 initiate_association(ni->nodeid); 913 initiate_association(ni->nodeid);
912 return 0; 914 return;
913 } 915 }
914 spin_unlock(&ni->lock); 916 spin_unlock(&ni->lock);
915 917
@@ -923,7 +925,7 @@ static int send_to_sock(struct nodeinfo *ni)
923 cmsg->cmsg_level = IPPROTO_SCTP; 925 cmsg->cmsg_level = IPPROTO_SCTP;
924 cmsg->cmsg_type = SCTP_SNDRCV; 926 cmsg->cmsg_type = SCTP_SNDRCV;
925 cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); 927 cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
926 sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg); 928 sinfo = CMSG_DATA(cmsg);
927 memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo)); 929 memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
928 sinfo->sinfo_ppid = cpu_to_le32(dlm_local_nodeid); 930 sinfo->sinfo_ppid = cpu_to_le32(dlm_local_nodeid);
929 sinfo->sinfo_assoc_id = ni->assoc_id; 931 sinfo->sinfo_assoc_id = ni->assoc_id;
@@ -955,7 +957,7 @@ static int send_to_sock(struct nodeinfo *ni)
955 goto send_error; 957 goto send_error;
956 } else { 958 } else {
957 /* Don't starve people filling buffers */ 959 /* Don't starve people filling buffers */
958 schedule(); 960 cond_resched();
959 } 961 }
960 962
961 spin_lock(&ni->writequeue_lock); 963 spin_lock(&ni->writequeue_lock);
@@ -964,15 +966,16 @@ static int send_to_sock(struct nodeinfo *ni)
964 966
965 if (e->len == 0 && e->users == 0) { 967 if (e->len == 0 && e->users == 0) {
966 list_del(&e->list); 968 list_del(&e->list);
969 kunmap(e->page);
967 free_entry(e); 970 free_entry(e);
968 continue; 971 continue;
969 } 972 }
970 } 973 }
971 spin_unlock(&ni->writequeue_lock); 974 spin_unlock(&ni->writequeue_lock);
972 out: 975out:
973 return ret; 976 return;
974 977
975 send_error: 978send_error:
976 log_print("Error sending to node %d %d", ni->nodeid, ret); 979 log_print("Error sending to node %d %d", ni->nodeid, ret);
977 spin_lock(&ni->lock); 980 spin_lock(&ni->lock);
978 if (!test_and_set_bit(NI_INIT_PENDING, &ni->flags)) { 981 if (!test_and_set_bit(NI_INIT_PENDING, &ni->flags)) {
@@ -982,7 +985,7 @@ static int send_to_sock(struct nodeinfo *ni)
982 } else 985 } else
983 spin_unlock(&ni->lock); 986 spin_unlock(&ni->lock);
984 987
985 return ret; 988 return;
986} 989}
987 990
988/* Try to send any messages that are pending */ 991/* Try to send any messages that are pending */
@@ -994,7 +997,7 @@ static void process_output_queue(void)
994 spin_lock_bh(&write_nodes_lock); 997 spin_lock_bh(&write_nodes_lock);
995 list_for_each_safe(list, temp, &write_nodes) { 998 list_for_each_safe(list, temp, &write_nodes) {
996 struct nodeinfo *ni = 999 struct nodeinfo *ni =
997 list_entry(list, struct nodeinfo, write_list); 1000 list_entry(list, struct nodeinfo, write_list);
998 clear_bit(NI_WRITE_PENDING, &ni->flags); 1001 clear_bit(NI_WRITE_PENDING, &ni->flags);
999 list_del(&ni->write_list); 1002 list_del(&ni->write_list);
1000 1003
@@ -1106,7 +1109,7 @@ static int dlm_recvd(void *data)
1106 set_current_state(TASK_INTERRUPTIBLE); 1109 set_current_state(TASK_INTERRUPTIBLE);
1107 add_wait_queue(&lowcomms_recv_wait, &wait); 1110 add_wait_queue(&lowcomms_recv_wait, &wait);
1108 if (!test_bit(CF_READ_PENDING, &sctp_con.flags)) 1111 if (!test_bit(CF_READ_PENDING, &sctp_con.flags))
1109 schedule(); 1112 cond_resched();
1110 remove_wait_queue(&lowcomms_recv_wait, &wait); 1113 remove_wait_queue(&lowcomms_recv_wait, &wait);
1111 set_current_state(TASK_RUNNING); 1114 set_current_state(TASK_RUNNING);
1112 1115
@@ -1118,12 +1121,12 @@ static int dlm_recvd(void *data)
1118 1121
1119 /* Don't starve out everyone else */ 1122 /* Don't starve out everyone else */
1120 if (++count >= MAX_RX_MSG_COUNT) { 1123 if (++count >= MAX_RX_MSG_COUNT) {
1121 schedule(); 1124 cond_resched();
1122 count = 0; 1125 count = 0;
1123 } 1126 }
1124 } while (!kthread_should_stop() && ret >=0); 1127 } while (!kthread_should_stop() && ret >=0);
1125 } 1128 }
1126 schedule(); 1129 cond_resched();
1127 } 1130 }
1128 1131
1129 return 0; 1132 return 0;
@@ -1138,7 +1141,7 @@ static int dlm_sendd(void *data)
1138 while (!kthread_should_stop()) { 1141 while (!kthread_should_stop()) {
1139 set_current_state(TASK_INTERRUPTIBLE); 1142 set_current_state(TASK_INTERRUPTIBLE);
1140 if (write_list_empty()) 1143 if (write_list_empty())
1141 schedule(); 1144 cond_resched();
1142 set_current_state(TASK_RUNNING); 1145 set_current_state(TASK_RUNNING);
1143 1146
1144 if (sctp_con.eagain_flag) { 1147 if (sctp_con.eagain_flag) {
@@ -1166,7 +1169,7 @@ static int daemons_start(void)
1166 1169
1167 p = kthread_run(dlm_recvd, NULL, "dlm_recvd"); 1170 p = kthread_run(dlm_recvd, NULL, "dlm_recvd");
1168 error = IS_ERR(p); 1171 error = IS_ERR(p);
1169 if (error) { 1172 if (error) {
1170 log_print("can't start dlm_recvd %d", error); 1173 log_print("can't start dlm_recvd %d", error);
1171 return error; 1174 return error;
1172 } 1175 }
@@ -1174,7 +1177,7 @@ static int daemons_start(void)
1174 1177
1175 p = kthread_run(dlm_sendd, NULL, "dlm_sendd"); 1178 p = kthread_run(dlm_sendd, NULL, "dlm_sendd");
1176 error = IS_ERR(p); 1179 error = IS_ERR(p);
1177 if (error) { 1180 if (error) {
1178 log_print("can't start dlm_sendd %d", error); 1181 log_print("can't start dlm_sendd %d", error);
1179 kthread_stop(recv_task); 1182 kthread_stop(recv_task);
1180 return error; 1183 return error;
@@ -1197,43 +1200,28 @@ int dlm_lowcomms_start(void)
1197 error = daemons_start(); 1200 error = daemons_start();
1198 if (error) 1201 if (error)
1199 goto fail_sock; 1202 goto fail_sock;
1200 atomic_set(&accepting, 1);
1201 return 0; 1203 return 0;
1202 1204
1203 fail_sock: 1205fail_sock:
1204 close_connection(); 1206 close_connection();
1205 return error; 1207 return error;
1206} 1208}
1207 1209
1208/* Set all the activity flags to prevent any socket activity. */
1209
1210void dlm_lowcomms_stop(void) 1210void dlm_lowcomms_stop(void)
1211{ 1211{
1212 atomic_set(&accepting, 0); 1212 int i;
1213
1213 sctp_con.flags = 0x7; 1214 sctp_con.flags = 0x7;
1214 daemons_stop(); 1215 daemons_stop();
1215 clean_writequeues(); 1216 clean_writequeues();
1216 close_connection(); 1217 close_connection();
1217 dealloc_nodeinfo(); 1218 dealloc_nodeinfo();
1218 max_nodeid = 0; 1219 max_nodeid = 0;
1219}
1220 1220
1221int dlm_lowcomms_init(void) 1221 dlm_local_count = 0;
1222{ 1222 dlm_local_nodeid = 0;
1223 init_waitqueue_head(&lowcomms_recv_wait);
1224 spin_lock_init(&write_nodes_lock);
1225 INIT_LIST_HEAD(&write_nodes);
1226 init_rwsem(&nodeinfo_lock);
1227 return 0;
1228}
1229
1230void dlm_lowcomms_exit(void)
1231{
1232 int i;
1233 1223
1234 for (i = 0; i < dlm_local_count; i++) 1224 for (i = 0; i < dlm_local_count; i++)
1235 kfree(dlm_local_addr[i]); 1225 kfree(dlm_local_addr[i]);
1236 dlm_local_count = 0;
1237 dlm_local_nodeid = 0;
1238} 1226}
1239 1227
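
Both transport variants drop the old CBUF_* macros in favour of the small static helpers shown in the hunk above (and repeated in lowcomms-tcp.c below). A standalone user-space sketch, using the same arithmetic on a page-sized ring (the 4096 size, the example byte counts and the printf harness are illustrative, not from the commit), shows how base, len and the power-of-two mask track the circular receive buffer:

	#include <stdio.h>

	struct cbuf { unsigned int base, len, mask; };

	static void cbuf_init(struct cbuf *cb, int size) { cb->base = cb->len = 0; cb->mask = size - 1; }
	static void cbuf_add(struct cbuf *cb, int n)  { cb->len += n; }                     /* producer appended n bytes */
	static int  cbuf_data(struct cbuf *cb)        { return (cb->base + cb->len) & cb->mask; } /* next write offset */
	static void cbuf_eat(struct cbuf *cb, int n)  { cb->len -= n; cb->base += n; cb->base &= cb->mask; } /* consumer finished n bytes */

	int main(void)
	{
		struct cbuf cb;

		cbuf_init(&cb, 4096);     /* size must be a power of two */
		cbuf_add(&cb, 4000);      /* received 4000 bytes into the page */
		cbuf_eat(&cb, 3900);      /* consumer processed 3900 of them */
		cbuf_add(&cb, 300);       /* the next receive wraps past the page end */
		printf("base=%u len=%u next_write=%d\n", cb.base, cb.len, cbuf_data(&cb));
		/* prints: base=3900 len=400 next_write=204 */
		return 0;
	}
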
diff --git a/fs/dlm/lowcomms-tcp.c b/fs/dlm/lowcomms-tcp.c
new file mode 100644
index 000000000000..8f2791fc8447
--- /dev/null
+++ b/fs/dlm/lowcomms-tcp.c
@@ -0,0 +1,1189 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14/*
15 * lowcomms.c
16 *
17 * This is the "low-level" comms layer.
18 *
19 * It is responsible for sending/receiving messages
20 * from other nodes in the cluster.
21 *
22 * Cluster nodes are referred to by their nodeids. nodeids are
23 * simply 32 bit numbers to the locking module - if they need to
24 * be expanded for the cluster infrastructure then that is its
25 * responsibility. It is this layer's
26 * responsibility to resolve these into IP addresses or
27 * whatever it needs for inter-node communication.
28 *
29 * The comms level is two kernel threads that deal mainly with
30 * the receiving of messages from other nodes and passing them
31 * up to the mid-level comms layer (which understands the
32 * message format) for execution by the locking core, and
33 * a send thread which does all the setting up of connections
34 * to remote nodes and the sending of data. Threads are not allowed
35 * to send their own data because it may cause them to wait in times
36 * of high load. Also, this way, the sending thread can collect together
37 * messages bound for one node and send them in one block.
38 *
39 * I don't see any problem with the recv thread executing the locking
40 * code on behalf of remote processes as the locking code is
41 * short, efficient and never waits.
42 *
43 */
44
45
46#include <asm/ioctls.h>
47#include <net/sock.h>
48#include <net/tcp.h>
49#include <linux/pagemap.h>
50
51#include "dlm_internal.h"
52#include "lowcomms.h"
53#include "midcomms.h"
54#include "config.h"
55
56struct cbuf {
57 unsigned int base;
58 unsigned int len;
59 unsigned int mask;
60};
61
62#define NODE_INCREMENT 32
63static void cbuf_add(struct cbuf *cb, int n)
64{
65 cb->len += n;
66}
67
68static int cbuf_data(struct cbuf *cb)
69{
70 return ((cb->base + cb->len) & cb->mask);
71}
72
73static void cbuf_init(struct cbuf *cb, int size)
74{
75 cb->base = cb->len = 0;
76 cb->mask = size-1;
77}
78
79static void cbuf_eat(struct cbuf *cb, int n)
80{
81 cb->len -= n;
82 cb->base += n;
83 cb->base &= cb->mask;
84}
85
86static bool cbuf_empty(struct cbuf *cb)
87{
88 return cb->len == 0;
89}
90
91/* Maximum number of incoming messages to process before
92 doing a cond_resched()
93*/
94#define MAX_RX_MSG_COUNT 25
95
96struct connection {
97 struct socket *sock; /* NULL if not connected */
98 uint32_t nodeid; /* So we know who we are in the list */
99 struct rw_semaphore sock_sem; /* Stop connect races */
100 struct list_head read_list; /* On this list when ready for reading */
101 struct list_head write_list; /* On this list when ready for writing */
102 struct list_head state_list; /* On this list when ready to connect */
103 unsigned long flags; /* bit 1,2 = We are on the read/write lists */
104#define CF_READ_PENDING 1
105#define CF_WRITE_PENDING 2
106#define CF_CONNECT_PENDING 3
107#define CF_IS_OTHERCON 4
108 struct list_head writequeue; /* List of outgoing writequeue_entries */
109 struct list_head listenlist; /* List of allocated listening sockets */
110 spinlock_t writequeue_lock;
111 int (*rx_action) (struct connection *); /* What to do when active */
112 struct page *rx_page;
113 struct cbuf cb;
114 int retries;
115 atomic_t waiting_requests;
116#define MAX_CONNECT_RETRIES 3
117 struct connection *othercon;
118};
119#define sock2con(x) ((struct connection *)(x)->sk_user_data)
120
121/* An entry waiting to be sent */
122struct writequeue_entry {
123 struct list_head list;
124 struct page *page;
125 int offset;
126 int len;
127 int end;
128 int users;
129 struct connection *con;
130};
131
132static struct sockaddr_storage dlm_local_addr;
133
134/* Manage daemons */
135static struct task_struct *recv_task;
136static struct task_struct *send_task;
137
138static wait_queue_t lowcomms_send_waitq_head;
139static DECLARE_WAIT_QUEUE_HEAD(lowcomms_send_waitq);
140static wait_queue_t lowcomms_recv_waitq_head;
141static DECLARE_WAIT_QUEUE_HEAD(lowcomms_recv_waitq);
142
143/* An array of pointers to connections, indexed by NODEID */
144static struct connection **connections;
145static DECLARE_MUTEX(connections_lock);
146static kmem_cache_t *con_cache;
147static int conn_array_size;
148
149/* List of sockets that have reads pending */
150static LIST_HEAD(read_sockets);
151static DEFINE_SPINLOCK(read_sockets_lock);
152
153/* List of sockets which have writes pending */
154static LIST_HEAD(write_sockets);
155static DEFINE_SPINLOCK(write_sockets_lock);
156
157/* List of sockets which have connects pending */
158static LIST_HEAD(state_sockets);
159static DEFINE_SPINLOCK(state_sockets_lock);
160
161static struct connection *nodeid2con(int nodeid, gfp_t allocation)
162{
163 struct connection *con = NULL;
164
165 down(&connections_lock);
166 if (nodeid >= conn_array_size) {
167 int new_size = nodeid + NODE_INCREMENT;
168 struct connection **new_conns;
169
170 new_conns = kzalloc(sizeof(struct connection *) *
171 new_size, allocation);
172 if (!new_conns)
173 goto finish;
174
175 memcpy(new_conns, connections, sizeof(struct connection *) * conn_array_size);
176 conn_array_size = new_size;
177 kfree(connections);
178 connections = new_conns;
179
180 }
181
182 con = connections[nodeid];
183 if (con == NULL && allocation) {
184 con = kmem_cache_zalloc(con_cache, allocation);
185 if (!con)
186 goto finish;
187
188 con->nodeid = nodeid;
189 init_rwsem(&con->sock_sem);
190 INIT_LIST_HEAD(&con->writequeue);
191 spin_lock_init(&con->writequeue_lock);
192
193 connections[nodeid] = con;
194 }
195
196finish:
197 up(&connections_lock);
198 return con;
199}
200
201/* Data available on socket or listen socket received a connect */
202static void lowcomms_data_ready(struct sock *sk, int count_unused)
203{
204 struct connection *con = sock2con(sk);
205
206 atomic_inc(&con->waiting_requests);
207 if (test_and_set_bit(CF_READ_PENDING, &con->flags))
208 return;
209
210 spin_lock_bh(&read_sockets_lock);
211 list_add_tail(&con->read_list, &read_sockets);
212 spin_unlock_bh(&read_sockets_lock);
213
214 wake_up_interruptible(&lowcomms_recv_waitq);
215}
216
217static void lowcomms_write_space(struct sock *sk)
218{
219 struct connection *con = sock2con(sk);
220
221 if (test_and_set_bit(CF_WRITE_PENDING, &con->flags))
222 return;
223
224 spin_lock_bh(&write_sockets_lock);
225 list_add_tail(&con->write_list, &write_sockets);
226 spin_unlock_bh(&write_sockets_lock);
227
228 wake_up_interruptible(&lowcomms_send_waitq);
229}
230
231static inline void lowcomms_connect_sock(struct connection *con)
232{
233 if (test_and_set_bit(CF_CONNECT_PENDING, &con->flags))
234 return;
235
236 spin_lock_bh(&state_sockets_lock);
237 list_add_tail(&con->state_list, &state_sockets);
238 spin_unlock_bh(&state_sockets_lock);
239
240 wake_up_interruptible(&lowcomms_send_waitq);
241}
242
243static void lowcomms_state_change(struct sock *sk)
244{
245 if (sk->sk_state == TCP_ESTABLISHED)
246 lowcomms_write_space(sk);
247}
248
249/* Make a socket active */
250static int add_sock(struct socket *sock, struct connection *con)
251{
252 con->sock = sock;
253
254 /* Install a data_ready callback */
255 con->sock->sk->sk_data_ready = lowcomms_data_ready;
256 con->sock->sk->sk_write_space = lowcomms_write_space;
257 con->sock->sk->sk_state_change = lowcomms_state_change;
258
259 return 0;
260}
261
262/* Add the port number to an IP6 or 4 sockaddr and return the address
263 length */
264static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
265 int *addr_len)
266{
267 saddr->ss_family = dlm_local_addr.ss_family;
268 if (saddr->ss_family == AF_INET) {
269 struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
270 in4_addr->sin_port = cpu_to_be16(port);
271 *addr_len = sizeof(struct sockaddr_in);
272 } else {
273 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
274 in6_addr->sin6_port = cpu_to_be16(port);
275 *addr_len = sizeof(struct sockaddr_in6);
276 }
277}
278
279/* Close a remote connection and tidy up */
280static void close_connection(struct connection *con, bool and_other)
281{
282 down_write(&con->sock_sem);
283
284 if (con->sock) {
285 sock_release(con->sock);
286 con->sock = NULL;
287 }
288 if (con->othercon && and_other) {
289 /* Will only re-enter once. */
290 close_connection(con->othercon, false);
291 }
292 if (con->rx_page) {
293 __free_page(con->rx_page);
294 con->rx_page = NULL;
295 }
296 con->retries = 0;
297 up_write(&con->sock_sem);
298}
299
300/* Data received from remote end */
301static int receive_from_sock(struct connection *con)
302{
303 int ret = 0;
304 struct msghdr msg;
305 struct iovec iov[2];
306 mm_segment_t fs;
307 unsigned len;
308 int r;
309 int call_again_soon = 0;
310
311 down_read(&con->sock_sem);
312
313 if (con->sock == NULL)
314 goto out;
315 if (con->rx_page == NULL) {
316 /*
317 * This doesn't need to be atomic, but I think it should
318 * improve performance if it is.
319 */
320 con->rx_page = alloc_page(GFP_ATOMIC);
321 if (con->rx_page == NULL)
322 goto out_resched;
323 cbuf_init(&con->cb, PAGE_CACHE_SIZE);
324 }
325
326 msg.msg_control = NULL;
327 msg.msg_controllen = 0;
328 msg.msg_iovlen = 1;
329 msg.msg_iov = iov;
330 msg.msg_name = NULL;
331 msg.msg_namelen = 0;
332 msg.msg_flags = 0;
333
334 /*
335 * iov[0] is the bit of the circular buffer between the current end
336 * point (cb.base + cb.len) and the end of the buffer.
337 */
338 iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
339 iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
340 iov[1].iov_len = 0;
341
342 /*
343 * iov[1] is the bit of the circular buffer between the start of the
344 * buffer and the start of the currently used section (cb.base)
345 */
346 if (cbuf_data(&con->cb) >= con->cb.base) {
347 iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb);
348 iov[1].iov_len = con->cb.base;
349 iov[1].iov_base = page_address(con->rx_page);
350 msg.msg_iovlen = 2;
351 }
352 len = iov[0].iov_len + iov[1].iov_len;
353
354 fs = get_fs();
355 set_fs(get_ds());
356 r = ret = sock_recvmsg(con->sock, &msg, len,
357 MSG_DONTWAIT | MSG_NOSIGNAL);
358 set_fs(fs);
359
360 if (ret <= 0)
361 goto out_close;
362 if (ret == len)
363 call_again_soon = 1;
364 cbuf_add(&con->cb, ret);
365 ret = dlm_process_incoming_buffer(con->nodeid,
366 page_address(con->rx_page),
367 con->cb.base, con->cb.len,
368 PAGE_CACHE_SIZE);
369 if (ret == -EBADMSG) {
370 printk(KERN_INFO "dlm: lowcomms: addr=%p, base=%u, len=%u, "
371 "iov_len=%u, iov_base[0]=%p, read=%d\n",
372 page_address(con->rx_page), con->cb.base, con->cb.len,
373 len, iov[0].iov_base, r);
374 }
375 if (ret < 0)
376 goto out_close;
377 cbuf_eat(&con->cb, ret);
378
379 if (cbuf_empty(&con->cb) && !call_again_soon) {
380 __free_page(con->rx_page);
381 con->rx_page = NULL;
382 }
383
384out:
385 if (call_again_soon)
386 goto out_resched;
387 up_read(&con->sock_sem);
388 return 0;
389
390out_resched:
391 lowcomms_data_ready(con->sock->sk, 0);
392 up_read(&con->sock_sem);
393 cond_resched();
394 return 0;
395
396out_close:
397 up_read(&con->sock_sem);
398 if (ret != -EAGAIN && !test_bit(CF_IS_OTHERCON, &con->flags)) {
399 close_connection(con, false);
400 /* Reconnect when there is something to send */
401 }
402
403 return ret;
404}
405
406/* Listening socket is busy, accept a connection */
407static int accept_from_sock(struct connection *con)
408{
409 int result;
410 struct sockaddr_storage peeraddr;
411 struct socket *newsock;
412 int len;
413 int nodeid;
414 struct connection *newcon;
415
416 memset(&peeraddr, 0, sizeof(peeraddr));
417 result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM,
418 IPPROTO_TCP, &newsock);
419 if (result < 0)
420 return -ENOMEM;
421
422 down_read(&con->sock_sem);
423
424 result = -ENOTCONN;
425 if (con->sock == NULL)
426 goto accept_err;
427
428 newsock->type = con->sock->type;
429 newsock->ops = con->sock->ops;
430
431 result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK);
432 if (result < 0)
433 goto accept_err;
434
435 /* Get the connected socket's peer */
436 memset(&peeraddr, 0, sizeof(peeraddr));
437 if (newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr,
438 &len, 2)) {
439 result = -ECONNABORTED;
440 goto accept_err;
441 }
442
443 /* Get the new node's NODEID */
444 make_sockaddr(&peeraddr, 0, &len);
445 if (dlm_addr_to_nodeid(&peeraddr, &nodeid)) {
446 printk("dlm: connect from non cluster node\n");
447 sock_release(newsock);
448 up_read(&con->sock_sem);
449 return -1;
450 }
451
452 log_print("got connection from %d", nodeid);
453
454 /* Check to see if we already have a connection to this node. This
455 * could happen if the two nodes initiate a connection at roughly
456 * the same time and the connections cross on the wire.
457 * TEMPORARY FIX:
458 * In this case we store the incoming one in "othercon"
459 */
460 newcon = nodeid2con(nodeid, GFP_KERNEL);
461 if (!newcon) {
462 result = -ENOMEM;
463 goto accept_err;
464 }
465 down_write(&newcon->sock_sem);
466 if (newcon->sock) {
467 struct connection *othercon = newcon->othercon;
468
469 if (!othercon) {
470 othercon = kmem_cache_zalloc(con_cache, GFP_KERNEL);
471 if (!othercon) {
472 printk("dlm: failed to allocate incoming socket\n");
473 up_write(&newcon->sock_sem);
474 result = -ENOMEM;
475 goto accept_err;
476 }
477 othercon->nodeid = nodeid;
478 othercon->rx_action = receive_from_sock;
479 init_rwsem(&othercon->sock_sem);
480 set_bit(CF_IS_OTHERCON, &othercon->flags);
481 newcon->othercon = othercon;
482 }
483 othercon->sock = newsock;
484 newsock->sk->sk_user_data = othercon;
485 add_sock(newsock, othercon);
486 }
487 else {
488 newsock->sk->sk_user_data = newcon;
489 newcon->rx_action = receive_from_sock;
490 add_sock(newsock, newcon);
491
492 }
493
494 up_write(&newcon->sock_sem);
495
496 /*
497 * Add it to the active queue in case we got data
498 * between processing the accept and adding the socket
499 * to the read_sockets list
500 */
501 lowcomms_data_ready(newsock->sk, 0);
502 up_read(&con->sock_sem);
503
504 return 0;
505
506accept_err:
507 up_read(&con->sock_sem);
508 sock_release(newsock);
509
510 if (result != -EAGAIN)
511 printk("dlm: error accepting connection from node: %d\n", result);
512 return result;
513}
514
515/* Connect a new socket to its peer */
516static void connect_to_sock(struct connection *con)
517{
518 int result = -EHOSTUNREACH;
519 struct sockaddr_storage saddr;
520 int addr_len;
521 struct socket *sock;
522
523 if (con->nodeid == 0) {
524 log_print("attempt to connect sock 0 foiled");
525 return;
526 }
527
528 down_write(&con->sock_sem);
529 if (con->retries++ > MAX_CONNECT_RETRIES)
530 goto out;
531
532 /* Some odd races can cause double-connects, ignore them */
533 if (con->sock) {
534 result = 0;
535 goto out;
536 }
537
538 /* Create a socket to communicate with */
539 result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM,
540 IPPROTO_TCP, &sock);
541 if (result < 0)
542 goto out_err;
543
544 memset(&saddr, 0, sizeof(saddr));
545 if (dlm_nodeid_to_addr(con->nodeid, &saddr))
546 goto out_err;
547
548 sock->sk->sk_user_data = con;
549 con->rx_action = receive_from_sock;
550
551 make_sockaddr(&saddr, dlm_config.tcp_port, &addr_len);
552
553 add_sock(sock, con);
554
555 log_print("connecting to %d", con->nodeid);
556 result =
557 sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
558 O_NONBLOCK);
559 if (result == -EINPROGRESS)
560 result = 0;
561 if (result == 0)
562 goto out;
563
564out_err:
565 if (con->sock) {
566 sock_release(con->sock);
567 con->sock = NULL;
568 }
569 /*
570 * Some errors are fatal and this list might need adjusting. For other
571 * errors we try again until the max number of retries is reached.
572 */
573 if (result != -EHOSTUNREACH && result != -ENETUNREACH &&
574	    result != -ENETDOWN && result != -EINVAL
575 && result != -EPROTONOSUPPORT) {
576 lowcomms_connect_sock(con);
577 result = 0;
578 }
579out:
580 up_write(&con->sock_sem);
581 return;
582}
583
584static struct socket *create_listen_sock(struct connection *con,
585 struct sockaddr_storage *saddr)
586{
587 struct socket *sock = NULL;
588 mm_segment_t fs;
589 int result = 0;
590 int one = 1;
591 int addr_len;
592
593 if (dlm_local_addr.ss_family == AF_INET)
594 addr_len = sizeof(struct sockaddr_in);
595 else
596 addr_len = sizeof(struct sockaddr_in6);
597
598 /* Create a socket to communicate with */
599 result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM, IPPROTO_TCP, &sock);
600 if (result < 0) {
601 printk("dlm: Can't create listening comms socket\n");
602 goto create_out;
603 }
604
605 fs = get_fs();
606 set_fs(get_ds());
607 result = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
608 (char *)&one, sizeof(one));
609 set_fs(fs);
610 if (result < 0) {
611 printk("dlm: Failed to set SO_REUSEADDR on socket: result=%d\n",
612 result);
613 }
614 sock->sk->sk_user_data = con;
615 con->rx_action = accept_from_sock;
616 con->sock = sock;
617
618 /* Bind to our port */
619 make_sockaddr(saddr, dlm_config.tcp_port, &addr_len);
620 result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
621 if (result < 0) {
622 printk("dlm: Can't bind to port %d\n", dlm_config.tcp_port);
623 sock_release(sock);
624 sock = NULL;
625 con->sock = NULL;
626 goto create_out;
627 }
628
629 fs = get_fs();
630 set_fs(get_ds());
631
632 result = sock_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
633 (char *)&one, sizeof(one));
634 set_fs(fs);
635 if (result < 0) {
636 printk("dlm: Set keepalive failed: %d\n", result);
637 }
638
639 result = sock->ops->listen(sock, 5);
640 if (result < 0) {
641 printk("dlm: Can't listen on port %d\n", dlm_config.tcp_port);
642 sock_release(sock);
643 sock = NULL;
644 goto create_out;
645 }
646
647create_out:
648 return sock;
649}
650
651
652/* Listen on all interfaces */
653static int listen_for_all(void)
654{
655 struct socket *sock = NULL;
656 struct connection *con = nodeid2con(0, GFP_KERNEL);
657 int result = -EINVAL;
658
659 /* We don't support multi-homed hosts */
660 set_bit(CF_IS_OTHERCON, &con->flags);
661
662 sock = create_listen_sock(con, &dlm_local_addr);
663 if (sock) {
664 add_sock(sock, con);
665 result = 0;
666 }
667 else {
668 result = -EADDRINUSE;
669 }
670
671 return result;
672}
673
674
675
676static struct writequeue_entry *new_writequeue_entry(struct connection *con,
677 gfp_t allocation)
678{
679 struct writequeue_entry *entry;
680
681 entry = kmalloc(sizeof(struct writequeue_entry), allocation);
682 if (!entry)
683 return NULL;
684
685 entry->page = alloc_page(allocation);
686 if (!entry->page) {
687 kfree(entry);
688 return NULL;
689 }
690
691 entry->offset = 0;
692 entry->len = 0;
693 entry->end = 0;
694 entry->users = 0;
695 entry->con = con;
696
697 return entry;
698}
699
700void *dlm_lowcomms_get_buffer(int nodeid, int len,
701 gfp_t allocation, char **ppc)
702{
703 struct connection *con;
704 struct writequeue_entry *e;
705 int offset = 0;
706 int users = 0;
707
708 con = nodeid2con(nodeid, allocation);
709 if (!con)
710 return NULL;
711
712 e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
713 if ((&e->list == &con->writequeue) ||
714 (PAGE_CACHE_SIZE - e->end < len)) {
715 e = NULL;
716 } else {
717 offset = e->end;
718 e->end += len;
719 users = e->users++;
720 }
721 spin_unlock(&con->writequeue_lock);
722
723 if (e) {
724 got_one:
725 if (users == 0)
726 kmap(e->page);
727 *ppc = page_address(e->page) + offset;
728 return e;
729 }
730
731 e = new_writequeue_entry(con, allocation);
732 if (e) {
733 spin_lock(&con->writequeue_lock);
734 offset = e->end;
735 e->end += len;
736 users = e->users++;
737 list_add_tail(&e->list, &con->writequeue);
738 spin_unlock(&con->writequeue_lock);
739 goto got_one;
740 }
741 return NULL;
742}
743
744void dlm_lowcomms_commit_buffer(void *mh)
745{
746 struct writequeue_entry *e = (struct writequeue_entry *)mh;
747 struct connection *con = e->con;
748 int users;
749
750 users = --e->users;
751 if (users)
752 goto out;
753 e->len = e->end - e->offset;
754 kunmap(e->page);
755 spin_unlock(&con->writequeue_lock);
756
757 if (test_and_set_bit(CF_WRITE_PENDING, &con->flags) == 0) {
758 spin_lock_bh(&write_sockets_lock);
759 list_add_tail(&con->write_list, &write_sockets);
760 spin_unlock_bh(&write_sockets_lock);
761
762 wake_up_interruptible(&lowcomms_send_waitq);
763 }
764 return;
765
766out:
767 spin_unlock(&con->writequeue_lock);
768 return;
769}
770
771static void free_entry(struct writequeue_entry *e)
772{
773 __free_page(e->page);
774 kfree(e);
775}
776
777/* Send a message */
778static void send_to_sock(struct connection *con)
779{
780 int ret = 0;
781 ssize_t(*sendpage) (struct socket *, struct page *, int, size_t, int);
782 const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
783 struct writequeue_entry *e;
784 int len, offset;
785
786 down_read(&con->sock_sem);
787 if (con->sock == NULL)
788 goto out_connect;
789
790 sendpage = con->sock->ops->sendpage;
791
792 spin_lock(&con->writequeue_lock);
793 for (;;) {
794 e = list_entry(con->writequeue.next, struct writequeue_entry,
795 list);
796 if ((struct list_head *) e == &con->writequeue)
797 break;
798
799 len = e->len;
800 offset = e->offset;
801 BUG_ON(len == 0 && e->users == 0);
802 spin_unlock(&con->writequeue_lock);
803
804 ret = 0;
805 if (len) {
806 ret = sendpage(con->sock, e->page, offset, len,
807 msg_flags);
808 if (ret == -EAGAIN || ret == 0)
809 goto out;
810 if (ret <= 0)
811 goto send_error;
812 }
813 else {
814 /* Don't starve people filling buffers */
815 cond_resched();
816 }
817
818 spin_lock(&con->writequeue_lock);
819 e->offset += ret;
820 e->len -= ret;
821
822 if (e->len == 0 && e->users == 0) {
823 list_del(&e->list);
824 kunmap(e->page);
825 free_entry(e);
826 continue;
827 }
828 }
829 spin_unlock(&con->writequeue_lock);
830out:
831 up_read(&con->sock_sem);
832 return;
833
834send_error:
835 up_read(&con->sock_sem);
836 close_connection(con, false);
837 lowcomms_connect_sock(con);
838 return;
839
840out_connect:
841 up_read(&con->sock_sem);
842 lowcomms_connect_sock(con);
843 return;
844}
845
846static void clean_one_writequeue(struct connection *con)
847{
848 struct list_head *list;
849 struct list_head *temp;
850
851 spin_lock(&con->writequeue_lock);
852 list_for_each_safe(list, temp, &con->writequeue) {
853 struct writequeue_entry *e =
854 list_entry(list, struct writequeue_entry, list);
855 list_del(&e->list);
856 free_entry(e);
857 }
858 spin_unlock(&con->writequeue_lock);
859}
860
861/* Called from recovery when it knows that a node has
862 left the cluster */
863int dlm_lowcomms_close(int nodeid)
864{
865 struct connection *con;
866
867 if (!connections)
868 goto out;
869
870 log_print("closing connection to node %d", nodeid);
871 con = nodeid2con(nodeid, 0);
872 if (con) {
873 clean_one_writequeue(con);
874 close_connection(con, true);
875 atomic_set(&con->waiting_requests, 0);
876 }
877 return 0;
878
879out:
880 return -1;
881}
882
883/* API send message call, may queue the request */
884/* N.B. This is the old interface - use the new one for new calls */
885int lowcomms_send_message(int nodeid, char *buf, int len, gfp_t allocation)
886{
887 struct writequeue_entry *e;
888 char *b;
889
890 e = dlm_lowcomms_get_buffer(nodeid, len, allocation, &b);
891 if (e) {
892 memcpy(b, buf, len);
893 dlm_lowcomms_commit_buffer(e);
894 return 0;
895 }
896 return -ENOBUFS;
897}
898
899/* Look for activity on active sockets */
900static void process_sockets(void)
901{
902 struct list_head *list;
903 struct list_head *temp;
904 int count = 0;
905
906 spin_lock_bh(&read_sockets_lock);
907 list_for_each_safe(list, temp, &read_sockets) {
908
909 struct connection *con =
910 list_entry(list, struct connection, read_list);
911 list_del(&con->read_list);
912 clear_bit(CF_READ_PENDING, &con->flags);
913
914 spin_unlock_bh(&read_sockets_lock);
915
916 /* This can reach zero if we are processing requests
917 * as they come in.
918 */
919 if (atomic_read(&con->waiting_requests) == 0) {
920 spin_lock_bh(&read_sockets_lock);
921 continue;
922 }
923
924 do {
925 con->rx_action(con);
926
927 /* Don't starve out everyone else */
928 if (++count >= MAX_RX_MSG_COUNT) {
929 cond_resched();
930 count = 0;
931 }
932
933 } while (!atomic_dec_and_test(&con->waiting_requests) &&
934 !kthread_should_stop());
935
936 spin_lock_bh(&read_sockets_lock);
937 }
938 spin_unlock_bh(&read_sockets_lock);
939}
940
941/* Try to send any messages that are pending
942 */
943static void process_output_queue(void)
944{
945 struct list_head *list;
946 struct list_head *temp;
947
948 spin_lock_bh(&write_sockets_lock);
949 list_for_each_safe(list, temp, &write_sockets) {
950 struct connection *con =
951 list_entry(list, struct connection, write_list);
952 clear_bit(CF_WRITE_PENDING, &con->flags);
953 list_del(&con->write_list);
954
955 spin_unlock_bh(&write_sockets_lock);
956 send_to_sock(con);
957 spin_lock_bh(&write_sockets_lock);
958 }
959 spin_unlock_bh(&write_sockets_lock);
960}
961
962static void process_state_queue(void)
963{
964 struct list_head *list;
965 struct list_head *temp;
966
967 spin_lock_bh(&state_sockets_lock);
968 list_for_each_safe(list, temp, &state_sockets) {
969 struct connection *con =
970 list_entry(list, struct connection, state_list);
971 list_del(&con->state_list);
972 clear_bit(CF_CONNECT_PENDING, &con->flags);
973 spin_unlock_bh(&state_sockets_lock);
974
975 connect_to_sock(con);
976 spin_lock_bh(&state_sockets_lock);
977 }
978 spin_unlock_bh(&state_sockets_lock);
979}
980
981
982/* Discard all entries on the write queues */
983static void clean_writequeues(void)
984{
985 int nodeid;
986
987 for (nodeid = 1; nodeid < conn_array_size; nodeid++) {
988 struct connection *con = nodeid2con(nodeid, 0);
989
990 if (con)
991 clean_one_writequeue(con);
992 }
993}
994
995static int read_list_empty(void)
996{
997 int status;
998
999 spin_lock_bh(&read_sockets_lock);
1000 status = list_empty(&read_sockets);
1001 spin_unlock_bh(&read_sockets_lock);
1002
1003 return status;
1004}
1005
1006/* DLM Transport comms receive daemon */
1007static int dlm_recvd(void *data)
1008{
1009 init_waitqueue_entry(&lowcomms_recv_waitq_head, current);
1010 add_wait_queue(&lowcomms_recv_waitq, &lowcomms_recv_waitq_head);
1011
1012 while (!kthread_should_stop()) {
1013 set_current_state(TASK_INTERRUPTIBLE);
1014 if (read_list_empty())
1015 cond_resched();
1016 set_current_state(TASK_RUNNING);
1017
1018 process_sockets();
1019 }
1020
1021 return 0;
1022}
1023
1024static int write_and_state_lists_empty(void)
1025{
1026 int status;
1027
1028 spin_lock_bh(&write_sockets_lock);
1029 status = list_empty(&write_sockets);
1030 spin_unlock_bh(&write_sockets_lock);
1031
1032 spin_lock_bh(&state_sockets_lock);
1033 if (list_empty(&state_sockets) == 0)
1034 status = 0;
1035 spin_unlock_bh(&state_sockets_lock);
1036
1037 return status;
1038}
1039
1040/* DLM Transport send daemon */
1041static int dlm_sendd(void *data)
1042{
1043 init_waitqueue_entry(&lowcomms_send_waitq_head, current);
1044 add_wait_queue(&lowcomms_send_waitq, &lowcomms_send_waitq_head);
1045
1046 while (!kthread_should_stop()) {
1047 set_current_state(TASK_INTERRUPTIBLE);
1048 if (write_and_state_lists_empty())
1049 cond_resched();
1050 set_current_state(TASK_RUNNING);
1051
1052 process_state_queue();
1053 process_output_queue();
1054 }
1055
1056 return 0;
1057}
1058
1059static void daemons_stop(void)
1060{
1061 kthread_stop(recv_task);
1062 kthread_stop(send_task);
1063}
1064
1065static int daemons_start(void)
1066{
1067 struct task_struct *p;
1068 int error;
1069
1070 p = kthread_run(dlm_recvd, NULL, "dlm_recvd");
1071 error = IS_ERR(p);
1072 if (error) {
1073 log_print("can't start dlm_recvd %d", error);
1074 return error;
1075 }
1076 recv_task = p;
1077
1078 p = kthread_run(dlm_sendd, NULL, "dlm_sendd");
1079 error = IS_ERR(p);
1080 if (error) {
1081 log_print("can't start dlm_sendd %d", error);
1082 kthread_stop(recv_task);
1083 return error;
1084 }
1085 send_task = p;
1086
1087 return 0;
1088}
1089
1090/*
1091 * Return the largest buffer size we can cope with.
1092 */
1093int lowcomms_max_buffer_size(void)
1094{
1095 return PAGE_CACHE_SIZE;
1096}
1097
1098void dlm_lowcomms_stop(void)
1099{
1100 int i;
1101
1102 /* Set all the flags to prevent any
1103 socket activity.
1104 */
1105 for (i = 0; i < conn_array_size; i++) {
1106 if (connections[i])
1107 connections[i]->flags |= 0xFF;
1108 }
1109
1110 daemons_stop();
1111 clean_writequeues();
1112
1113 for (i = 0; i < conn_array_size; i++) {
1114 if (connections[i]) {
1115 close_connection(connections[i], true);
1116 if (connections[i]->othercon)
1117 kmem_cache_free(con_cache, connections[i]->othercon);
1118 kmem_cache_free(con_cache, connections[i]);
1119 }
1120 }
1121
1122 kfree(connections);
1123 connections = NULL;
1124
1125 kmem_cache_destroy(con_cache);
1126}
1127
1128/* This is quite likely to sleep... */
1129int dlm_lowcomms_start(void)
1130{
1131 int error = 0;
1132
1133 error = -ENOMEM;
1134 connections = kzalloc(sizeof(struct connection *) *
1135 NODE_INCREMENT, GFP_KERNEL);
1136 if (!connections)
1137 goto out;
1138
1139 conn_array_size = NODE_INCREMENT;
1140
1141 if (dlm_our_addr(&dlm_local_addr, 0)) {
1142 log_print("no local IP address has been set");
1143 goto fail_free_conn;
1144 }
1145 if (!dlm_our_addr(&dlm_local_addr, 1)) {
1146 log_print("This dlm comms module does not support multi-homed clustering");
1147 goto fail_free_conn;
1148 }
1149
1150 con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
1151 __alignof__(struct connection), 0,
1152 NULL, NULL);
1153 if (!con_cache)
1154 goto fail_free_conn;
1155
1156
1157 /* Start listening */
1158 error = listen_for_all();
1159 if (error)
1160 goto fail_unlisten;
1161
1162 error = daemons_start();
1163 if (error)
1164 goto fail_unlisten;
1165
1166 return 0;
1167
1168fail_unlisten:
1169 close_connection(connections[0], false);
1170 kmem_cache_free(con_cache, connections[0]);
1171 kmem_cache_destroy(con_cache);
1172
1173fail_free_conn:
1174 kfree(connections);
1175
1176out:
1177 return error;
1178}
1179
1180/*
1181 * Overrides for Emacs so that we follow Linus's tabbing style.
1182 * Emacs will notice this stuff at the end of the file and automatically
1183 * adjust the settings for this buffer only. This must remain at the end
1184 * of the file.
1185 * ---------------------------------------------------------------------------
1186 * Local variables:
1187 * c-file-style: "linux"
1188 * End:
1189 */
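
The send/receive daemons above follow one pattern throughout: detach an entry from a shared list while holding the spinlock, drop the lock for the actual socket work, then re-take it before touching the list again. A minimal userspace sketch of that "pop under lock, process unlocked" pattern, using pthreads and made-up names rather than the kernel's spinlocks and struct connection:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work {
	struct work *next;
	int payload;
};

static struct work *queue;	/* singly linked, LIFO for brevity */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void queue_push(int payload)
{
	struct work *w = malloc(sizeof(*w));

	if (!w)
		return;
	w->payload = payload;
	pthread_mutex_lock(&queue_lock);
	w->next = queue;
	queue = w;
	pthread_mutex_unlock(&queue_lock);
}

static void drain_queue(void)
{
	pthread_mutex_lock(&queue_lock);
	while (queue) {
		struct work *w = queue;

		queue = w->next;			/* "list_del" while holding the lock */
		pthread_mutex_unlock(&queue_lock);	/* drop the lock around the real work */

		printf("processing %d\n", w->payload);
		free(w);

		pthread_mutex_lock(&queue_lock);	/* re-take before looking at the list */
	}
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	queue_push(1);
	queue_push(2);
	queue_push(3);
	drain_queue();
	return 0;
}
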
diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h
index 2d045e0daae1..a9a9618c0d3f 100644
--- a/fs/dlm/lowcomms.h
+++ b/fs/dlm/lowcomms.h
@@ -14,8 +14,6 @@
14#ifndef __LOWCOMMS_DOT_H__ 14#ifndef __LOWCOMMS_DOT_H__
15#define __LOWCOMMS_DOT_H__ 15#define __LOWCOMMS_DOT_H__
16 16
17int dlm_lowcomms_init(void);
18void dlm_lowcomms_exit(void);
19int dlm_lowcomms_start(void); 17int dlm_lowcomms_start(void);
20void dlm_lowcomms_stop(void); 18void dlm_lowcomms_stop(void);
21int dlm_lowcomms_close(int nodeid); 19int dlm_lowcomms_close(int nodeid);
diff --git a/fs/dlm/main.c b/fs/dlm/main.c
index a8da8dc36b2e..162fbae58fe5 100644
--- a/fs/dlm/main.c
+++ b/fs/dlm/main.c
@@ -16,7 +16,6 @@
16#include "lock.h" 16#include "lock.h"
17#include "user.h" 17#include "user.h"
18#include "memory.h" 18#include "memory.h"
19#include "lowcomms.h"
20#include "config.h" 19#include "config.h"
21 20
22#ifdef CONFIG_DLM_DEBUG 21#ifdef CONFIG_DLM_DEBUG
@@ -47,20 +46,14 @@ static int __init init_dlm(void)
47 if (error) 46 if (error)
48 goto out_config; 47 goto out_config;
49 48
50 error = dlm_lowcomms_init();
51 if (error)
52 goto out_debug;
53
54 error = dlm_user_init(); 49 error = dlm_user_init();
55 if (error) 50 if (error)
56 goto out_lowcomms; 51 goto out_debug;
57 52
58 printk("DLM (built %s %s) installed\n", __DATE__, __TIME__); 53 printk("DLM (built %s %s) installed\n", __DATE__, __TIME__);
59 54
60 return 0; 55 return 0;
61 56
62 out_lowcomms:
63 dlm_lowcomms_exit();
64 out_debug: 57 out_debug:
65 dlm_unregister_debugfs(); 58 dlm_unregister_debugfs();
66 out_config: 59 out_config:
@@ -76,7 +69,6 @@ static int __init init_dlm(void)
76static void __exit exit_dlm(void) 69static void __exit exit_dlm(void)
77{ 70{
78 dlm_user_exit(); 71 dlm_user_exit();
79 dlm_lowcomms_exit();
80 dlm_config_exit(); 72 dlm_config_exit();
81 dlm_memory_exit(); 73 dlm_memory_exit();
82 dlm_lockspace_exit(); 74 dlm_lockspace_exit();
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index a3f7de7f3a8f..85e2897bd740 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -186,6 +186,14 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
186 struct dlm_member *memb, *safe; 186 struct dlm_member *memb, *safe;
187 int i, error, found, pos = 0, neg = 0, low = -1; 187 int i, error, found, pos = 0, neg = 0, low = -1;
188 188
189 /* previously removed members that we've not finished removing need to
190 count as a negative change so the "neg" recovery steps will happen */
191
192 list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
193 log_debug(ls, "prev removed member %d", memb->nodeid);
194 neg++;
195 }
196
189 /* move departed members from ls_nodes to ls_nodes_gone */ 197 /* move departed members from ls_nodes to ls_nodes_gone */
190 198
191 list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) { 199 list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) {
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
index 989b608fd836..5352b03ff5aa 100644
--- a/fs/dlm/memory.c
+++ b/fs/dlm/memory.c
@@ -15,7 +15,7 @@
15#include "config.h" 15#include "config.h"
16#include "memory.h" 16#include "memory.h"
17 17
18static kmem_cache_t *lkb_cache; 18static struct kmem_cache *lkb_cache;
19 19
20 20
21int dlm_memory_init(void) 21int dlm_memory_init(void)
diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
index 518239a8b1e9..4cc31be9cd9d 100644
--- a/fs/dlm/rcom.c
+++ b/fs/dlm/rcom.c
@@ -90,13 +90,28 @@ static int check_config(struct dlm_ls *ls, struct rcom_config *rf, int nodeid)
90 return 0; 90 return 0;
91} 91}
92 92
93static void allow_sync_reply(struct dlm_ls *ls, uint64_t *new_seq)
94{
95 spin_lock(&ls->ls_rcom_spin);
96 *new_seq = ++ls->ls_rcom_seq;
97 set_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
98 spin_unlock(&ls->ls_rcom_spin);
99}
100
101static void disallow_sync_reply(struct dlm_ls *ls)
102{
103 spin_lock(&ls->ls_rcom_spin);
104 clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
105 clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
106 spin_unlock(&ls->ls_rcom_spin);
107}
108
93int dlm_rcom_status(struct dlm_ls *ls, int nodeid) 109int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
94{ 110{
95 struct dlm_rcom *rc; 111 struct dlm_rcom *rc;
96 struct dlm_mhandle *mh; 112 struct dlm_mhandle *mh;
97 int error = 0; 113 int error = 0;
98 114
99 memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
100 ls->ls_recover_nodeid = nodeid; 115 ls->ls_recover_nodeid = nodeid;
101 116
102 if (nodeid == dlm_our_nodeid()) { 117 if (nodeid == dlm_our_nodeid()) {
@@ -108,12 +123,14 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
108 error = create_rcom(ls, nodeid, DLM_RCOM_STATUS, 0, &rc, &mh); 123 error = create_rcom(ls, nodeid, DLM_RCOM_STATUS, 0, &rc, &mh);
109 if (error) 124 if (error)
110 goto out; 125 goto out;
111 rc->rc_id = ++ls->ls_rcom_seq; 126
127 allow_sync_reply(ls, &rc->rc_id);
128 memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
112 129
113 send_rcom(ls, mh, rc); 130 send_rcom(ls, mh, rc);
114 131
115 error = dlm_wait_function(ls, &rcom_response); 132 error = dlm_wait_function(ls, &rcom_response);
116 clear_bit(LSFL_RCOM_READY, &ls->ls_flags); 133 disallow_sync_reply(ls);
117 if (error) 134 if (error)
118 goto out; 135 goto out;
119 136
@@ -150,14 +167,21 @@ static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
150 167
151static void receive_sync_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in) 168static void receive_sync_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
152{ 169{
153 if (rc_in->rc_id != ls->ls_rcom_seq) { 170 spin_lock(&ls->ls_rcom_spin);
154 log_debug(ls, "reject old reply %d got %llx wanted %llx", 171 if (!test_bit(LSFL_RCOM_WAIT, &ls->ls_flags) ||
155 rc_in->rc_type, rc_in->rc_id, ls->ls_rcom_seq); 172 rc_in->rc_id != ls->ls_rcom_seq) {
156 return; 173 log_debug(ls, "reject reply %d from %d seq %llx expect %llx",
174 rc_in->rc_type, rc_in->rc_header.h_nodeid,
175 (unsigned long long)rc_in->rc_id,
176 (unsigned long long)ls->ls_rcom_seq);
177 goto out;
157 } 178 }
158 memcpy(ls->ls_recover_buf, rc_in, rc_in->rc_header.h_length); 179 memcpy(ls->ls_recover_buf, rc_in, rc_in->rc_header.h_length);
159 set_bit(LSFL_RCOM_READY, &ls->ls_flags); 180 set_bit(LSFL_RCOM_READY, &ls->ls_flags);
181 clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
160 wake_up(&ls->ls_wait_general); 182 wake_up(&ls->ls_wait_general);
183 out:
184 spin_unlock(&ls->ls_rcom_spin);
161} 185}
162 186
163static void receive_rcom_status_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in) 187static void receive_rcom_status_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
@@ -171,7 +195,6 @@ int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
171 struct dlm_mhandle *mh; 195 struct dlm_mhandle *mh;
172 int error = 0, len = sizeof(struct dlm_rcom); 196 int error = 0, len = sizeof(struct dlm_rcom);
173 197
174 memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
175 ls->ls_recover_nodeid = nodeid; 198 ls->ls_recover_nodeid = nodeid;
176 199
177 if (nodeid == dlm_our_nodeid()) { 200 if (nodeid == dlm_our_nodeid()) {
@@ -185,12 +208,14 @@ int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
185 if (error) 208 if (error)
186 goto out; 209 goto out;
187 memcpy(rc->rc_buf, last_name, last_len); 210 memcpy(rc->rc_buf, last_name, last_len);
188 rc->rc_id = ++ls->ls_rcom_seq; 211
212 allow_sync_reply(ls, &rc->rc_id);
213 memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
189 214
190 send_rcom(ls, mh, rc); 215 send_rcom(ls, mh, rc);
191 216
192 error = dlm_wait_function(ls, &rcom_response); 217 error = dlm_wait_function(ls, &rcom_response);
193 clear_bit(LSFL_RCOM_READY, &ls->ls_flags); 218 disallow_sync_reply(ls);
194 out: 219 out:
195 return error; 220 return error;
196} 221}
@@ -370,9 +395,10 @@ static void receive_rcom_lock_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
370static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in) 395static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
371{ 396{
372 struct dlm_rcom *rc; 397 struct dlm_rcom *rc;
398 struct rcom_config *rf;
373 struct dlm_mhandle *mh; 399 struct dlm_mhandle *mh;
374 char *mb; 400 char *mb;
375 int mb_len = sizeof(struct dlm_rcom); 401 int mb_len = sizeof(struct dlm_rcom) + sizeof(struct rcom_config);
376 402
377 mh = dlm_lowcomms_get_buffer(nodeid, mb_len, GFP_KERNEL, &mb); 403 mh = dlm_lowcomms_get_buffer(nodeid, mb_len, GFP_KERNEL, &mb);
378 if (!mh) 404 if (!mh)
@@ -391,6 +417,9 @@ static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
391 rc->rc_id = rc_in->rc_id; 417 rc->rc_id = rc_in->rc_id;
392 rc->rc_result = -ESRCH; 418 rc->rc_result = -ESRCH;
393 419
420 rf = (struct rcom_config *) rc->rc_buf;
421 rf->rf_lvblen = -1;
422
394 dlm_rcom_out(rc); 423 dlm_rcom_out(rc);
395 dlm_lowcomms_commit_buffer(mh); 424 dlm_lowcomms_commit_buffer(mh);
396 425
@@ -412,9 +441,10 @@ void dlm_receive_rcom(struct dlm_header *hd, int nodeid)
412 441
413 ls = dlm_find_lockspace_global(hd->h_lockspace); 442 ls = dlm_find_lockspace_global(hd->h_lockspace);
414 if (!ls) { 443 if (!ls) {
415 log_print("lockspace %x from %d not found", 444 log_print("lockspace %x from %d type %x not found",
416 hd->h_lockspace, nodeid); 445 hd->h_lockspace, nodeid, rc->rc_type);
417 send_ls_not_ready(nodeid, rc); 446 if (rc->rc_type == DLM_RCOM_STATUS)
447 send_ls_not_ready(nodeid, rc);
418 return; 448 return;
419 } 449 }
420 450
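
The allow_sync_reply()/disallow_sync_reply() helpers added above gate synchronous replies on two things held under one spinlock: a "waiting" flag and the sequence number of the outstanding request. A small userspace sketch of that gate, with a pthread mutex standing in for ls_rcom_spin and invented names:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t gate_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t expected_seq;
static bool waiting;

static uint64_t allow_sync_reply(void)
{
	uint64_t seq;

	pthread_mutex_lock(&gate_lock);
	seq = ++expected_seq;	/* stamp the outgoing request */
	waiting = true;		/* open the gate for exactly this seq */
	pthread_mutex_unlock(&gate_lock);
	return seq;
}

static void disallow_sync_reply(void)
{
	pthread_mutex_lock(&gate_lock);
	waiting = false;	/* close the gate, e.g. after a timeout */
	pthread_mutex_unlock(&gate_lock);
}

static bool receive_sync_reply(uint64_t reply_seq)
{
	bool accepted = false;

	pthread_mutex_lock(&gate_lock);
	if (waiting && reply_seq == expected_seq) {
		waiting = false;	/* one reply per request */
		accepted = true;
	}
	pthread_mutex_unlock(&gate_lock);
	return accepted;
}

int main(void)
{
	uint64_t seq = allow_sync_reply();

	printf("stale reply accepted? %d\n", receive_sync_reply(seq - 1));	/* 0 */
	printf("matching reply accepted? %d\n", receive_sync_reply(seq));	/* 1 */
	printf("duplicate reply accepted? %d\n", receive_sync_reply(seq));	/* 0 */
	disallow_sync_reply();
	return 0;
}
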
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index a5e6d184872e..cf9f6831bab5 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -252,6 +252,7 @@ static void recover_list_clear(struct dlm_ls *ls)
252 spin_lock(&ls->ls_recover_list_lock); 252 spin_lock(&ls->ls_recover_list_lock);
253 list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) { 253 list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
254 list_del_init(&r->res_recover_list); 254 list_del_init(&r->res_recover_list);
255 r->res_recover_locks_count = 0;
255 dlm_put_rsb(r); 256 dlm_put_rsb(r);
256 ls->ls_recover_list_count--; 257 ls->ls_recover_list_count--;
257 } 258 }
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 362e3eff4dc9..650536aa5139 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -45,7 +45,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
45 unsigned long start; 45 unsigned long start;
46 int error, neg = 0; 46 int error, neg = 0;
47 47
48 log_debug(ls, "recover %llx", rv->seq); 48 log_debug(ls, "recover %llx", (unsigned long long)rv->seq);
49 49
50 mutex_lock(&ls->ls_recoverd_active); 50 mutex_lock(&ls->ls_recoverd_active);
51 51
@@ -94,14 +94,6 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
94 } 94 }
95 95
96 /* 96 /*
97 * Purge directory-related requests that are saved in requestqueue.
98 * All dir requests from before recovery are invalid now due to the dir
99 * rebuild and will be resent by the requesting nodes.
100 */
101
102 dlm_purge_requestqueue(ls);
103
104 /*
105 * Wait for all nodes to complete directory rebuild. 97 * Wait for all nodes to complete directory rebuild.
106 */ 98 */
107 99
@@ -164,10 +156,31 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
164 */ 156 */
165 157
166 dlm_recover_rsbs(ls); 158 dlm_recover_rsbs(ls);
159 } else {
160 /*
161 * Other lockspace members may be going through the "neg" steps
162 * while also adding us to the lockspace, in which case they'll
163 * be doing the recover_locks (RS_LOCKS) barrier.
164 */
165 dlm_set_recover_status(ls, DLM_RS_LOCKS);
166
167 error = dlm_recover_locks_wait(ls);
168 if (error) {
169 log_error(ls, "recover_locks_wait failed %d", error);
170 goto fail;
171 }
167 } 172 }
168 173
169 dlm_release_root_list(ls); 174 dlm_release_root_list(ls);
170 175
176 /*
177 * Purge directory-related requests that are saved in requestqueue.
178 * All dir requests from before recovery are invalid now due to the dir
179 * rebuild and will be resent by the requesting nodes.
180 */
181
182 dlm_purge_requestqueue(ls);
183
171 dlm_set_recover_status(ls, DLM_RS_DONE); 184 dlm_set_recover_status(ls, DLM_RS_DONE);
172 error = dlm_recover_done_wait(ls); 185 error = dlm_recover_done_wait(ls);
173 if (error) { 186 if (error) {
@@ -199,7 +212,8 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
199 212
200 dlm_astd_wake(); 213 dlm_astd_wake();
201 214
202 log_debug(ls, "recover %llx done: %u ms", rv->seq, 215 log_debug(ls, "recover %llx done: %u ms",
216 (unsigned long long)rv->seq,
203 jiffies_to_msecs(jiffies - start)); 217 jiffies_to_msecs(jiffies - start));
204 mutex_unlock(&ls->ls_recoverd_active); 218 mutex_unlock(&ls->ls_recoverd_active);
205 219
@@ -207,11 +221,16 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
207 221
208 fail: 222 fail:
209 dlm_release_root_list(ls); 223 dlm_release_root_list(ls);
210 log_debug(ls, "recover %llx error %d", rv->seq, error); 224 log_debug(ls, "recover %llx error %d",
225 (unsigned long long)rv->seq, error);
211 mutex_unlock(&ls->ls_recoverd_active); 226 mutex_unlock(&ls->ls_recoverd_active);
212 return error; 227 return error;
213} 228}
214 229
230/* The dlm_ls_start() that created the rv we take here may already have been
231 stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP
232 flag set. */
233
215static void do_ls_recovery(struct dlm_ls *ls) 234static void do_ls_recovery(struct dlm_ls *ls)
216{ 235{
217 struct dlm_recover *rv = NULL; 236 struct dlm_recover *rv = NULL;
@@ -219,7 +238,8 @@ static void do_ls_recovery(struct dlm_ls *ls)
219 spin_lock(&ls->ls_recover_lock); 238 spin_lock(&ls->ls_recover_lock);
220 rv = ls->ls_recover_args; 239 rv = ls->ls_recover_args;
221 ls->ls_recover_args = NULL; 240 ls->ls_recover_args = NULL;
222 clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags); 241 if (rv && ls->ls_recover_seq == rv->seq)
242 clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
223 spin_unlock(&ls->ls_recover_lock); 243 spin_unlock(&ls->ls_recover_lock);
224 244
225 if (rv) { 245 if (rv) {
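
The do_ls_recovery() change above only clears the stop flag when the dequeued recovery arguments still carry the current sequence number; if another dlm_ls_stop() has bumped the sequence in the meantime, the flag stays set so the stale run aborts. A compact sketch of that check (the real code does this under ls_recover_lock; the lock is omitted here and all names are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct recover_args {
	uint64_t seq;
};

struct lockspace {
	uint64_t recover_seq;		/* bumped by every stop/start cycle */
	bool recovery_stop;		/* tells in-flight recovery to abort */
	struct recover_args *args;
};

static struct recover_args *take_recovery_args(struct lockspace *ls)
{
	struct recover_args *rv = ls->args;

	ls->args = NULL;
	/* A newer stop already happened if the sequences differ; leave the
	 * stop flag alone so the stale recovery run aborts itself. */
	if (rv && ls->recover_seq == rv->seq)
		ls->recovery_stop = false;
	return rv;
}

int main(void)
{
	struct recover_args rv = { .seq = 7 };
	struct lockspace ls = { .recover_seq = 8, .recovery_stop = true, .args = &rv };

	take_recovery_args(&ls);
	printf("stop flag still set? %d\n", ls.recovery_stop);	/* 1: args are stale */
	return 0;
}
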
diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c
index 7b2b089634a2..65008d79c96d 100644
--- a/fs/dlm/requestqueue.c
+++ b/fs/dlm/requestqueue.c
@@ -30,26 +30,36 @@ struct rq_entry {
30 * lockspace is enabled on some while still suspended on others. 30 * lockspace is enabled on some while still suspended on others.
31 */ 31 */
32 32
33void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd) 33int dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd)
34{ 34{
35 struct rq_entry *e; 35 struct rq_entry *e;
36 int length = hd->h_length; 36 int length = hd->h_length;
37 37 int rv = 0;
38 if (dlm_is_removed(ls, nodeid))
39 return;
40 38
41 e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL); 39 e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL);
42 if (!e) { 40 if (!e) {
43 log_print("dlm_add_requestqueue: out of memory\n"); 41 log_print("dlm_add_requestqueue: out of memory\n");
44 return; 42 return 0;
45 } 43 }
46 44
47 e->nodeid = nodeid; 45 e->nodeid = nodeid;
48 memcpy(e->request, hd, length); 46 memcpy(e->request, hd, length);
49 47
48 /* We need to check dlm_locking_stopped() after taking the mutex to
49 avoid a race where dlm_recoverd enables locking and runs
50 process_requestqueue between our earlier dlm_locking_stopped check
51 and this addition to the requestqueue. */
52
50 mutex_lock(&ls->ls_requestqueue_mutex); 53 mutex_lock(&ls->ls_requestqueue_mutex);
51 list_add_tail(&e->list, &ls->ls_requestqueue); 54 if (dlm_locking_stopped(ls))
55 list_add_tail(&e->list, &ls->ls_requestqueue);
56 else {
57 log_debug(ls, "dlm_add_requestqueue skip from %d", nodeid);
58 kfree(e);
59 rv = -EAGAIN;
60 }
52 mutex_unlock(&ls->ls_requestqueue_mutex); 61 mutex_unlock(&ls->ls_requestqueue_mutex);
62 return rv;
53} 63}
54 64
55int dlm_process_requestqueue(struct dlm_ls *ls) 65int dlm_process_requestqueue(struct dlm_ls *ls)
@@ -120,6 +130,10 @@ static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
120{ 130{
121 uint32_t type = ms->m_type; 131 uint32_t type = ms->m_type;
122 132
133 /* the ls is being cleaned up and freed by release_lockspace */
134 if (!ls->ls_count)
135 return 1;
136
123 if (dlm_is_removed(ls, nodeid)) 137 if (dlm_is_removed(ls, nodeid))
124 return 1; 138 return 1;
125 139
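
The dlm_add_requestqueue() change above is a classic check-under-the-lock fix: the "locking stopped" test is repeated while holding the same mutex the consumer takes, and -EAGAIN tells the caller to process the message directly instead of queueing it. A small userspace sketch of the same idea, with a pthread mutex and made-up names:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t rq_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool locking_stopped = true;	/* true while recovery is in progress */
static int queued;			/* stand-in for the request list */

static int add_requestqueue(void)
{
	int rv = 0;

	pthread_mutex_lock(&rq_mutex);
	if (locking_stopped)
		queued++;		/* safe: consumer cannot drain right now */
	else
		rv = -EAGAIN;		/* caller should handle the message directly */
	pthread_mutex_unlock(&rq_mutex);
	return rv;
}

int main(void)
{
	printf("while stopped: %d\n", add_requestqueue());	/* 0, message queued */

	pthread_mutex_lock(&rq_mutex);
	locking_stopped = false;				/* recovery finished */
	pthread_mutex_unlock(&rq_mutex);

	printf("after restart: %d\n", add_requestqueue());	/* -EAGAIN */
	return 0;
}
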
diff --git a/fs/dlm/requestqueue.h b/fs/dlm/requestqueue.h
index 349f0d292d95..6a53ea03335d 100644
--- a/fs/dlm/requestqueue.h
+++ b/fs/dlm/requestqueue.h
@@ -13,7 +13,7 @@
13#ifndef __REQUESTQUEUE_DOT_H__ 13#ifndef __REQUESTQUEUE_DOT_H__
14#define __REQUESTQUEUE_DOT_H__ 14#define __REQUESTQUEUE_DOT_H__
15 15
16void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd); 16int dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd);
17int dlm_process_requestqueue(struct dlm_ls *ls); 17int dlm_process_requestqueue(struct dlm_ls *ls);
18void dlm_wait_requestqueue(struct dlm_ls *ls); 18void dlm_wait_requestqueue(struct dlm_ls *ls);
19void dlm_purge_requestqueue(struct dlm_ls *ls); 19void dlm_purge_requestqueue(struct dlm_ls *ls);
diff --git a/fs/dnotify.c b/fs/dnotify.c
index 2b0442db67e0..1f26a2b9eee1 100644
--- a/fs/dnotify.c
+++ b/fs/dnotify.c
@@ -23,7 +23,7 @@
23 23
24int dir_notify_enable __read_mostly = 1; 24int dir_notify_enable __read_mostly = 1;
25 25
26static kmem_cache_t *dn_cache __read_mostly; 26static struct kmem_cache *dn_cache __read_mostly;
27 27
28static void redo_inode_mask(struct inode *inode) 28static void redo_inode_mask(struct inode *inode)
29{ 29{
@@ -77,7 +77,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
77 inode = filp->f_dentry->d_inode; 77 inode = filp->f_dentry->d_inode;
78 if (!S_ISDIR(inode->i_mode)) 78 if (!S_ISDIR(inode->i_mode))
79 return -ENOTDIR; 79 return -ENOTDIR;
80 dn = kmem_cache_alloc(dn_cache, SLAB_KERNEL); 80 dn = kmem_cache_alloc(dn_cache, GFP_KERNEL);
81 if (dn == NULL) 81 if (dn == NULL)
82 return -ENOMEM; 82 return -ENOMEM;
83 spin_lock(&inode->i_lock); 83 spin_lock(&inode->i_lock);
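
From here on, most hunks repeat the same mechanical conversion seen in dnotify.c above: the kmem_cache_t typedef becomes struct kmem_cache, and the old SLAB_* allocation flags become the equivalent GFP_* flags. A minimal sketch of the resulting style, assuming a module built against a kernel of this generation (hence the six-argument kmem_cache_create()); the "foo" names are made up:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>

struct foo_info {
	int bar;
};

static struct kmem_cache *foo_cachep;	/* was: static kmem_cache_t *foo_cachep; */

static int __init foo_init(void)
{
	struct foo_info *tmp;

	foo_cachep = kmem_cache_create("foo_info", sizeof(struct foo_info),
				       0, 0, NULL, NULL);
	if (!foo_cachep)
		return -ENOMEM;

	tmp = kmem_cache_alloc(foo_cachep, GFP_KERNEL);	/* was: SLAB_KERNEL */
	if (tmp)
		kmem_cache_free(foo_cachep, tmp);
	return 0;
}

static void __exit foo_exit(void)
{
	kmem_cache_destroy(foo_cachep);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
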
diff --git a/fs/dquot.c b/fs/dquot.c
index 9af789567e51..f9cd5e23ebdf 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -131,7 +131,7 @@ static struct quota_format_type *quota_formats; /* List of registered formats */
131static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES; 131static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
132 132
133/* SLAB cache for dquot structures */ 133/* SLAB cache for dquot structures */
134static kmem_cache_t *dquot_cachep; 134static struct kmem_cache *dquot_cachep;
135 135
136int register_quota_format(struct quota_format_type *fmt) 136int register_quota_format(struct quota_format_type *fmt)
137{ 137{
@@ -600,7 +600,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
600{ 600{
601 struct dquot *dquot; 601 struct dquot *dquot;
602 602
603 dquot = kmem_cache_alloc(dquot_cachep, SLAB_NOFS); 603 dquot = kmem_cache_alloc(dquot_cachep, GFP_NOFS);
604 if(!dquot) 604 if(!dquot)
605 return NODQUOT; 605 return NODQUOT;
606 606
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index f63a7755fe86..7196f50fe152 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -628,7 +628,7 @@ int ecryptfs_decrypt_page(struct file *file, struct page *page)
628 num_extents_per_page = PAGE_CACHE_SIZE / crypt_stat->extent_size; 628 num_extents_per_page = PAGE_CACHE_SIZE / crypt_stat->extent_size;
629 base_extent = (page->index * num_extents_per_page); 629 base_extent = (page->index * num_extents_per_page);
630 lower_page_virt = kmem_cache_alloc(ecryptfs_lower_page_cache, 630 lower_page_virt = kmem_cache_alloc(ecryptfs_lower_page_cache,
631 SLAB_KERNEL); 631 GFP_KERNEL);
632 if (!lower_page_virt) { 632 if (!lower_page_virt) {
633 rc = -ENOMEM; 633 rc = -ENOMEM;
634 ecryptfs_printk(KERN_ERR, "Error getting page for encrypted " 634 ecryptfs_printk(KERN_ERR, "Error getting page for encrypted "
@@ -1334,7 +1334,7 @@ int ecryptfs_write_headers(struct dentry *ecryptfs_dentry,
1334 goto out; 1334 goto out;
1335 } 1335 }
1336 /* Released in this function */ 1336 /* Released in this function */
1337 page_virt = kmem_cache_alloc(ecryptfs_header_cache_0, SLAB_USER); 1337 page_virt = kmem_cache_alloc(ecryptfs_header_cache_0, GFP_USER);
1338 if (!page_virt) { 1338 if (!page_virt) {
1339 ecryptfs_printk(KERN_ERR, "Out of memory\n"); 1339 ecryptfs_printk(KERN_ERR, "Out of memory\n");
1340 rc = -ENOMEM; 1340 rc = -ENOMEM;
@@ -1493,7 +1493,7 @@ int ecryptfs_read_headers(struct dentry *ecryptfs_dentry,
1493 &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat; 1493 &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
1494 1494
1495 /* Read the first page from the underlying file */ 1495 /* Read the first page from the underlying file */
1496 page_virt = kmem_cache_alloc(ecryptfs_header_cache_1, SLAB_USER); 1496 page_virt = kmem_cache_alloc(ecryptfs_header_cache_1, GFP_USER);
1497 if (!page_virt) { 1497 if (!page_virt) {
1498 rc = -ENOMEM; 1498 rc = -ENOMEM;
1499 ecryptfs_printk(KERN_ERR, "Unable to allocate page_virt\n"); 1499 ecryptfs_printk(KERN_ERR, "Unable to allocate page_virt\n");
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index a92ef05eff8f..42099e779a56 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -250,7 +250,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
250 int lower_flags; 250 int lower_flags;
251 251
252 /* Released in ecryptfs_release or end of function if failure */ 252 /* Released in ecryptfs_release or end of function if failure */
253 file_info = kmem_cache_alloc(ecryptfs_file_info_cache, SLAB_KERNEL); 253 file_info = kmem_cache_alloc(ecryptfs_file_info_cache, GFP_KERNEL);
254 ecryptfs_set_file_private(file, file_info); 254 ecryptfs_set_file_private(file, file_info);
255 if (!file_info) { 255 if (!file_info) {
256 ecryptfs_printk(KERN_ERR, 256 ecryptfs_printk(KERN_ERR,
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index dfcc68484f47..8a1945a84c36 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -369,7 +369,7 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
369 BUG_ON(!atomic_read(&lower_dentry->d_count)); 369 BUG_ON(!atomic_read(&lower_dentry->d_count));
370 ecryptfs_set_dentry_private(dentry, 370 ecryptfs_set_dentry_private(dentry,
371 kmem_cache_alloc(ecryptfs_dentry_info_cache, 371 kmem_cache_alloc(ecryptfs_dentry_info_cache,
372 SLAB_KERNEL)); 372 GFP_KERNEL));
373 if (!ecryptfs_dentry_to_private(dentry)) { 373 if (!ecryptfs_dentry_to_private(dentry)) {
374 rc = -ENOMEM; 374 rc = -ENOMEM;
375 ecryptfs_printk(KERN_ERR, "Out of memory whilst attempting " 375 ecryptfs_printk(KERN_ERR, "Out of memory whilst attempting "
@@ -404,7 +404,7 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
404 /* Released in this function */ 404 /* Released in this function */
405 page_virt = 405 page_virt =
406 (char *)kmem_cache_alloc(ecryptfs_header_cache_2, 406 (char *)kmem_cache_alloc(ecryptfs_header_cache_2,
407 SLAB_USER); 407 GFP_USER);
408 if (!page_virt) { 408 if (!page_virt) {
409 rc = -ENOMEM; 409 rc = -ENOMEM;
410 ecryptfs_printk(KERN_ERR, 410 ecryptfs_printk(KERN_ERR,
@@ -795,7 +795,7 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
795 /* Released at out_free: label */ 795 /* Released at out_free: label */
796 ecryptfs_set_file_private(&fake_ecryptfs_file, 796 ecryptfs_set_file_private(&fake_ecryptfs_file,
797 kmem_cache_alloc(ecryptfs_file_info_cache, 797 kmem_cache_alloc(ecryptfs_file_info_cache,
798 SLAB_KERNEL)); 798 GFP_KERNEL));
799 if (unlikely(!ecryptfs_file_to_private(&fake_ecryptfs_file))) { 799 if (unlikely(!ecryptfs_file_to_private(&fake_ecryptfs_file))) {
800 rc = -ENOMEM; 800 rc = -ENOMEM;
801 goto out; 801 goto out;
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index c3746f56d162..745c0f1bfbbd 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -207,7 +207,7 @@ parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat,
207 /* Released: wipe_auth_tok_list called in ecryptfs_parse_packet_set or 207 /* Released: wipe_auth_tok_list called in ecryptfs_parse_packet_set or
208 * at end of function upon failure */ 208 * at end of function upon failure */
209 auth_tok_list_item = 209 auth_tok_list_item =
210 kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, SLAB_KERNEL); 210 kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, GFP_KERNEL);
211 if (!auth_tok_list_item) { 211 if (!auth_tok_list_item) {
212 ecryptfs_printk(KERN_ERR, "Unable to allocate memory\n"); 212 ecryptfs_printk(KERN_ERR, "Unable to allocate memory\n");
213 rc = -ENOMEM; 213 rc = -ENOMEM;
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index a78d87d14baf..3ede12b25933 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -378,7 +378,7 @@ ecryptfs_fill_super(struct super_block *sb, void *raw_data, int silent)
378 /* Released in ecryptfs_put_super() */ 378 /* Released in ecryptfs_put_super() */
379 ecryptfs_set_superblock_private(sb, 379 ecryptfs_set_superblock_private(sb,
380 kmem_cache_alloc(ecryptfs_sb_info_cache, 380 kmem_cache_alloc(ecryptfs_sb_info_cache,
381 SLAB_KERNEL)); 381 GFP_KERNEL));
382 if (!ecryptfs_superblock_to_private(sb)) { 382 if (!ecryptfs_superblock_to_private(sb)) {
383 ecryptfs_printk(KERN_WARNING, "Out of memory\n"); 383 ecryptfs_printk(KERN_WARNING, "Out of memory\n");
384 rc = -ENOMEM; 384 rc = -ENOMEM;
@@ -402,7 +402,7 @@ ecryptfs_fill_super(struct super_block *sb, void *raw_data, int silent)
402 /* through deactivate_super(sb) from get_sb_nodev() */ 402 /* through deactivate_super(sb) from get_sb_nodev() */
403 ecryptfs_set_dentry_private(sb->s_root, 403 ecryptfs_set_dentry_private(sb->s_root,
404 kmem_cache_alloc(ecryptfs_dentry_info_cache, 404 kmem_cache_alloc(ecryptfs_dentry_info_cache,
405 SLAB_KERNEL)); 405 GFP_KERNEL));
406 if (!ecryptfs_dentry_to_private(sb->s_root)) { 406 if (!ecryptfs_dentry_to_private(sb->s_root)) {
407 ecryptfs_printk(KERN_ERR, 407 ecryptfs_printk(KERN_ERR,
408 "dentry_info_cache alloc failed\n"); 408 "dentry_info_cache alloc failed\n");
@@ -546,7 +546,7 @@ inode_info_init_once(void *vptr, struct kmem_cache *cachep, unsigned long flags)
546} 546}
547 547
548static struct ecryptfs_cache_info { 548static struct ecryptfs_cache_info {
549 kmem_cache_t **cache; 549 struct kmem_cache **cache;
550 const char *name; 550 const char *name;
551 size_t size; 551 size_t size;
552 void (*ctor)(void*, struct kmem_cache *, unsigned long); 552 void (*ctor)(void*, struct kmem_cache *, unsigned long);
@@ -691,7 +691,7 @@ static ssize_t version_show(struct ecryptfs_obj *obj, char *buff)
691 691
692static struct ecryptfs_attribute sysfs_attr_version = __ATTR_RO(version); 692static struct ecryptfs_attribute sysfs_attr_version = __ATTR_RO(version);
693 693
694struct ecryptfs_version_str_map_elem { 694static struct ecryptfs_version_str_map_elem {
695 u32 flag; 695 u32 flag;
696 char *str; 696 char *str;
697} ecryptfs_version_str_map[] = { 697} ecryptfs_version_str_map[] = {
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 825757ae4867..eaa5daaf106e 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -50,7 +50,7 @@ static struct inode *ecryptfs_alloc_inode(struct super_block *sb)
50 struct inode *inode = NULL; 50 struct inode *inode = NULL;
51 51
52 ecryptfs_inode = kmem_cache_alloc(ecryptfs_inode_info_cache, 52 ecryptfs_inode = kmem_cache_alloc(ecryptfs_inode_info_cache,
53 SLAB_KERNEL); 53 GFP_KERNEL);
54 if (unlikely(!ecryptfs_inode)) 54 if (unlikely(!ecryptfs_inode))
55 goto out; 55 goto out;
56 ecryptfs_init_crypt_stat(&ecryptfs_inode->crypt_stat); 56 ecryptfs_init_crypt_stat(&ecryptfs_inode->crypt_stat);
diff --git a/fs/efs/super.c b/fs/efs/super.c
index b3f50651eb6b..dfebf21289f4 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -52,12 +52,12 @@ static struct pt_types sgi_pt_types[] = {
52}; 52};
53 53
54 54
55static kmem_cache_t * efs_inode_cachep; 55static struct kmem_cache * efs_inode_cachep;
56 56
57static struct inode *efs_alloc_inode(struct super_block *sb) 57static struct inode *efs_alloc_inode(struct super_block *sb)
58{ 58{
59 struct efs_inode_info *ei; 59 struct efs_inode_info *ei;
60 ei = (struct efs_inode_info *)kmem_cache_alloc(efs_inode_cachep, SLAB_KERNEL); 60 ei = (struct efs_inode_info *)kmem_cache_alloc(efs_inode_cachep, GFP_KERNEL);
61 if (!ei) 61 if (!ei)
62 return NULL; 62 return NULL;
63 return &ei->vfs_inode; 63 return &ei->vfs_inode;
@@ -68,7 +68,7 @@ static void efs_destroy_inode(struct inode *inode)
68 kmem_cache_free(efs_inode_cachep, INODE_INFO(inode)); 68 kmem_cache_free(efs_inode_cachep, INODE_INFO(inode));
69} 69}
70 70
71static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 71static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
72{ 72{
73 struct efs_inode_info *ei = (struct efs_inode_info *) foo; 73 struct efs_inode_info *ei = (struct efs_inode_info *) foo;
74 74
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index ae228ec54e94..88a6f8d0b88e 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -283,10 +283,10 @@ static struct mutex epmutex;
283static struct poll_safewake psw; 283static struct poll_safewake psw;
284 284
285/* Slab cache used to allocate "struct epitem" */ 285/* Slab cache used to allocate "struct epitem" */
286static kmem_cache_t *epi_cache __read_mostly; 286static struct kmem_cache *epi_cache __read_mostly;
287 287
288/* Slab cache used to allocate "struct eppoll_entry" */ 288/* Slab cache used to allocate "struct eppoll_entry" */
289static kmem_cache_t *pwq_cache __read_mostly; 289static struct kmem_cache *pwq_cache __read_mostly;
290 290
291/* Virtual fs used to allocate inodes for eventpoll files */ 291/* Virtual fs used to allocate inodes for eventpoll files */
292static struct vfsmount *eventpoll_mnt __read_mostly; 292static struct vfsmount *eventpoll_mnt __read_mostly;
@@ -961,7 +961,7 @@ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
961 struct epitem *epi = ep_item_from_epqueue(pt); 961 struct epitem *epi = ep_item_from_epqueue(pt);
962 struct eppoll_entry *pwq; 962 struct eppoll_entry *pwq;
963 963
964 if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, SLAB_KERNEL))) { 964 if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
965 init_waitqueue_func_entry(&pwq->wait, ep_poll_callback); 965 init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
966 pwq->whead = whead; 966 pwq->whead = whead;
967 pwq->base = epi; 967 pwq->base = epi;
@@ -1004,7 +1004,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
1004 struct ep_pqueue epq; 1004 struct ep_pqueue epq;
1005 1005
1006 error = -ENOMEM; 1006 error = -ENOMEM;
1007 if (!(epi = kmem_cache_alloc(epi_cache, SLAB_KERNEL))) 1007 if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
1008 goto eexit_1; 1008 goto eexit_1;
1009 1009
1010 /* Item initialization follow here ... */ 1010 /* Item initialization follow here ... */
diff --git a/fs/exec.c b/fs/exec.c
index d993ea1a81ae..add0e03c3ea9 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -404,7 +404,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
404 bprm->loader += stack_base; 404 bprm->loader += stack_base;
405 bprm->exec += stack_base; 405 bprm->exec += stack_base;
406 406
407 mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 407 mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
408 if (!mpnt) 408 if (!mpnt)
409 return -ENOMEM; 409 return -ENOMEM;
410 410
@@ -1515,7 +1515,8 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
1515 ispipe = 1; 1515 ispipe = 1;
1516 } else 1516 } else
1517 file = filp_open(corename, 1517 file = filp_open(corename,
1518 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE, 0600); 1518 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
1519 0600);
1519 if (IS_ERR(file)) 1520 if (IS_ERR(file))
1520 goto fail_unlock; 1521 goto fail_unlock;
1521 inode = file->f_dentry->d_inode; 1522 inode = file->f_dentry->d_inode;
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index 1dfba77eab10..e3cf8c81507f 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -44,6 +44,7 @@ int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
44 if (!S_ISDIR(inode->i_mode)) 44 if (!S_ISDIR(inode->i_mode))
45 flags &= ~EXT2_DIRSYNC_FL; 45 flags &= ~EXT2_DIRSYNC_FL;
46 46
47 mutex_lock(&inode->i_mutex);
47 oldflags = ei->i_flags; 48 oldflags = ei->i_flags;
48 49
49 /* 50 /*
@@ -53,13 +54,16 @@ int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
53 * This test looks nicer. Thanks to Pauline Middelink 54 * This test looks nicer. Thanks to Pauline Middelink
54 */ 55 */
55 if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) { 56 if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) {
56 if (!capable(CAP_LINUX_IMMUTABLE)) 57 if (!capable(CAP_LINUX_IMMUTABLE)) {
58 mutex_unlock(&inode->i_mutex);
57 return -EPERM; 59 return -EPERM;
60 }
58 } 61 }
59 62
60 flags = flags & EXT2_FL_USER_MODIFIABLE; 63 flags = flags & EXT2_FL_USER_MODIFIABLE;
61 flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE; 64 flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE;
62 ei->i_flags = flags; 65 ei->i_flags = flags;
66 mutex_unlock(&inode->i_mutex);
63 67
64 ext2_set_inode_flags(inode); 68 ext2_set_inode_flags(inode);
65 inode->i_ctime = CURRENT_TIME_SEC; 69 inode->i_ctime = CURRENT_TIME_SEC;
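
The ext2_ioctl() hunk above wraps the flag update in i_mutex because it is a read-modify-write: user-modifiable bits come from the caller, the remaining bits must be preserved from the old value, and two racing callers must not interleave between the read and the store. A toy userspace illustration of that rule (names and masks invented):

#include <pthread.h>
#include <stdio.h>

#define USER_MODIFIABLE 0x00ffu

static pthread_mutex_t flags_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int inode_flags = 0xab00u;	/* kernel-owned bits already set */

static void set_user_flags(unsigned int new_flags)
{
	unsigned int old;

	pthread_mutex_lock(&flags_lock);
	old = inode_flags;
	/* keep the bits the user may not touch, replace the rest */
	inode_flags = (new_flags & USER_MODIFIABLE) | (old & ~USER_MODIFIABLE);
	pthread_mutex_unlock(&flags_lock);
}

int main(void)
{
	set_user_flags(0x0042u);
	printf("flags now 0x%04x\n", inode_flags);	/* 0xab42: kernel bits preserved */
	return 0;
}
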
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index d8b9abd95d07..255cef5f7420 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -135,12 +135,12 @@ static void ext2_put_super (struct super_block * sb)
135 return; 135 return;
136} 136}
137 137
138static kmem_cache_t * ext2_inode_cachep; 138static struct kmem_cache * ext2_inode_cachep;
139 139
140static struct inode *ext2_alloc_inode(struct super_block *sb) 140static struct inode *ext2_alloc_inode(struct super_block *sb)
141{ 141{
142 struct ext2_inode_info *ei; 142 struct ext2_inode_info *ei;
143 ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, SLAB_KERNEL); 143 ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL);
144 if (!ei) 144 if (!ei)
145 return NULL; 145 return NULL;
146#ifdef CONFIG_EXT2_FS_POSIX_ACL 146#ifdef CONFIG_EXT2_FS_POSIX_ACL
@@ -156,7 +156,7 @@ static void ext2_destroy_inode(struct inode *inode)
156 kmem_cache_free(ext2_inode_cachep, EXT2_I(inode)); 156 kmem_cache_free(ext2_inode_cachep, EXT2_I(inode));
157} 157}
158 158
159static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 159static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
160{ 160{
161 struct ext2_inode_info *ei = (struct ext2_inode_info *) foo; 161 struct ext2_inode_info *ei = (struct ext2_inode_info *) foo;
162 162
@@ -1090,8 +1090,10 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
1090{ 1090{
1091 struct super_block *sb = dentry->d_sb; 1091 struct super_block *sb = dentry->d_sb;
1092 struct ext2_sb_info *sbi = EXT2_SB(sb); 1092 struct ext2_sb_info *sbi = EXT2_SB(sb);
1093 struct ext2_super_block *es = sbi->s_es;
1093 unsigned long overhead; 1094 unsigned long overhead;
1094 int i; 1095 int i;
1096 u64 fsid;
1095 1097
1096 if (test_opt (sb, MINIX_DF)) 1098 if (test_opt (sb, MINIX_DF))
1097 overhead = 0; 1099 overhead = 0;
@@ -1104,7 +1106,7 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
1104 * All of the blocks before first_data_block are 1106 * All of the blocks before first_data_block are
1105 * overhead 1107 * overhead
1106 */ 1108 */
1107 overhead = le32_to_cpu(sbi->s_es->s_first_data_block); 1109 overhead = le32_to_cpu(es->s_first_data_block);
1108 1110
1109 /* 1111 /*
1110 * Add the overhead attributed to the superblock and 1112 * Add the overhead attributed to the superblock and
@@ -1125,14 +1127,18 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
1125 1127
1126 buf->f_type = EXT2_SUPER_MAGIC; 1128 buf->f_type = EXT2_SUPER_MAGIC;
1127 buf->f_bsize = sb->s_blocksize; 1129 buf->f_bsize = sb->s_blocksize;
1128 buf->f_blocks = le32_to_cpu(sbi->s_es->s_blocks_count) - overhead; 1130 buf->f_blocks = le32_to_cpu(es->s_blocks_count) - overhead;
1129 buf->f_bfree = ext2_count_free_blocks(sb); 1131 buf->f_bfree = ext2_count_free_blocks(sb);
1130 buf->f_bavail = buf->f_bfree - le32_to_cpu(sbi->s_es->s_r_blocks_count); 1132 buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count);
1131 if (buf->f_bfree < le32_to_cpu(sbi->s_es->s_r_blocks_count)) 1133 if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count))
1132 buf->f_bavail = 0; 1134 buf->f_bavail = 0;
1133 buf->f_files = le32_to_cpu(sbi->s_es->s_inodes_count); 1135 buf->f_files = le32_to_cpu(es->s_inodes_count);
1134 buf->f_ffree = ext2_count_free_inodes (sb); 1136 buf->f_ffree = ext2_count_free_inodes(sb);
1135 buf->f_namelen = EXT2_NAME_LEN; 1137 buf->f_namelen = EXT2_NAME_LEN;
1138 fsid = le64_to_cpup((void *)es->s_uuid) ^
1139 le64_to_cpup((void *)es->s_uuid + sizeof(u64));
1140 buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
1141 buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
1136 return 0; 1142 return 0;
1137} 1143}
1138 1144
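
The f_fsid lines added to ext2_statfs() above derive a stable filesystem id by XORing the two little-endian 64-bit halves of the superblock UUID and splitting the result into two 32-bit words. A standalone userspace sketch of the same arithmetic, with a made-up UUID and a hand-rolled little-endian load in place of le64_to_cpup():

#include <stdint.h>
#include <stdio.h>

static uint64_t le64_from_bytes(const unsigned char *p)
{
	uint64_t v = 0;

	for (int i = 7; i >= 0; i--)
		v = (v << 8) | p[i];	/* little-endian load, like le64_to_cpup() */
	return v;
}

int main(void)
{
	unsigned char uuid[16] = {
		0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
		0x0f, 0xed, 0xcb, 0xa9, 0x87, 0x65, 0x43, 0x21,
	};
	uint64_t fsid = le64_from_bytes(uuid) ^ le64_from_bytes(uuid + 8);
	uint32_t val0 = fsid & 0xFFFFFFFFUL;
	uint32_t val1 = (fsid >> 32) & 0xFFFFFFFFUL;

	printf("f_fsid = { 0x%08x, 0x%08x }\n", val0, val1);
	return 0;
}
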
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index af52a7f8b291..247efd0b51d6 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -342,12 +342,9 @@ static void ext2_xattr_update_super_block(struct super_block *sb)
342 if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR)) 342 if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
343 return; 343 return;
344 344
345 lock_super(sb); 345 EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
346 EXT2_SB(sb)->s_es->s_feature_compat |=
347 cpu_to_le32(EXT2_FEATURE_COMPAT_EXT_ATTR);
348 sb->s_dirt = 1; 346 sb->s_dirt = 1;
349 mark_buffer_dirty(EXT2_SB(sb)->s_sbh); 347 mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
350 unlock_super(sb);
351} 348}
352 349
353/* 350/*
diff --git a/fs/ext3/Makefile b/fs/ext3/Makefile
index 704cd44a40c2..e77766a8b3f0 100644
--- a/fs/ext3/Makefile
+++ b/fs/ext3/Makefile
@@ -5,7 +5,7 @@
5obj-$(CONFIG_EXT3_FS) += ext3.o 5obj-$(CONFIG_EXT3_FS) += ext3.o
6 6
7ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \ 7ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
8 ioctl.o namei.o super.o symlink.o hash.o resize.o 8 ioctl.o namei.o super.o symlink.o hash.o resize.o ext3_jbd.o
9 9
10ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o 10ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
11ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o 11ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index b41a7d7e20f0..22161740ba29 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -144,7 +144,7 @@ restart:
144 144
145 printk("Block Allocation Reservation Windows Map (%s):\n", fn); 145 printk("Block Allocation Reservation Windows Map (%s):\n", fn);
146 while (n) { 146 while (n) {
147 rsv = list_entry(n, struct ext3_reserve_window_node, rsv_node); 147 rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
148 if (verbose) 148 if (verbose)
149 printk("reservation window 0x%p " 149 printk("reservation window 0x%p "
150 "start: %lu, end: %lu\n", 150 "start: %lu, end: %lu\n",
@@ -730,7 +730,7 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
730 here = 0; 730 here = 0;
731 731
732 p = ((char *)bh->b_data) + (here >> 3); 732 p = ((char *)bh->b_data) + (here >> 3);
733 r = memscan(p, 0, (maxblocks - here + 7) >> 3); 733 r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
734 next = (r - ((char *)bh->b_data)) << 3; 734 next = (r - ((char *)bh->b_data)) << 3;
735 735
736 if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh)) 736 if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
@@ -949,7 +949,7 @@ static int find_next_reservable_window(
949 949
950 prev = rsv; 950 prev = rsv;
951 next = rb_next(&rsv->rsv_node); 951 next = rb_next(&rsv->rsv_node);
952 rsv = list_entry(next,struct ext3_reserve_window_node,rsv_node); 952 rsv = rb_entry(next,struct ext3_reserve_window_node,rsv_node);
953 953
954 /* 954 /*
955 * Reached the last reservation, we can just append to the 955 * Reached the last reservation, we can just append to the
@@ -1148,7 +1148,7 @@ retry:
1148 * check if the first free block is within the 1148 * check if the first free block is within the
1149 * free space we just reserved 1149 * free space we just reserved
1150 */ 1150 */
1151 if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end) 1151 if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
1152 return 0; /* success */ 1152 return 0; /* success */
1153 /* 1153 /*
1154 * if the first free bit we found is out of the reservable space 1154 * if the first free bit we found is out of the reservable space
@@ -1193,7 +1193,7 @@ static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
1193 if (!next) 1193 if (!next)
1194 my_rsv->rsv_end += size; 1194 my_rsv->rsv_end += size;
1195 else { 1195 else {
1196 next_rsv = list_entry(next, struct ext3_reserve_window_node, rsv_node); 1196 next_rsv = rb_entry(next, struct ext3_reserve_window_node, rsv_node);
1197 1197
1198 if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size) 1198 if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
1199 my_rsv->rsv_end += size; 1199 my_rsv->rsv_end += size;
@@ -1271,7 +1271,7 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1271 } 1271 }
1272 /* 1272 /*
1273 * grp_goal is a group relative block number (if there is a goal) 1273 * grp_goal is a group relative block number (if there is a goal)
1274 * 0 < grp_goal < EXT3_BLOCKS_PER_GROUP(sb) 1274 * 0 <= grp_goal < EXT3_BLOCKS_PER_GROUP(sb)
1275 * first block is a filesystem wide block number 1275 * first block is a filesystem wide block number
1276 * first block is the block number of the first block in this group 1276 * first block is the block number of the first block in this group
1277 */ 1277 */
@@ -1307,10 +1307,14 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1307 if (!goal_in_my_reservation(&my_rsv->rsv_window, 1307 if (!goal_in_my_reservation(&my_rsv->rsv_window,
1308 grp_goal, group, sb)) 1308 grp_goal, group, sb))
1309 grp_goal = -1; 1309 grp_goal = -1;
1310 } else if (grp_goal > 0 && 1310 } else if (grp_goal >= 0) {
1311 (my_rsv->rsv_end-grp_goal+1) < *count) 1311 int curr = my_rsv->rsv_end -
1312 try_to_extend_reservation(my_rsv, sb, 1312 (grp_goal + group_first_block) + 1;
1313 *count-my_rsv->rsv_end + grp_goal - 1); 1313
1314 if (curr < *count)
1315 try_to_extend_reservation(my_rsv, sb,
1316 *count - curr);
1317 }
1314 1318
1315 if ((my_rsv->rsv_start > group_last_block) || 1319 if ((my_rsv->rsv_start > group_last_block) ||
1316 (my_rsv->rsv_end < group_first_block)) { 1320 (my_rsv->rsv_end < group_first_block)) {
@@ -1511,10 +1515,8 @@ retry_alloc:
1511 if (group_no >= ngroups) 1515 if (group_no >= ngroups)
1512 group_no = 0; 1516 group_no = 0;
1513 gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); 1517 gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
1514 if (!gdp) { 1518 if (!gdp)
1515 *errp = -EIO; 1519 goto io_error;
1516 goto out;
1517 }
1518 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); 1520 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1519 /* 1521 /*
1520 * skip this group if the number of 1522 * skip this group if the number of
@@ -1548,6 +1550,7 @@ retry_alloc:
1548 */ 1550 */
1549 if (my_rsv) { 1551 if (my_rsv) {
1550 my_rsv = NULL; 1552 my_rsv = NULL;
1553 windowsz = 0;
1551 group_no = goal_group; 1554 group_no = goal_group;
1552 goto retry_alloc; 1555 goto retry_alloc;
1553 } 1556 }
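
The memscan() fix in find_next_usable_block() above changes the byte count to "number of bitmap bytes that cover maxblocks blocks, minus the index of the byte the scan starts in", i.e. ((maxblocks + 7) >> 3) - (here >> 3). A small userspace sketch of that bytewise scan for a fully-free byte, using memchr() as a stand-in for the kernel's memscan() and invented positions:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char bitmap[8];
	int here = 14, maxblocks = 64;	/* hypothetical group-relative positions */
	const unsigned char *p, *r;
	size_t nbytes;

	memset(bitmap, 0xff, sizeof(bitmap));	/* everything allocated ... */
	bitmap[5] = 0;				/* ... except blocks 40-47 */

	p = bitmap + (here >> 3);
	nbytes = ((maxblocks + 7) >> 3) - (here >> 3);	/* bytes left to look at */
	r = memchr(p, 0, nbytes);			/* memchr() returns NULL when not found */

	if (r)
		printf("first fully free byte covers blocks %zu-%zu\n",
		       (size_t)(r - bitmap) * 8, (size_t)(r - bitmap) * 8 + 7);
	return 0;
}
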
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index d0b54f30b914..5a9313ecd4ef 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -154,6 +154,9 @@ static int ext3_readdir(struct file * filp,
154 ext3_error (sb, "ext3_readdir", 154 ext3_error (sb, "ext3_readdir",
155 "directory #%lu contains a hole at offset %lu", 155 "directory #%lu contains a hole at offset %lu",
156 inode->i_ino, (unsigned long)filp->f_pos); 156 inode->i_ino, (unsigned long)filp->f_pos);
157 /* corrupt size? Maybe no more blocks to read */
158 if (filp->f_pos > inode->i_blocks << 9)
159 break;
157 filp->f_pos += sb->s_blocksize - offset; 160 filp->f_pos += sb->s_blocksize - offset;
158 continue; 161 continue;
159 } 162 }
diff --git a/fs/ext3/ext3_jbd.c b/fs/ext3/ext3_jbd.c
new file mode 100644
index 000000000000..e1f91fd26a93
--- /dev/null
+++ b/fs/ext3/ext3_jbd.c
@@ -0,0 +1,59 @@
1/*
2 * Interface between ext3 and JBD
3 */
4
5#include <linux/ext3_jbd.h>
6
7int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
8 struct buffer_head *bh)
9{
10 int err = journal_get_undo_access(handle, bh);
11 if (err)
12 ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
13 return err;
14}
15
16int __ext3_journal_get_write_access(const char *where, handle_t *handle,
17 struct buffer_head *bh)
18{
19 int err = journal_get_write_access(handle, bh);
20 if (err)
21 ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
22 return err;
23}
24
25int __ext3_journal_forget(const char *where, handle_t *handle,
26 struct buffer_head *bh)
27{
28 int err = journal_forget(handle, bh);
29 if (err)
30 ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
31 return err;
32}
33
34int __ext3_journal_revoke(const char *where, handle_t *handle,
35 unsigned long blocknr, struct buffer_head *bh)
36{
37 int err = journal_revoke(handle, blocknr, bh);
38 if (err)
39 ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
40 return err;
41}
42
43int __ext3_journal_get_create_access(const char *where,
44 handle_t *handle, struct buffer_head *bh)
45{
46 int err = journal_get_create_access(handle, bh);
47 if (err)
48 ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
49 return err;
50}
51
52int __ext3_journal_dirty_metadata(const char *where,
53 handle_t *handle, struct buffer_head *bh)
54{
55 int err = journal_dirty_metadata(handle, bh);
56 if (err)
57 ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
58 return err;
59}
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 03ba5bcab186..beaf25f5112f 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1148,37 +1148,102 @@ static int do_journal_get_write_access(handle_t *handle,
1148 return ext3_journal_get_write_access(handle, bh); 1148 return ext3_journal_get_write_access(handle, bh);
1149} 1149}
1150 1150
1151/*
1152 * The idea of this helper function is the following:
1153 * if prepare_write has allocated some blocks, but not all of them, the
1154 * transaction must include the content of the newly allocated blocks.
1155 * This content is expected to be set to zeroes by block_prepare_write().
1156 * 2006/10/14 SAW
1157 */
1158static int ext3_prepare_failure(struct file *file, struct page *page,
1159 unsigned from, unsigned to)
1160{
1161 struct address_space *mapping;
1162 struct buffer_head *bh, *head, *next;
1163 unsigned block_start, block_end;
1164 unsigned blocksize;
1165 int ret;
1166 handle_t *handle = ext3_journal_current_handle();
1167
1168 mapping = page->mapping;
1169 if (ext3_should_writeback_data(mapping->host)) {
1170 /* optimization: no constraints about data */
1171skip:
1172 return ext3_journal_stop(handle);
1173 }
1174
1175 head = page_buffers(page);
1176 blocksize = head->b_size;
1177 for ( bh = head, block_start = 0;
1178 bh != head || !block_start;
1179 block_start = block_end, bh = next)
1180 {
1181 next = bh->b_this_page;
1182 block_end = block_start + blocksize;
1183 if (block_end <= from)
1184 continue;
1185 if (block_start >= to) {
1186 block_start = to;
1187 break;
1188 }
1189 if (!buffer_mapped(bh))
1190 /* prepare_write failed on this bh */
1191 break;
1192 if (ext3_should_journal_data(mapping->host)) {
1193 ret = do_journal_get_write_access(handle, bh);
1194 if (ret) {
1195 ext3_journal_stop(handle);
1196 return ret;
1197 }
1198 }
1199 /*
1200 * block_start here becomes the first block where the current iteration
1201 * of prepare_write failed.
1202 */
1203 }
1204 if (block_start <= from)
1205 goto skip;
1206
1207 /* commit allocated and zeroed buffers */
1208 return mapping->a_ops->commit_write(file, page, from, block_start);
1209}
1210
1151static int ext3_prepare_write(struct file *file, struct page *page, 1211static int ext3_prepare_write(struct file *file, struct page *page,
1152 unsigned from, unsigned to) 1212 unsigned from, unsigned to)
1153{ 1213{
1154 struct inode *inode = page->mapping->host; 1214 struct inode *inode = page->mapping->host;
1155 int ret, needed_blocks = ext3_writepage_trans_blocks(inode); 1215 int ret, ret2;
1216 int needed_blocks = ext3_writepage_trans_blocks(inode);
1156 handle_t *handle; 1217 handle_t *handle;
1157 int retries = 0; 1218 int retries = 0;
1158 1219
1159retry: 1220retry:
1160 handle = ext3_journal_start(inode, needed_blocks); 1221 handle = ext3_journal_start(inode, needed_blocks);
1161 if (IS_ERR(handle)) { 1222 if (IS_ERR(handle))
1162 ret = PTR_ERR(handle); 1223 return PTR_ERR(handle);
1163 goto out;
1164 }
1165 if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode)) 1224 if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode))
1166 ret = nobh_prepare_write(page, from, to, ext3_get_block); 1225 ret = nobh_prepare_write(page, from, to, ext3_get_block);
1167 else 1226 else
1168 ret = block_prepare_write(page, from, to, ext3_get_block); 1227 ret = block_prepare_write(page, from, to, ext3_get_block);
1169 if (ret) 1228 if (ret)
1170 goto prepare_write_failed; 1229 goto failure;
1171 1230
1172 if (ext3_should_journal_data(inode)) { 1231 if (ext3_should_journal_data(inode)) {
1173 ret = walk_page_buffers(handle, page_buffers(page), 1232 ret = walk_page_buffers(handle, page_buffers(page),
1174 from, to, NULL, do_journal_get_write_access); 1233 from, to, NULL, do_journal_get_write_access);
1234 if (ret)
1235 /* fatal error, just put the handle and return */
1236 journal_stop(handle);
1175 } 1237 }
1176prepare_write_failed: 1238 return ret;
1177 if (ret) 1239
1178 ext3_journal_stop(handle); 1240failure:
1241 ret2 = ext3_prepare_failure(file, page, from, to);
1242 if (ret2 < 0)
1243 return ret2;
1179 if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries)) 1244 if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
1180 goto retry; 1245 goto retry;
1181out: 1246 /* retry number exceeded, or other error like -EDQUOT */
1182 return ret; 1247 return ret;
1183} 1248}
1184 1249
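
ext3_prepare_failure() above exists because block_prepare_write() can fail after allocating and zeroing some of the page's buffers; those buffers must still be committed so the transaction matches what is now on disk. The sketch below walks a toy buffer array the same way, stopping at the first unmapped buffer and committing only the range that was successfully prepared (block size, counts and names are invented):

#include <stdbool.h>
#include <stdio.h>

#define BLOCKSIZE 1024
#define NBUFS     4		/* a 4K page of 1K buffers */

int main(void)
{
	bool mapped[NBUFS] = { true, true, false, false };	/* allocation failed at buffer 2 */
	unsigned from = 100, to = 3500;
	unsigned block_start = 0, block_end = 0, commit_to = from;

	for (int i = 0; i < NBUFS; i++, block_start = block_end) {
		block_end = block_start + BLOCKSIZE;
		if (block_end <= from)
			continue;		/* buffer lies before the write range */
		if (block_start >= to) {
			commit_to = to;		/* whole range was prepared */
			break;
		}
		if (!mapped[i]) {
			commit_to = block_start;	/* first buffer prepare_write failed on */
			break;
		}
		commit_to = block_end;
	}

	if (commit_to > from)
		printf("commit_write(from=%u, to=%u)\n", from, commit_to);
	else
		printf("nothing to commit\n");
	return 0;
}
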
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 906731a20f1a..60d2f9dbdb00 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -552,6 +552,15 @@ static int htree_dirblock_to_tree(struct file *dir_file,
552 dir->i_sb->s_blocksize - 552 dir->i_sb->s_blocksize -
553 EXT3_DIR_REC_LEN(0)); 553 EXT3_DIR_REC_LEN(0));
554 for (; de < top; de = ext3_next_entry(de)) { 554 for (; de < top; de = ext3_next_entry(de)) {
555 if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
556 (block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb))
557 +((char *)de - bh->b_data))) {
558 /* On error, skip the f_pos to the next block. */
559 dir_file->f_pos = (dir_file->f_pos |
560 (dir->i_sb->s_blocksize - 1)) + 1;
561 brelse (bh);
562 return count;
563 }
555 ext3fs_dirhash(de->name, de->name_len, hinfo); 564 ext3fs_dirhash(de->name, de->name_len, hinfo);
556 if ((hinfo->hash < start_hash) || 565 if ((hinfo->hash < start_hash) ||
557 ((hinfo->hash == start_hash) && 566 ((hinfo->hash == start_hash) &&
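
The htree_dirblock_to_tree() hunk above skips a corrupted directory block by rounding f_pos up to the next block boundary with (f_pos | (blocksize - 1)) + 1, which works for any power-of-two block size. A tiny demonstration of that expression:

#include <stdio.h>

int main(void)
{
	unsigned long blocksize = 4096;	/* must be a power of two */
	unsigned long pos[] = { 0, 1, 4095, 4096, 5000 };

	for (int i = 0; i < 5; i++)
		printf("pos %5lu -> next block starts at %lu\n",
		       pos[i], (pos[i] | (blocksize - 1)) + 1);
	return 0;
}
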
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index afc2d4f42d77..580b8a6ca979 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -436,7 +436,7 @@ static void ext3_put_super (struct super_block * sb)
436 return; 436 return;
437} 437}
438 438
439static kmem_cache_t *ext3_inode_cachep; 439static struct kmem_cache *ext3_inode_cachep;
440 440
441/* 441/*
442 * Called inside transaction, so use GFP_NOFS 442 * Called inside transaction, so use GFP_NOFS
@@ -445,7 +445,7 @@ static struct inode *ext3_alloc_inode(struct super_block *sb)
445{ 445{
446 struct ext3_inode_info *ei; 446 struct ext3_inode_info *ei;
447 447
448 ei = kmem_cache_alloc(ext3_inode_cachep, SLAB_NOFS); 448 ei = kmem_cache_alloc(ext3_inode_cachep, GFP_NOFS);
449 if (!ei) 449 if (!ei)
450 return NULL; 450 return NULL;
451#ifdef CONFIG_EXT3_FS_POSIX_ACL 451#ifdef CONFIG_EXT3_FS_POSIX_ACL
@@ -462,7 +462,7 @@ static void ext3_destroy_inode(struct inode *inode)
462 kmem_cache_free(ext3_inode_cachep, EXT3_I(inode)); 462 kmem_cache_free(ext3_inode_cachep, EXT3_I(inode));
463} 463}
464 464
465static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 465static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
466{ 466{
467 struct ext3_inode_info *ei = (struct ext3_inode_info *) foo; 467 struct ext3_inode_info *ei = (struct ext3_inode_info *) foo;
468 468
@@ -1264,6 +1264,12 @@ static void ext3_orphan_cleanup (struct super_block * sb,
1264 return; 1264 return;
1265 } 1265 }
1266 1266
1267 if (bdev_read_only(sb->s_bdev)) {
1268 printk(KERN_ERR "EXT3-fs: write access "
1269 "unavailable, skipping orphan cleanup.\n");
1270 return;
1271 }
1272
1267 if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) { 1273 if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) {
1268 if (es->s_last_orphan) 1274 if (es->s_last_orphan)
1269 jbd_debug(1, "Errors on filesystem, " 1275 jbd_debug(1, "Errors on filesystem, "
@@ -2387,6 +2393,7 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
2387 struct ext3_super_block *es = sbi->s_es; 2393 struct ext3_super_block *es = sbi->s_es;
2388 ext3_fsblk_t overhead; 2394 ext3_fsblk_t overhead;
2389 int i; 2395 int i;
2396 u64 fsid;
2390 2397
2391 if (test_opt (sb, MINIX_DF)) 2398 if (test_opt (sb, MINIX_DF))
2392 overhead = 0; 2399 overhead = 0;
@@ -2433,6 +2440,10 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
2433 buf->f_files = le32_to_cpu(es->s_inodes_count); 2440 buf->f_files = le32_to_cpu(es->s_inodes_count);
2434 buf->f_ffree = percpu_counter_sum(&sbi->s_freeinodes_counter); 2441 buf->f_ffree = percpu_counter_sum(&sbi->s_freeinodes_counter);
2435 buf->f_namelen = EXT3_NAME_LEN; 2442 buf->f_namelen = EXT3_NAME_LEN;
2443 fsid = le64_to_cpup((void *)es->s_uuid) ^
2444 le64_to_cpup((void *)es->s_uuid + sizeof(u64));
2445 buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
2446 buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
2436 return 0; 2447 return 0;
2437} 2448}
2438 2449
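
The statfs hunk above derives f_fsid by reading the 16-byte superblock UUID as two little-endian 64-bit halves, XOR-ing them, and splitting the result into two 32-bit words. A userspace sketch of the same folding, using an arbitrary example UUID:

#include <stdint.h>
#include <stdio.h>

/* Read 8 bytes as a little-endian 64-bit value (what le64_to_cpup() does on disk data). */
static uint64_t le64_read(const unsigned char *p)
{
        uint64_t v = 0;
        for (int i = 7; i >= 0; i--)
                v = (v << 8) | p[i];
        return v;
}

int main(void)
{
        /* Example UUID bytes; any 16-byte identifier works the same way. */
        unsigned char uuid[16] = {
                0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
                0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10,
        };
        uint64_t fsid = le64_read(uuid) ^ le64_read(uuid + 8);
        uint32_t val0 = fsid & 0xFFFFFFFFUL;
        uint32_t val1 = (fsid >> 32) & 0xFFFFFFFFUL;

        printf("f_fsid = { 0x%08x, 0x%08x }\n", (unsigned)val0, (unsigned)val1);
        return 0;
}
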
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index f86f2482f01d..99857a400f4b 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -459,14 +459,11 @@ static void ext3_xattr_update_super_block(handle_t *handle,
459 if (EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR)) 459 if (EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR))
460 return; 460 return;
461 461
462 lock_super(sb);
463 if (ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh) == 0) { 462 if (ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh) == 0) {
464 EXT3_SB(sb)->s_es->s_feature_compat |= 463 EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR);
465 cpu_to_le32(EXT3_FEATURE_COMPAT_EXT_ATTR);
466 sb->s_dirt = 1; 464 sb->s_dirt = 1;
467 ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); 465 ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
468 } 466 }
469 unlock_super(sb);
470} 467}
471 468
472/* 469/*
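
The xattr change swaps the open-coded s_feature_compat |= cpu_to_le32(...) for the EXT3_SET_COMPAT_FEATURE() helper; either way the feature bit is OR-ed into a little-endian on-disk word. A userspace sketch of that read-modify-write, with put_le32()/get_le32() helpers and an example flag value defined here for illustration:

#include <stdint.h>
#include <stdio.h>

#define FEATURE_COMPAT_EXT_ATTR 0x0008  /* example flag value */

/* Store a 32-bit value in little-endian byte order, like cpu_to_le32() for on-disk fields. */
static void put_le32(unsigned char *p, uint32_t v)
{
        p[0] = v & 0xff;
        p[1] = (v >> 8) & 0xff;
        p[2] = (v >> 16) & 0xff;
        p[3] = (v >> 24) & 0xff;
}

static uint32_t get_le32(const unsigned char *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
        unsigned char s_feature_compat[4];      /* stand-in for the on-disk field */

        put_le32(s_feature_compat, 0);
        /* The SET_COMPAT_FEATURE helper boils down to: read, OR in the flag, write back little-endian. */
        put_le32(s_feature_compat, get_le32(s_feature_compat) | FEATURE_COMPAT_EXT_ATTR);

        printf("feature set: %s\n",
               (get_le32(s_feature_compat) & FEATURE_COMPAT_EXT_ATTR) ? "yes" : "no");
        return 0;
}
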
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index a6acb96ebeb9..ae6e7e502ac9 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -5,7 +5,8 @@
5obj-$(CONFIG_EXT4DEV_FS) += ext4dev.o 5obj-$(CONFIG_EXT4DEV_FS) += ext4dev.o
6 6
7ext4dev-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \ 7ext4dev-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
8 ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o 8 ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
9 ext4_jbd2.o
9 10
10ext4dev-$(CONFIG_EXT4DEV_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o 11ext4dev-$(CONFIG_EXT4DEV_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
11ext4dev-$(CONFIG_EXT4DEV_FS_POSIX_ACL) += acl.o 12ext4dev-$(CONFIG_EXT4DEV_FS_POSIX_ACL) += acl.o
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 5d45582f9517..c4dd1103ccf1 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -165,7 +165,7 @@ restart:
165 165
166 printk("Block Allocation Reservation Windows Map (%s):\n", fn); 166 printk("Block Allocation Reservation Windows Map (%s):\n", fn);
167 while (n) { 167 while (n) {
168 rsv = list_entry(n, struct ext4_reserve_window_node, rsv_node); 168 rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
169 if (verbose) 169 if (verbose)
170 printk("reservation window 0x%p " 170 printk("reservation window 0x%p "
171 "start: %llu, end: %llu\n", 171 "start: %llu, end: %llu\n",
@@ -747,7 +747,7 @@ find_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
747 here = 0; 747 here = 0;
748 748
749 p = ((char *)bh->b_data) + (here >> 3); 749 p = ((char *)bh->b_data) + (here >> 3);
750 r = memscan(p, 0, (maxblocks - here + 7) >> 3); 750 r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
751 next = (r - ((char *)bh->b_data)) << 3; 751 next = (r - ((char *)bh->b_data)) << 3;
752 752
753 if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh)) 753 if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh))
@@ -966,7 +966,7 @@ static int find_next_reservable_window(
966 966
967 prev = rsv; 967 prev = rsv;
968 next = rb_next(&rsv->rsv_node); 968 next = rb_next(&rsv->rsv_node);
969 rsv = list_entry(next,struct ext4_reserve_window_node,rsv_node); 969 rsv = rb_entry(next,struct ext4_reserve_window_node,rsv_node);
970 970
971 /* 971 /*
972 * Reached the last reservation, we can just append to the 972 * Reached the last reservation, we can just append to the
@@ -1165,7 +1165,7 @@ retry:
1165 * check if the first free block is within the 1165 * check if the first free block is within the
1166 * free space we just reserved 1166 * free space we just reserved
1167 */ 1167 */
1168 if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end) 1168 if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
1169 return 0; /* success */ 1169 return 0; /* success */
1170 /* 1170 /*
1171 * if the first free bit we found is out of the reservable space 1171 * if the first free bit we found is out of the reservable space
@@ -1210,7 +1210,7 @@ static void try_to_extend_reservation(struct ext4_reserve_window_node *my_rsv,
1210 if (!next) 1210 if (!next)
1211 my_rsv->rsv_end += size; 1211 my_rsv->rsv_end += size;
1212 else { 1212 else {
1213 next_rsv = list_entry(next, struct ext4_reserve_window_node, rsv_node); 1213 next_rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node);
1214 1214
1215 if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size) 1215 if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
1216 my_rsv->rsv_end += size; 1216 my_rsv->rsv_end += size;
@@ -1288,7 +1288,7 @@ ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1288 } 1288 }
1289 /* 1289 /*
1290 * grp_goal is a group relative block number (if there is a goal) 1290 * grp_goal is a group relative block number (if there is a goal)
1291 * 0 < grp_goal < EXT4_BLOCKS_PER_GROUP(sb) 1291 * 0 <= grp_goal < EXT4_BLOCKS_PER_GROUP(sb)
1292 * first block is a filesystem wide block number 1292 * first block is a filesystem wide block number
1293 * first block is the block number of the first block in this group 1293 * first block is the block number of the first block in this group
1294 */ 1294 */
@@ -1324,10 +1324,14 @@ ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1324 if (!goal_in_my_reservation(&my_rsv->rsv_window, 1324 if (!goal_in_my_reservation(&my_rsv->rsv_window,
1325 grp_goal, group, sb)) 1325 grp_goal, group, sb))
1326 grp_goal = -1; 1326 grp_goal = -1;
1327 } else if (grp_goal > 0 && 1327 } else if (grp_goal >= 0) {
1328 (my_rsv->rsv_end-grp_goal+1) < *count) 1328 int curr = my_rsv->rsv_end -
1329 try_to_extend_reservation(my_rsv, sb, 1329 (grp_goal + group_first_block) + 1;
1330 *count-my_rsv->rsv_end + grp_goal - 1); 1330
1331 if (curr < *count)
1332 try_to_extend_reservation(my_rsv, sb,
1333 *count - curr);
1334 }
1331 1335
1332 if ((my_rsv->rsv_start > group_last_block) || 1336 if ((my_rsv->rsv_start > group_last_block) ||
1333 (my_rsv->rsv_end < group_first_block)) { 1337 (my_rsv->rsv_end < group_first_block)) {
@@ -1525,10 +1529,8 @@ retry_alloc:
1525 if (group_no >= ngroups) 1529 if (group_no >= ngroups)
1526 group_no = 0; 1530 group_no = 0;
1527 gdp = ext4_get_group_desc(sb, group_no, &gdp_bh); 1531 gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
1528 if (!gdp) { 1532 if (!gdp)
1529 *errp = -EIO; 1533 goto io_error;
1530 goto out;
1531 }
1532 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); 1534 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1533 /* 1535 /*
1534 * skip this group if the number of 1536 * skip this group if the number of
@@ -1562,6 +1564,7 @@ retry_alloc:
1562 */ 1564 */
1563 if (my_rsv) { 1565 if (my_rsv) {
1564 my_rsv = NULL; 1566 my_rsv = NULL;
1567 windowsz = 0;
1565 group_no = goal_group; 1568 group_no = goal_group;
1566 goto retry_alloc; 1569 goto retry_alloc;
1567 } 1570 }
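
The balloc hunks replace list_entry() with rb_entry() for nodes embedded in an rbtree; both are container_of() underneath, mapping a pointer to an embedded member back to its enclosing structure. A standalone sketch of that mapping with a made-up reserve_window struct:

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of()/rb_entry(): recover the struct from a member pointer. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rb_node_stub {           /* stand-in for struct rb_node */
        int dummy;
};

struct reserve_window {         /* stand-in for ext4_reserve_window_node */
        unsigned long start;
        unsigned long end;
        struct rb_node_stub rsv_node;
};

int main(void)
{
        struct reserve_window w = { .start = 100, .end = 163 };
        struct rb_node_stub *n = &w.rsv_node;   /* what an rbtree walk hands back */
        struct reserve_window *rsv = container_of(n, struct reserve_window, rsv_node);

        printf("window %lu-%lu (recovered == original: %d)\n",
               rsv->start, rsv->end, rsv == &w);
        return 0;
}

The kernel's rb_entry(node, type, member) is literally defined as container_of(node, type, member), so the hunks above change the spelling to match the data structure without changing the generated code.
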
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index f8595787a70e..f2ed3e7fb9f5 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -153,6 +153,9 @@ static int ext4_readdir(struct file * filp,
153 ext4_error (sb, "ext4_readdir", 153 ext4_error (sb, "ext4_readdir",
154 "directory #%lu contains a hole at offset %lu", 154 "directory #%lu contains a hole at offset %lu",
155 inode->i_ino, (unsigned long)filp->f_pos); 155 inode->i_ino, (unsigned long)filp->f_pos);
156 /* corrupt size? Maybe no more blocks to read */
157 if (filp->f_pos > inode->i_blocks << 9)
158 break;
156 filp->f_pos += sb->s_blocksize - offset; 159 filp->f_pos += sb->s_blocksize - offset;
157 continue; 160 continue;
158 } 161 }
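
The readdir hunk compares f_pos against inode->i_blocks << 9: i_blocks counts 512-byte units, so the shift gives the number of bytes actually backed by blocks, and a position beyond that means a corrupt size with nothing left to read. A tiny sketch of the comparison with invented numbers:

#include <stdio.h>

int main(void)
{
        unsigned long long i_blocks = 8;                        /* 512-byte units backing the directory */
        unsigned long long bytes_backed = i_blocks << 9;        /* 8 * 512 = 4096 bytes */
        unsigned long long f_pos = 8192;                        /* position implied by a corrupt size */

        if (f_pos > bytes_backed)
                printf("pos %llu beyond %llu backed bytes: stop reading\n", f_pos, bytes_backed);
        return 0;
}
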
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
new file mode 100644
index 000000000000..d6afe4e27340
--- /dev/null
+++ b/fs/ext4/ext4_jbd2.c
@@ -0,0 +1,59 @@
1/*
2 * Interface between ext4 and JBD
3 */
4
5#include <linux/ext4_jbd2.h>
6
7int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
8 struct buffer_head *bh)
9{
10 int err = jbd2_journal_get_undo_access(handle, bh);
11 if (err)
12 ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
13 return err;
14}
15
16int __ext4_journal_get_write_access(const char *where, handle_t *handle,
17 struct buffer_head *bh)
18{
19 int err = jbd2_journal_get_write_access(handle, bh);
20 if (err)
21 ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
22 return err;
23}
24
25int __ext4_journal_forget(const char *where, handle_t *handle,
26 struct buffer_head *bh)
27{
28 int err = jbd2_journal_forget(handle, bh);
29 if (err)
30 ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
31 return err;
32}
33
34int __ext4_journal_revoke(const char *where, handle_t *handle,
35 ext4_fsblk_t blocknr, struct buffer_head *bh)
36{
37 int err = jbd2_journal_revoke(handle, blocknr, bh);
38 if (err)
39 ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
40 return err;
41}
42
43int __ext4_journal_get_create_access(const char *where,
44 handle_t *handle, struct buffer_head *bh)
45{
46 int err = jbd2_journal_get_create_access(handle, bh);
47 if (err)
48 ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
49 return err;
50}
51
52int __ext4_journal_dirty_metadata(const char *where,
53 handle_t *handle, struct buffer_head *bh)
54{
55 int err = jbd2_journal_dirty_metadata(handle, bh);
56 if (err)
57 ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
58 return err;
59}
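
Each wrapper in the new ext4_jbd2.c calls one jbd2 primitive and funnels any failure into ext4_journal_abort_handle(), so call sites stay one-liners. A userspace sketch of that shape, with hypothetical do_step() and report_failure() in place of the jbd2 calls:

#include <errno.h>
#include <stdio.h>

/* Hypothetical primitive that can fail. */
static int do_step(int fail)
{
        return fail ? -EIO : 0;
}

/* Single place that records where a primitive failed (the role of ext4_journal_abort_handle()). */
static void report_failure(const char *where, int err)
{
        fprintf(stderr, "%s: error %d\n", where, err);
}

/* Thin wrapper: run the primitive, report on error, hand the code back unchanged. */
static int checked_step(const char *where, int fail)
{
        int err = do_step(fail);
        if (err)
                report_failure(where, err);
        return err;
}

int main(void)
{
        checked_step(__func__, 0);      /* succeeds silently */
        checked_step(__func__, 1);      /* reports and returns -EIO */
        return 0;
}
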
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 2608dce18f3e..dc2724fa7622 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -48,7 +48,7 @@
48 * ext_pblock: 48 * ext_pblock:
49 * combine low and high parts of physical block number into ext4_fsblk_t 49 * combine low and high parts of physical block number into ext4_fsblk_t
50 */ 50 */
51static inline ext4_fsblk_t ext_pblock(struct ext4_extent *ex) 51static ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
52{ 52{
53 ext4_fsblk_t block; 53 ext4_fsblk_t block;
54 54
@@ -61,7 +61,7 @@ static inline ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
61 * idx_pblock: 61 * idx_pblock:
62 * combine low and high parts of a leaf physical block number into ext4_fsblk_t 62 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
63 */ 63 */
64static inline ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix) 64static ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
65{ 65{
66 ext4_fsblk_t block; 66 ext4_fsblk_t block;
67 67
@@ -75,7 +75,7 @@ static inline ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
75 * stores a large physical block number into an extent struct, 75 * stores a large physical block number into an extent struct,
76 * breaking it into parts 76 * breaking it into parts
77 */ 77 */
78static inline void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb) 78static void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
79{ 79{
80 ex->ee_start = cpu_to_le32((unsigned long) (pb & 0xffffffff)); 80 ex->ee_start = cpu_to_le32((unsigned long) (pb & 0xffffffff));
81 ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff); 81 ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
@@ -86,7 +86,7 @@ static inline void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb
86 * stores a large physical block number into an index struct, 86 * stores a large physical block number into an index struct,
87 * breaking it into parts 87 * breaking it into parts
88 */ 88 */
89static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb) 89static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
90{ 90{
91 ix->ei_leaf = cpu_to_le32((unsigned long) (pb & 0xffffffff)); 91 ix->ei_leaf = cpu_to_le32((unsigned long) (pb & 0xffffffff));
92 ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff); 92 ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
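
The two store helpers split a 48-bit physical block number into a 32-bit low word and a 16-bit high word; ((pb >> 31) >> 1) & 0xffff picks out bits 32-47. A sketch of the split and the matching recombination, using an arbitrary example block (the on-disk endianness conversion is left out here):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t pb = 0x0000123456789ABCULL;    /* example 48-bit physical block number */

        uint32_t lo = (uint32_t)(pb & 0xffffffff);
        uint16_t hi = (uint16_t)(((pb >> 31) >> 1) & 0xffff);   /* same as pb >> 32 */

        uint64_t back = (uint64_t)lo | ((uint64_t)hi << 32);

        printf("pb=%#llx lo=%#x hi=%#x recombined=%#llx\n",
               (unsigned long long)pb, (unsigned)lo, (unsigned)hi,
               (unsigned long long)back);
        return 0;
}
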
@@ -186,7 +186,8 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
186 depth = path->p_depth; 186 depth = path->p_depth;
187 187
188 /* try to predict block placement */ 188 /* try to predict block placement */
189 if ((ex = path[depth].p_ext)) 189 ex = path[depth].p_ext;
190 if (ex)
190 return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block)); 191 return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));
191 192
192 /* it looks like index is empty; 193 /* it looks like index is empty;
@@ -215,7 +216,7 @@ ext4_ext_new_block(handle_t *handle, struct inode *inode,
215 return newblock; 216 return newblock;
216} 217}
217 218
218static inline int ext4_ext_space_block(struct inode *inode) 219static int ext4_ext_space_block(struct inode *inode)
219{ 220{
220 int size; 221 int size;
221 222
@@ -228,7 +229,7 @@ static inline int ext4_ext_space_block(struct inode *inode)
228 return size; 229 return size;
229} 230}
230 231
231static inline int ext4_ext_space_block_idx(struct inode *inode) 232static int ext4_ext_space_block_idx(struct inode *inode)
232{ 233{
233 int size; 234 int size;
234 235
@@ -241,7 +242,7 @@ static inline int ext4_ext_space_block_idx(struct inode *inode)
241 return size; 242 return size;
242} 243}
243 244
244static inline int ext4_ext_space_root(struct inode *inode) 245static int ext4_ext_space_root(struct inode *inode)
245{ 246{
246 int size; 247 int size;
247 248
@@ -255,7 +256,7 @@ static inline int ext4_ext_space_root(struct inode *inode)
255 return size; 256 return size;
256} 257}
257 258
258static inline int ext4_ext_space_root_idx(struct inode *inode) 259static int ext4_ext_space_root_idx(struct inode *inode)
259{ 260{
260 int size; 261 int size;
261 262
@@ -476,13 +477,12 @@ ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
476 477
477 /* account possible depth increase */ 478 /* account possible depth increase */
478 if (!path) { 479 if (!path) {
479 path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 2), 480 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
480 GFP_NOFS); 481 GFP_NOFS);
481 if (!path) 482 if (!path)
482 return ERR_PTR(-ENOMEM); 483 return ERR_PTR(-ENOMEM);
483 alloc = 1; 484 alloc = 1;
484 } 485 }
485 memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));
486 path[0].p_hdr = eh; 486 path[0].p_hdr = eh;
487 487
488 /* walk through the tree */ 488 /* walk through the tree */
@@ -543,7 +543,8 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
543 struct ext4_extent_idx *ix; 543 struct ext4_extent_idx *ix;
544 int len, err; 544 int len, err;
545 545
546 if ((err = ext4_ext_get_access(handle, inode, curp))) 546 err = ext4_ext_get_access(handle, inode, curp);
547 if (err)
547 return err; 548 return err;
548 549
549 BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block)); 550 BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
@@ -641,10 +642,9 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
641 * We need this to handle errors and free blocks 642 * We need this to handle errors and free blocks
642 * upon them. 643 * upon them.
643 */ 644 */
644 ablocks = kmalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS); 645 ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
645 if (!ablocks) 646 if (!ablocks)
646 return -ENOMEM; 647 return -ENOMEM;
647 memset(ablocks, 0, sizeof(ext4_fsblk_t) * depth);
648 648
649 /* allocate all needed blocks */ 649 /* allocate all needed blocks */
650 ext_debug("allocate %d blocks for indexes/leaf\n", depth - at); 650 ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
@@ -665,7 +665,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
665 } 665 }
666 lock_buffer(bh); 666 lock_buffer(bh);
667 667
668 if ((err = ext4_journal_get_create_access(handle, bh))) 668 err = ext4_journal_get_create_access(handle, bh);
669 if (err)
669 goto cleanup; 670 goto cleanup;
670 671
671 neh = ext_block_hdr(bh); 672 neh = ext_block_hdr(bh);
@@ -702,18 +703,21 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
702 set_buffer_uptodate(bh); 703 set_buffer_uptodate(bh);
703 unlock_buffer(bh); 704 unlock_buffer(bh);
704 705
705 if ((err = ext4_journal_dirty_metadata(handle, bh))) 706 err = ext4_journal_dirty_metadata(handle, bh);
707 if (err)
706 goto cleanup; 708 goto cleanup;
707 brelse(bh); 709 brelse(bh);
708 bh = NULL; 710 bh = NULL;
709 711
710 /* correct old leaf */ 712 /* correct old leaf */
711 if (m) { 713 if (m) {
712 if ((err = ext4_ext_get_access(handle, inode, path + depth))) 714 err = ext4_ext_get_access(handle, inode, path + depth);
715 if (err)
713 goto cleanup; 716 goto cleanup;
714 path[depth].p_hdr->eh_entries = 717 path[depth].p_hdr->eh_entries =
715 cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m); 718 cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
716 if ((err = ext4_ext_dirty(handle, inode, path + depth))) 719 err = ext4_ext_dirty(handle, inode, path + depth);
720 if (err)
717 goto cleanup; 721 goto cleanup;
718 722
719 } 723 }
@@ -736,7 +740,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
736 } 740 }
737 lock_buffer(bh); 741 lock_buffer(bh);
738 742
739 if ((err = ext4_journal_get_create_access(handle, bh))) 743 err = ext4_journal_get_create_access(handle, bh);
744 if (err)
740 goto cleanup; 745 goto cleanup;
741 746
742 neh = ext_block_hdr(bh); 747 neh = ext_block_hdr(bh);
@@ -780,7 +785,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
780 set_buffer_uptodate(bh); 785 set_buffer_uptodate(bh);
781 unlock_buffer(bh); 786 unlock_buffer(bh);
782 787
783 if ((err = ext4_journal_dirty_metadata(handle, bh))) 788 err = ext4_journal_dirty_metadata(handle, bh);
789 if (err)
784 goto cleanup; 790 goto cleanup;
785 brelse(bh); 791 brelse(bh);
786 bh = NULL; 792 bh = NULL;
@@ -800,9 +806,6 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
800 } 806 }
801 807
802 /* insert new index */ 808 /* insert new index */
803 if (err)
804 goto cleanup;
805
806 err = ext4_ext_insert_index(handle, inode, path + at, 809 err = ext4_ext_insert_index(handle, inode, path + at,
807 le32_to_cpu(border), newblock); 810 le32_to_cpu(border), newblock);
808 811
@@ -857,7 +860,8 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
857 } 860 }
858 lock_buffer(bh); 861 lock_buffer(bh);
859 862
860 if ((err = ext4_journal_get_create_access(handle, bh))) { 863 err = ext4_journal_get_create_access(handle, bh);
864 if (err) {
861 unlock_buffer(bh); 865 unlock_buffer(bh);
862 goto out; 866 goto out;
863 } 867 }
@@ -877,11 +881,13 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
877 set_buffer_uptodate(bh); 881 set_buffer_uptodate(bh);
878 unlock_buffer(bh); 882 unlock_buffer(bh);
879 883
880 if ((err = ext4_journal_dirty_metadata(handle, bh))) 884 err = ext4_journal_dirty_metadata(handle, bh);
885 if (err)
881 goto out; 886 goto out;
882 887
883 /* create index in new top-level index: num,max,pointer */ 888 /* create index in new top-level index: num,max,pointer */
884 if ((err = ext4_ext_get_access(handle, inode, curp))) 889 err = ext4_ext_get_access(handle, inode, curp);
890 if (err)
885 goto out; 891 goto out;
886 892
887 curp->p_hdr->eh_magic = EXT4_EXT_MAGIC; 893 curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
@@ -1073,27 +1079,31 @@ int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1073 */ 1079 */
1074 k = depth - 1; 1080 k = depth - 1;
1075 border = path[depth].p_ext->ee_block; 1081 border = path[depth].p_ext->ee_block;
1076 if ((err = ext4_ext_get_access(handle, inode, path + k))) 1082 err = ext4_ext_get_access(handle, inode, path + k);
1083 if (err)
1077 return err; 1084 return err;
1078 path[k].p_idx->ei_block = border; 1085 path[k].p_idx->ei_block = border;
1079 if ((err = ext4_ext_dirty(handle, inode, path + k))) 1086 err = ext4_ext_dirty(handle, inode, path + k);
1087 if (err)
1080 return err; 1088 return err;
1081 1089
1082 while (k--) { 1090 while (k--) {
1083 /* change all left-side indexes */ 1091 /* change all left-side indexes */
1084 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) 1092 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1085 break; 1093 break;
1086 if ((err = ext4_ext_get_access(handle, inode, path + k))) 1094 err = ext4_ext_get_access(handle, inode, path + k);
1095 if (err)
1087 break; 1096 break;
1088 path[k].p_idx->ei_block = border; 1097 path[k].p_idx->ei_block = border;
1089 if ((err = ext4_ext_dirty(handle, inode, path + k))) 1098 err = ext4_ext_dirty(handle, inode, path + k);
1099 if (err)
1090 break; 1100 break;
1091 } 1101 }
1092 1102
1093 return err; 1103 return err;
1094} 1104}
1095 1105
1096static int inline 1106static int
1097ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, 1107ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
1098 struct ext4_extent *ex2) 1108 struct ext4_extent *ex2)
1099{ 1109{
@@ -1145,7 +1155,8 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1145 le16_to_cpu(newext->ee_len), 1155 le16_to_cpu(newext->ee_len),
1146 le32_to_cpu(ex->ee_block), 1156 le32_to_cpu(ex->ee_block),
1147 le16_to_cpu(ex->ee_len), ext_pblock(ex)); 1157 le16_to_cpu(ex->ee_len), ext_pblock(ex));
1148 if ((err = ext4_ext_get_access(handle, inode, path + depth))) 1158 err = ext4_ext_get_access(handle, inode, path + depth);
1159 if (err)
1149 return err; 1160 return err;
1150 ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len) 1161 ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len)
1151 + le16_to_cpu(newext->ee_len)); 1162 + le16_to_cpu(newext->ee_len));
@@ -1195,7 +1206,8 @@ repeat:
1195has_space: 1206has_space:
1196 nearex = path[depth].p_ext; 1207 nearex = path[depth].p_ext;
1197 1208
1198 if ((err = ext4_ext_get_access(handle, inode, path + depth))) 1209 err = ext4_ext_get_access(handle, inode, path + depth);
1210 if (err)
1199 goto cleanup; 1211 goto cleanup;
1200 1212
1201 if (!nearex) { 1213 if (!nearex) {
@@ -1383,7 +1395,7 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block,
1383 return err; 1395 return err;
1384} 1396}
1385 1397
1386static inline void 1398static void
1387ext4_ext_put_in_cache(struct inode *inode, __u32 block, 1399ext4_ext_put_in_cache(struct inode *inode, __u32 block,
1388 __u32 len, __u32 start, int type) 1400 __u32 len, __u32 start, int type)
1389{ 1401{
@@ -1401,7 +1413,7 @@ ext4_ext_put_in_cache(struct inode *inode, __u32 block,
1401 * calculate boundaries of the gap that the requested block fits into 1413 * calculate boundaries of the gap that the requested block fits into
1402 * and cache this gap 1414 * and cache this gap
1403 */ 1415 */
1404static inline void 1416static void
1405ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, 1417ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
1406 unsigned long block) 1418 unsigned long block)
1407{ 1419{
@@ -1442,7 +1454,7 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
1442 ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP); 1454 ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
1443} 1455}
1444 1456
1445static inline int 1457static int
1446ext4_ext_in_cache(struct inode *inode, unsigned long block, 1458ext4_ext_in_cache(struct inode *inode, unsigned long block,
1447 struct ext4_extent *ex) 1459 struct ext4_extent *ex)
1448{ 1460{
@@ -1489,10 +1501,12 @@ int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
1489 path--; 1501 path--;
1490 leaf = idx_pblock(path->p_idx); 1502 leaf = idx_pblock(path->p_idx);
1491 BUG_ON(path->p_hdr->eh_entries == 0); 1503 BUG_ON(path->p_hdr->eh_entries == 0);
1492 if ((err = ext4_ext_get_access(handle, inode, path))) 1504 err = ext4_ext_get_access(handle, inode, path);
1505 if (err)
1493 return err; 1506 return err;
1494 path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1); 1507 path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
1495 if ((err = ext4_ext_dirty(handle, inode, path))) 1508 err = ext4_ext_dirty(handle, inode, path);
1509 if (err)
1496 return err; 1510 return err;
1497 ext_debug("index is empty, remove it, free block %llu\n", leaf); 1511 ext_debug("index is empty, remove it, free block %llu\n", leaf);
1498 bh = sb_find_get_block(inode->i_sb, leaf); 1512 bh = sb_find_get_block(inode->i_sb, leaf);
@@ -1509,7 +1523,7 @@ int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
1509 * the caller should calculate credits under truncate_mutex and 1523 * the caller should calculate credits under truncate_mutex and
1510 * pass the actual path. 1524 * pass the actual path.
1511 */ 1525 */
1512int inline ext4_ext_calc_credits_for_insert(struct inode *inode, 1526int ext4_ext_calc_credits_for_insert(struct inode *inode,
1513 struct ext4_ext_path *path) 1527 struct ext4_ext_path *path)
1514{ 1528{
1515 int depth, needed; 1529 int depth, needed;
@@ -1534,16 +1548,17 @@ int inline ext4_ext_calc_credits_for_insert(struct inode *inode,
1534 1548
1535 /* 1549 /*
1536 * tree can be full, so it would need to grow in depth: 1550 * tree can be full, so it would need to grow in depth:
1537 * allocation + old root + new root 1551 * we need one credit to modify old root, credits for
1552 * new root will be added in split accounting
1538 */ 1553 */
1539 needed += 2 + 1 + 1; 1554 needed += 1;
1540 1555
1541 /* 1556 /*
1542 * Index split can happen, we would need: 1557 * Index split can happen, we would need:
1543 * allocate intermediate indexes (bitmap + group) 1558 * allocate intermediate indexes (bitmap + group)
1544 * + change two blocks at each level, but root (already included) 1559 * + change two blocks at each level, but root (already included)
1545 */ 1560 */
1546 needed = (depth * 2) + (depth * 2); 1561 needed += (depth * 2) + (depth * 2);
1547 1562
1548 /* any allocation modifies superblock */ 1563 /* any allocation modifies superblock */
1549 needed += 1; 1564 needed += 1;
@@ -1718,7 +1733,7 @@ out:
1718 * ext4_ext_more_to_rm: 1733 * ext4_ext_more_to_rm:
1719 * returns 1 if current index has to be freed (even partial) 1734 * returns 1 if current index has to be freed (even partial)
1720 */ 1735 */
1721static int inline 1736static int
1722ext4_ext_more_to_rm(struct ext4_ext_path *path) 1737ext4_ext_more_to_rm(struct ext4_ext_path *path)
1723{ 1738{
1724 BUG_ON(path->p_idx == NULL); 1739 BUG_ON(path->p_idx == NULL);
@@ -1756,12 +1771,11 @@ int ext4_ext_remove_space(struct inode *inode, unsigned long start)
1756 * We start scanning from right side, freeing all the blocks 1771 * We start scanning from right side, freeing all the blocks
1757 * after i_size and walking into the tree depth-wise. 1772 * after i_size and walking into the tree depth-wise.
1758 */ 1773 */
1759 path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL); 1774 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
1760 if (path == NULL) { 1775 if (path == NULL) {
1761 ext4_journal_stop(handle); 1776 ext4_journal_stop(handle);
1762 return -ENOMEM; 1777 return -ENOMEM;
1763 } 1778 }
1764 memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));
1765 path[0].p_hdr = ext_inode_hdr(inode); 1779 path[0].p_hdr = ext_inode_hdr(inode);
1766 if (ext4_ext_check_header(__FUNCTION__, inode, path[0].p_hdr)) { 1780 if (ext4_ext_check_header(__FUNCTION__, inode, path[0].p_hdr)) {
1767 err = -EIO; 1781 err = -EIO;
@@ -1932,7 +1946,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
1932 mutex_lock(&EXT4_I(inode)->truncate_mutex); 1946 mutex_lock(&EXT4_I(inode)->truncate_mutex);
1933 1947
1934 /* check in cache */ 1948 /* check in cache */
1935 if ((goal = ext4_ext_in_cache(inode, iblock, &newex))) { 1949 goal = ext4_ext_in_cache(inode, iblock, &newex);
1950 if (goal) {
1936 if (goal == EXT4_EXT_CACHE_GAP) { 1951 if (goal == EXT4_EXT_CACHE_GAP) {
1937 if (!create) { 1952 if (!create) {
1938 /* block isn't allocated yet and 1953 /* block isn't allocated yet and
@@ -1971,7 +1986,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
1971 */ 1986 */
1972 BUG_ON(path[depth].p_ext == NULL && depth != 0); 1987 BUG_ON(path[depth].p_ext == NULL && depth != 0);
1973 1988
1974 if ((ex = path[depth].p_ext)) { 1989 ex = path[depth].p_ext;
1990 if (ex) {
1975 unsigned long ee_block = le32_to_cpu(ex->ee_block); 1991 unsigned long ee_block = le32_to_cpu(ex->ee_block);
1976 ext4_fsblk_t ee_start = ext_pblock(ex); 1992 ext4_fsblk_t ee_start = ext_pblock(ex);
1977 unsigned short ee_len = le16_to_cpu(ex->ee_len); 1993 unsigned short ee_len = le16_to_cpu(ex->ee_len);
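
Several hunks in this file replace kmalloc() followed by memset() with a single kzalloc(), which returns already-zeroed memory. The closest userspace analogue is calloc() versus malloc()+memset(); a short sketch with a made-up path_stub struct:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct path_stub {              /* stand-in for struct ext4_ext_path */
        void *p_hdr;
        int p_depth;
};

int main(void)
{
        int depth = 3;

        /* Old pattern: allocate, then clear by hand. */
        struct path_stub *a = malloc(sizeof(*a) * (depth + 1));
        if (!a)
                return 1;
        memset(a, 0, sizeof(*a) * (depth + 1));

        /* New pattern: one call returns zeroed memory (kzalloc() in the kernel). */
        struct path_stub *b = calloc(depth + 1, sizeof(*b));
        if (!b) {
                free(a);
                return 1;
        }

        printf("a[0].p_depth=%d b[0].p_depth=%d\n", a[0].p_depth, b[0].p_depth);
        free(a);
        free(b);
        return 0;
}
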
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 0a60ec5a16db..1d85d4ec9598 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1147,37 +1147,102 @@ static int do_journal_get_write_access(handle_t *handle,
1147 return ext4_journal_get_write_access(handle, bh); 1147 return ext4_journal_get_write_access(handle, bh);
1148} 1148}
1149 1149
1150/*
1151 * The idea of this helper function is the following:
1152 * if prepare_write has allocated some blocks, but not all of them, the
1153 * transaction must include the content of the newly allocated blocks.
1154 * This content is expected to be set to zeroes by block_prepare_write().
1155 * 2006/10/14 SAW
1156 */
1157static int ext4_prepare_failure(struct file *file, struct page *page,
1158 unsigned from, unsigned to)
1159{
1160 struct address_space *mapping;
1161 struct buffer_head *bh, *head, *next;
1162 unsigned block_start, block_end;
1163 unsigned blocksize;
1164 int ret;
1165 handle_t *handle = ext4_journal_current_handle();
1166
1167 mapping = page->mapping;
1168 if (ext4_should_writeback_data(mapping->host)) {
1169 /* optimization: no constraints about data */
1170skip:
1171 return ext4_journal_stop(handle);
1172 }
1173
1174 head = page_buffers(page);
1175 blocksize = head->b_size;
1176 for ( bh = head, block_start = 0;
1177 bh != head || !block_start;
1178 block_start = block_end, bh = next)
1179 {
1180 next = bh->b_this_page;
1181 block_end = block_start + blocksize;
1182 if (block_end <= from)
1183 continue;
1184 if (block_start >= to) {
1185 block_start = to;
1186 break;
1187 }
1188 if (!buffer_mapped(bh))
1189 /* prepare_write failed on this bh */
1190 break;
1191 if (ext4_should_journal_data(mapping->host)) {
1192 ret = do_journal_get_write_access(handle, bh);
1193 if (ret) {
1194 ext4_journal_stop(handle);
1195 return ret;
1196 }
1197 }
1198 /*
1199 * block_start here becomes the first block where the current iteration
1200 * of prepare_write failed.
1201 */
1202 }
1203 if (block_start <= from)
1204 goto skip;
1205
1206 /* commit allocated and zeroed buffers */
1207 return mapping->a_ops->commit_write(file, page, from, block_start);
1208}
1209
1150static int ext4_prepare_write(struct file *file, struct page *page, 1210static int ext4_prepare_write(struct file *file, struct page *page,
1151 unsigned from, unsigned to) 1211 unsigned from, unsigned to)
1152{ 1212{
1153 struct inode *inode = page->mapping->host; 1213 struct inode *inode = page->mapping->host;
1154 int ret, needed_blocks = ext4_writepage_trans_blocks(inode); 1214 int ret, ret2;
1215 int needed_blocks = ext4_writepage_trans_blocks(inode);
1155 handle_t *handle; 1216 handle_t *handle;
1156 int retries = 0; 1217 int retries = 0;
1157 1218
1158retry: 1219retry:
1159 handle = ext4_journal_start(inode, needed_blocks); 1220 handle = ext4_journal_start(inode, needed_blocks);
1160 if (IS_ERR(handle)) { 1221 if (IS_ERR(handle))
1161 ret = PTR_ERR(handle); 1222 return PTR_ERR(handle);
1162 goto out;
1163 }
1164 if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode)) 1223 if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
1165 ret = nobh_prepare_write(page, from, to, ext4_get_block); 1224 ret = nobh_prepare_write(page, from, to, ext4_get_block);
1166 else 1225 else
1167 ret = block_prepare_write(page, from, to, ext4_get_block); 1226 ret = block_prepare_write(page, from, to, ext4_get_block);
1168 if (ret) 1227 if (ret)
1169 goto prepare_write_failed; 1228 goto failure;
1170 1229
1171 if (ext4_should_journal_data(inode)) { 1230 if (ext4_should_journal_data(inode)) {
1172 ret = walk_page_buffers(handle, page_buffers(page), 1231 ret = walk_page_buffers(handle, page_buffers(page),
1173 from, to, NULL, do_journal_get_write_access); 1232 from, to, NULL, do_journal_get_write_access);
1233 if (ret)
1234 /* fatal error, just put the handle and return */
1235 journal_stop(handle);
1174 } 1236 }
1175prepare_write_failed: 1237 return ret;
1176 if (ret) 1238
1177 ext4_journal_stop(handle); 1239failure:
1240 ret2 = ext4_prepare_failure(file, page, from, to);
1241 if (ret2 < 0)
1242 return ret2;
1178 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 1243 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
1179 goto retry; 1244 goto retry;
1180out: 1245 /* retry number exceeded, or other error like -EDQUOT */
1181 return ret; 1246 return ret;
1182} 1247}
1183 1248
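
ext4_prepare_failure() above walks the page's buffers across [from, to) and commits only the prefix that block_prepare_write() managed to map (and zero), stopping at the first unmapped buffer. A userspace simulation of that scan, using a plain array of mapped flags for hypothetical 1 KB buffers in a 4 KB page:

#include <stdio.h>

#define BLOCKSIZE       1024
#define NBUF            4       /* one 4 KB page worth of 1 KB buffers */

int main(void)
{
        /* 1 = buffer was mapped by prepare_write, 0 = allocation failed on this buffer. */
        int mapped[NBUF] = { 1, 1, 0, 0 };
        unsigned from = 0, to = 4096;
        unsigned block_start = 0, block_end;
        int i;

        for (i = 0; i < NBUF; i++, block_start = block_end) {
                block_end = block_start + BLOCKSIZE;
                if (block_end <= from)
                        continue;               /* buffer entirely before the write range */
                if (block_start >= to) {
                        block_start = to;       /* the whole range was mapped */
                        break;
                }
                if (!mapped[i])
                        break;                  /* first buffer where prepare_write failed */
        }

        if (block_start > from)
                printf("commit_write(from=%u, to=%u)\n", from, block_start);
        else
                printf("nothing usable was prepared\n");
        return 0;
}
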
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 8b1bd03d20f5..859990eac504 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -552,6 +552,15 @@ static int htree_dirblock_to_tree(struct file *dir_file,
552 dir->i_sb->s_blocksize - 552 dir->i_sb->s_blocksize -
553 EXT4_DIR_REC_LEN(0)); 553 EXT4_DIR_REC_LEN(0));
554 for (; de < top; de = ext4_next_entry(de)) { 554 for (; de < top; de = ext4_next_entry(de)) {
555 if (!ext4_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
556 (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
557 +((char *)de - bh->b_data))) {
558 /* On error, skip the f_pos to the next block. */
559 dir_file->f_pos = (dir_file->f_pos |
560 (dir->i_sb->s_blocksize - 1)) + 1;
561 brelse (bh);
562 return count;
563 }
555 ext4fs_dirhash(de->name, de->name_len, hinfo); 564 ext4fs_dirhash(de->name, de->name_len, hinfo);
556 if ((hinfo->hash < start_hash) || 565 if ((hinfo->hash < start_hash) ||
557 ((hinfo->hash == start_hash) && 566 ((hinfo->hash == start_hash) &&
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b4b022aa2bc2..486a641ca71b 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -486,7 +486,7 @@ static void ext4_put_super (struct super_block * sb)
486 return; 486 return;
487} 487}
488 488
489static kmem_cache_t *ext4_inode_cachep; 489static struct kmem_cache *ext4_inode_cachep;
490 490
491/* 491/*
492 * Called inside transaction, so use GFP_NOFS 492 * Called inside transaction, so use GFP_NOFS
@@ -495,7 +495,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
495{ 495{
496 struct ext4_inode_info *ei; 496 struct ext4_inode_info *ei;
497 497
498 ei = kmem_cache_alloc(ext4_inode_cachep, SLAB_NOFS); 498 ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
499 if (!ei) 499 if (!ei)
500 return NULL; 500 return NULL;
501#ifdef CONFIG_EXT4DEV_FS_POSIX_ACL 501#ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
@@ -513,7 +513,7 @@ static void ext4_destroy_inode(struct inode *inode)
513 kmem_cache_free(ext4_inode_cachep, EXT4_I(inode)); 513 kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
514} 514}
515 515
516static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 516static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
517{ 517{
518 struct ext4_inode_info *ei = (struct ext4_inode_info *) foo; 518 struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
519 519
@@ -1321,6 +1321,12 @@ static void ext4_orphan_cleanup (struct super_block * sb,
1321 return; 1321 return;
1322 } 1322 }
1323 1323
1324 if (bdev_read_only(sb->s_bdev)) {
1325 printk(KERN_ERR "EXT4-fs: write access "
1326 "unavailable, skipping orphan cleanup.\n");
1327 return;
1328 }
1329
1324 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { 1330 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
1325 if (es->s_last_orphan) 1331 if (es->s_last_orphan)
1326 jbd_debug(1, "Errors on filesystem, " 1332 jbd_debug(1, "Errors on filesystem, "
@@ -2460,6 +2466,7 @@ static int ext4_statfs (struct dentry * dentry, struct kstatfs * buf)
2460 struct ext4_super_block *es = sbi->s_es; 2466 struct ext4_super_block *es = sbi->s_es;
2461 ext4_fsblk_t overhead; 2467 ext4_fsblk_t overhead;
2462 int i; 2468 int i;
2469 u64 fsid;
2463 2470
2464 if (test_opt (sb, MINIX_DF)) 2471 if (test_opt (sb, MINIX_DF))
2465 overhead = 0; 2472 overhead = 0;
@@ -2506,6 +2513,10 @@ static int ext4_statfs (struct dentry * dentry, struct kstatfs * buf)
2506 buf->f_files = le32_to_cpu(es->s_inodes_count); 2513 buf->f_files = le32_to_cpu(es->s_inodes_count);
2507 buf->f_ffree = percpu_counter_sum(&sbi->s_freeinodes_counter); 2514 buf->f_ffree = percpu_counter_sum(&sbi->s_freeinodes_counter);
2508 buf->f_namelen = EXT4_NAME_LEN; 2515 buf->f_namelen = EXT4_NAME_LEN;
2516 fsid = le64_to_cpup((void *)es->s_uuid) ^
2517 le64_to_cpup((void *)es->s_uuid + sizeof(u64));
2518 buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
2519 buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
2509 return 0; 2520 return 0;
2510} 2521}
2511 2522
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 63233cd946a7..dc969c357aa1 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -459,14 +459,11 @@ static void ext4_xattr_update_super_block(handle_t *handle,
459 if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR)) 459 if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR))
460 return; 460 return;
461 461
462 lock_super(sb);
463 if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) { 462 if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
464 EXT4_SB(sb)->s_es->s_feature_compat |= 463 EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR);
465 cpu_to_le32(EXT4_FEATURE_COMPAT_EXT_ATTR);
466 sb->s_dirt = 1; 464 sb->s_dirt = 1;
467 ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh); 465 ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
468 } 466 }
469 unlock_super(sb);
470} 467}
471 468
472/* 469/*
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
index 82cc4f59e3ba..05c2941c74f2 100644
--- a/fs/fat/cache.c
+++ b/fs/fat/cache.c
@@ -34,9 +34,9 @@ static inline int fat_max_cache(struct inode *inode)
34 return FAT_MAX_CACHE; 34 return FAT_MAX_CACHE;
35} 35}
36 36
37static kmem_cache_t *fat_cache_cachep; 37static struct kmem_cache *fat_cache_cachep;
38 38
39static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) 39static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
40{ 40{
41 struct fat_cache *cache = (struct fat_cache *)foo; 41 struct fat_cache *cache = (struct fat_cache *)foo;
42 42
@@ -63,7 +63,7 @@ void fat_cache_destroy(void)
63 63
64static inline struct fat_cache *fat_cache_alloc(struct inode *inode) 64static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
65{ 65{
66 return kmem_cache_alloc(fat_cache_cachep, SLAB_KERNEL); 66 return kmem_cache_alloc(fat_cache_cachep, GFP_KERNEL);
67} 67}
68 68
69static inline void fat_cache_free(struct fat_cache *cache) 69static inline void fat_cache_free(struct fat_cache *cache)
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 78945b53b0f8..a9e4688582a2 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -477,12 +477,12 @@ static void fat_put_super(struct super_block *sb)
477 kfree(sbi); 477 kfree(sbi);
478} 478}
479 479
480static kmem_cache_t *fat_inode_cachep; 480static struct kmem_cache *fat_inode_cachep;
481 481
482static struct inode *fat_alloc_inode(struct super_block *sb) 482static struct inode *fat_alloc_inode(struct super_block *sb)
483{ 483{
484 struct msdos_inode_info *ei; 484 struct msdos_inode_info *ei;
485 ei = kmem_cache_alloc(fat_inode_cachep, SLAB_KERNEL); 485 ei = kmem_cache_alloc(fat_inode_cachep, GFP_KERNEL);
486 if (!ei) 486 if (!ei)
487 return NULL; 487 return NULL;
488 return &ei->vfs_inode; 488 return &ei->vfs_inode;
@@ -493,7 +493,7 @@ static void fat_destroy_inode(struct inode *inode)
493 kmem_cache_free(fat_inode_cachep, MSDOS_I(inode)); 493 kmem_cache_free(fat_inode_cachep, MSDOS_I(inode));
494} 494}
495 495
496static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 496static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
497{ 497{
498 struct msdos_inode_info *ei = (struct msdos_inode_info *)foo; 498 struct msdos_inode_info *ei = (struct msdos_inode_info *)foo;
499 499
diff --git a/fs/fcntl.c b/fs/fcntl.c
index e4f26165f12a..4740d35e52cd 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -553,7 +553,7 @@ int send_sigurg(struct fown_struct *fown)
553} 553}
554 554
555static DEFINE_RWLOCK(fasync_lock); 555static DEFINE_RWLOCK(fasync_lock);
556static kmem_cache_t *fasync_cache __read_mostly; 556static struct kmem_cache *fasync_cache __read_mostly;
557 557
558/* 558/*
559 * fasync_helper() is used by some character device drivers (mainly mice) 559 * fasync_helper() is used by some character device drivers (mainly mice)
@@ -567,7 +567,7 @@ int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fap
567 int result = 0; 567 int result = 0;
568 568
569 if (on) { 569 if (on) {
570 new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL); 570 new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
571 if (!new) 571 if (!new)
572 return -ENOMEM; 572 return -ENOMEM;
573 } 573 }
diff --git a/fs/file.c b/fs/file.c
index 3787e82f54c1..51aef675470f 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -21,7 +21,6 @@
21struct fdtable_defer { 21struct fdtable_defer {
22 spinlock_t lock; 22 spinlock_t lock;
23 struct work_struct wq; 23 struct work_struct wq;
24 struct timer_list timer;
25 struct fdtable *next; 24 struct fdtable *next;
26}; 25};
27 26
@@ -75,22 +74,6 @@ static void __free_fdtable(struct fdtable *fdt)
75 kfree(fdt); 74 kfree(fdt);
76} 75}
77 76
78static void fdtable_timer(unsigned long data)
79{
80 struct fdtable_defer *fddef = (struct fdtable_defer *)data;
81
82 spin_lock(&fddef->lock);
83 /*
84 * If someone already emptied the queue return.
85 */
86 if (!fddef->next)
87 goto out;
88 if (!schedule_work(&fddef->wq))
89 mod_timer(&fddef->timer, 5);
90out:
91 spin_unlock(&fddef->lock);
92}
93
94static void free_fdtable_work(struct work_struct *work) 77static void free_fdtable_work(struct work_struct *work)
95{ 78{
96 struct fdtable_defer *f = 79 struct fdtable_defer *f =
@@ -144,13 +127,8 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
144 spin_lock(&fddef->lock); 127 spin_lock(&fddef->lock);
145 fdt->next = fddef->next; 128 fdt->next = fddef->next;
146 fddef->next = fdt; 129 fddef->next = fdt;
147 /* 130 /* vmallocs are handled from the workqueue context */
148 * vmallocs are handled from the workqueue context. 131 schedule_work(&fddef->wq);
149 * If the per-cpu workqueue is running, then we
150 * defer work scheduling through a timer.
151 */
152 if (!schedule_work(&fddef->wq))
153 mod_timer(&fddef->timer, 5);
154 spin_unlock(&fddef->lock); 132 spin_unlock(&fddef->lock);
155 put_cpu_var(fdtable_defer_list); 133 put_cpu_var(fdtable_defer_list);
156 } 134 }
@@ -354,9 +332,6 @@ static void __devinit fdtable_defer_list_init(int cpu)
354 struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu); 332 struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
355 spin_lock_init(&fddef->lock); 333 spin_lock_init(&fddef->lock);
356 INIT_WORK(&fddef->wq, free_fdtable_work); 334 INIT_WORK(&fddef->wq, free_fdtable_work);
357 init_timer(&fddef->timer);
358 fddef->timer.data = (unsigned long)fddef;
359 fddef->timer.function = fdtable_timer;
360 fddef->next = NULL; 335 fddef->next = NULL;
361} 336}
362 337
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index 4786d51ad3bd..0b7ae897cb78 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -46,7 +46,7 @@ extern const struct address_space_operations vxfs_immed_aops;
46 46
47extern struct inode_operations vxfs_immed_symlink_iops; 47extern struct inode_operations vxfs_immed_symlink_iops;
48 48
49kmem_cache_t *vxfs_inode_cachep; 49struct kmem_cache *vxfs_inode_cachep;
50 50
51 51
52#ifdef DIAGNOSTIC 52#ifdef DIAGNOSTIC
@@ -103,7 +103,7 @@ vxfs_blkiget(struct super_block *sbp, u_long extent, ino_t ino)
103 struct vxfs_inode_info *vip; 103 struct vxfs_inode_info *vip;
104 struct vxfs_dinode *dip; 104 struct vxfs_dinode *dip;
105 105
106 if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, SLAB_KERNEL))) 106 if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL)))
107 goto fail; 107 goto fail;
108 dip = (struct vxfs_dinode *)(bp->b_data + offset); 108 dip = (struct vxfs_dinode *)(bp->b_data + offset);
109 memcpy(vip, dip, sizeof(*vip)); 109 memcpy(vip, dip, sizeof(*vip));
@@ -145,7 +145,7 @@ __vxfs_iget(ino_t ino, struct inode *ilistp)
145 struct vxfs_dinode *dip; 145 struct vxfs_dinode *dip;
146 caddr_t kaddr = (char *)page_address(pp); 146 caddr_t kaddr = (char *)page_address(pp);
147 147
148 if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, SLAB_KERNEL))) 148 if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL)))
149 goto fail; 149 goto fail;
150 dip = (struct vxfs_dinode *)(kaddr + offset); 150 dip = (struct vxfs_dinode *)(kaddr + offset);
151 memcpy(vip, dip, sizeof(*vip)); 151 memcpy(vip, dip, sizeof(*vip));
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 66571eafbb1e..357764d85ff1 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -19,7 +19,7 @@
19 19
20MODULE_ALIAS_MISCDEV(FUSE_MINOR); 20MODULE_ALIAS_MISCDEV(FUSE_MINOR);
21 21
22static kmem_cache_t *fuse_req_cachep; 22static struct kmem_cache *fuse_req_cachep;
23 23
24static struct fuse_conn *fuse_get_conn(struct file *file) 24static struct fuse_conn *fuse_get_conn(struct file *file)
25{ 25{
@@ -41,7 +41,7 @@ static void fuse_request_init(struct fuse_req *req)
41 41
42struct fuse_req *fuse_request_alloc(void) 42struct fuse_req *fuse_request_alloc(void)
43{ 43{
44 struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL); 44 struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
45 if (req) 45 if (req)
46 fuse_request_init(req); 46 fuse_request_init(req);
47 return req; 47 return req;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index c71a6c092ad9..1cabdb229adb 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -141,9 +141,6 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
141 struct fuse_req *forget_req; 141 struct fuse_req *forget_req;
142 struct dentry *parent; 142 struct dentry *parent;
143 143
144 /* Doesn't hurt to "reset" the validity timeout */
145 fuse_invalidate_entry_cache(entry);
146
147 /* For negative dentries, always do a fresh lookup */ 144 /* For negative dentries, always do a fresh lookup */
148 if (!inode) 145 if (!inode)
149 return 0; 146 return 0;
@@ -1027,6 +1024,8 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr)
1027 if (attr->ia_valid & ATTR_SIZE) { 1024 if (attr->ia_valid & ATTR_SIZE) {
1028 unsigned long limit; 1025 unsigned long limit;
1029 is_truncate = 1; 1026 is_truncate = 1;
1027 if (IS_SWAPFILE(inode))
1028 return -ETXTBSY;
1030 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; 1029 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
1031 if (limit != RLIM_INFINITY && attr->ia_size > (loff_t) limit) { 1030 if (limit != RLIM_INFINITY && attr->ia_size > (loff_t) limit) {
1032 send_sig(SIGXFSZ, current, 0); 1031 send_sig(SIGXFSZ, current, 0);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 763a50daf1c0..128f79c40803 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -754,6 +754,42 @@ static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
754 return err; 754 return err;
755} 755}
756 756
757static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
758{
759 struct inode *inode = mapping->host;
760 struct fuse_conn *fc = get_fuse_conn(inode);
761 struct fuse_req *req;
762 struct fuse_bmap_in inarg;
763 struct fuse_bmap_out outarg;
764 int err;
765
766 if (!inode->i_sb->s_bdev || fc->no_bmap)
767 return 0;
768
769 req = fuse_get_req(fc);
770 if (IS_ERR(req))
771 return 0;
772
773 memset(&inarg, 0, sizeof(inarg));
774 inarg.block = block;
775 inarg.blocksize = inode->i_sb->s_blocksize;
776 req->in.h.opcode = FUSE_BMAP;
777 req->in.h.nodeid = get_node_id(inode);
778 req->in.numargs = 1;
779 req->in.args[0].size = sizeof(inarg);
780 req->in.args[0].value = &inarg;
781 req->out.numargs = 1;
782 req->out.args[0].size = sizeof(outarg);
783 req->out.args[0].value = &outarg;
784 request_send(fc, req);
785 err = req->out.h.error;
786 fuse_put_request(fc, req);
787 if (err == -ENOSYS)
788 fc->no_bmap = 1;
789
790 return err ? 0 : outarg.block;
791}
792
757static const struct file_operations fuse_file_operations = { 793static const struct file_operations fuse_file_operations = {
758 .llseek = generic_file_llseek, 794 .llseek = generic_file_llseek,
759 .read = do_sync_read, 795 .read = do_sync_read,
@@ -787,6 +823,7 @@ static const struct address_space_operations fuse_file_aops = {
787 .commit_write = fuse_commit_write, 823 .commit_write = fuse_commit_write,
788 .readpages = fuse_readpages, 824 .readpages = fuse_readpages,
789 .set_page_dirty = fuse_set_page_dirty, 825 .set_page_dirty = fuse_set_page_dirty,
826 .bmap = fuse_bmap,
790}; 827};
791 828
792void fuse_init_file_inode(struct inode *inode) 829void fuse_init_file_inode(struct inode *inode)
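
fuse_bmap() above probes the userspace filesystem once and, on -ENOSYS, sets fc->no_bmap so later calls return immediately instead of repeating a request that can never succeed. A generic sketch of that probe-once caching, with a hypothetical backend_query():

#include <errno.h>
#include <stdio.h>

struct conn {
        int no_query;           /* set once the backend says "not implemented" */
};

/* Hypothetical backend call that does not implement the operation. */
static int backend_query(int arg, int *result)
{
        (void)arg;
        (void)result;
        return -ENOSYS;
}

static int cached_query(struct conn *c, int arg)
{
        int result = 0;
        int err;

        if (c->no_query)
                return 0;               /* known-unsupported: skip the round trip */

        err = backend_query(arg, &result);
        if (err == -ENOSYS)
                c->no_query = 1;        /* remember, so we never ask again */

        return err ? 0 : result;
}

int main(void)
{
        struct conn c = { 0 };

        printf("first call:  %d\n", cached_query(&c, 42));     /* probes, learns ENOSYS */
        printf("second call: %d (no_query=%d)\n", cached_query(&c, 42), c.no_query);
        return 0;
}
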
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 91edb8932d90..b98b20de7405 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -298,6 +298,9 @@ struct fuse_conn {
298 reply, before any other request, and never cleared */ 298 reply, before any other request, and never cleared */
299 unsigned conn_error : 1; 299 unsigned conn_error : 1;
300 300
301 /** Connection successful. Only set in INIT */
302 unsigned conn_init : 1;
303
301 /** Do readpages asynchronously? Only set in INIT */ 304 /** Do readpages asynchronously? Only set in INIT */
302 unsigned async_read : 1; 305 unsigned async_read : 1;
303 306
@@ -339,6 +342,9 @@ struct fuse_conn {
339 /** Is interrupt not implemented by fs? */ 342 /** Is interrupt not implemented by fs? */
340 unsigned no_interrupt : 1; 343 unsigned no_interrupt : 1;
341 344
345 /** Is bmap not implemented by fs? */
346 unsigned no_bmap : 1;
347
342 /** The number of requests waiting for completion */ 348 /** The number of requests waiting for completion */
343 atomic_t num_waiting; 349 atomic_t num_waiting;
344 350
@@ -365,6 +371,9 @@ struct fuse_conn {
365 371
366 /** Key for lock owner ID scrambling */ 372 /** Key for lock owner ID scrambling */
367 u32 scramble_key[4]; 373 u32 scramble_key[4];
374
375 /** Reserved request for the DESTROY message */
376 struct fuse_req *destroy_req;
368}; 377};
369 378
370static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb) 379static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb)
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index fc4203570370..12450d2b320e 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -22,7 +22,7 @@ MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
22MODULE_DESCRIPTION("Filesystem in Userspace"); 22MODULE_DESCRIPTION("Filesystem in Userspace");
23MODULE_LICENSE("GPL"); 23MODULE_LICENSE("GPL");
24 24
25static kmem_cache_t *fuse_inode_cachep; 25static struct kmem_cache *fuse_inode_cachep;
26struct list_head fuse_conn_list; 26struct list_head fuse_conn_list;
27DEFINE_MUTEX(fuse_mutex); 27DEFINE_MUTEX(fuse_mutex);
28 28
@@ -39,6 +39,7 @@ struct fuse_mount_data {
39 unsigned group_id_present : 1; 39 unsigned group_id_present : 1;
40 unsigned flags; 40 unsigned flags;
41 unsigned max_read; 41 unsigned max_read;
42 unsigned blksize;
42}; 43};
43 44
44static struct inode *fuse_alloc_inode(struct super_block *sb) 45static struct inode *fuse_alloc_inode(struct super_block *sb)
@@ -46,7 +47,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
46 struct inode *inode; 47 struct inode *inode;
47 struct fuse_inode *fi; 48 struct fuse_inode *fi;
48 49
49 inode = kmem_cache_alloc(fuse_inode_cachep, SLAB_KERNEL); 50 inode = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL);
50 if (!inode) 51 if (!inode)
51 return NULL; 52 return NULL;
52 53
@@ -205,10 +206,23 @@ static void fuse_umount_begin(struct vfsmount *vfsmnt, int flags)
205 fuse_abort_conn(get_fuse_conn_super(vfsmnt->mnt_sb)); 206 fuse_abort_conn(get_fuse_conn_super(vfsmnt->mnt_sb));
206} 207}
207 208
209static void fuse_send_destroy(struct fuse_conn *fc)
210{
211 struct fuse_req *req = fc->destroy_req;
212 if (req && fc->conn_init) {
213 fc->destroy_req = NULL;
214 req->in.h.opcode = FUSE_DESTROY;
215 req->force = 1;
216 request_send(fc, req);
217 fuse_put_request(fc, req);
218 }
219}
220
208static void fuse_put_super(struct super_block *sb) 221static void fuse_put_super(struct super_block *sb)
209{ 222{
210 struct fuse_conn *fc = get_fuse_conn_super(sb); 223 struct fuse_conn *fc = get_fuse_conn_super(sb);
211 224
225 fuse_send_destroy(fc);
212 spin_lock(&fc->lock); 226 spin_lock(&fc->lock);
213 fc->connected = 0; 227 fc->connected = 0;
214 fc->blocked = 0; 228 fc->blocked = 0;
@@ -274,6 +288,7 @@ enum {
274 OPT_DEFAULT_PERMISSIONS, 288 OPT_DEFAULT_PERMISSIONS,
275 OPT_ALLOW_OTHER, 289 OPT_ALLOW_OTHER,
276 OPT_MAX_READ, 290 OPT_MAX_READ,
291 OPT_BLKSIZE,
277 OPT_ERR 292 OPT_ERR
278}; 293};
279 294
@@ -285,14 +300,16 @@ static match_table_t tokens = {
285 {OPT_DEFAULT_PERMISSIONS, "default_permissions"}, 300 {OPT_DEFAULT_PERMISSIONS, "default_permissions"},
286 {OPT_ALLOW_OTHER, "allow_other"}, 301 {OPT_ALLOW_OTHER, "allow_other"},
287 {OPT_MAX_READ, "max_read=%u"}, 302 {OPT_MAX_READ, "max_read=%u"},
303 {OPT_BLKSIZE, "blksize=%u"},
288 {OPT_ERR, NULL} 304 {OPT_ERR, NULL}
289}; 305};
290 306
291static int parse_fuse_opt(char *opt, struct fuse_mount_data *d) 307static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
292{ 308{
293 char *p; 309 char *p;
294 memset(d, 0, sizeof(struct fuse_mount_data)); 310 memset(d, 0, sizeof(struct fuse_mount_data));
295 d->max_read = ~0; 311 d->max_read = ~0;
312 d->blksize = 512;
296 313
297 while ((p = strsep(&opt, ",")) != NULL) { 314 while ((p = strsep(&opt, ",")) != NULL) {
298 int token; 315 int token;
@@ -345,6 +362,12 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d)
345 d->max_read = value; 362 d->max_read = value;
346 break; 363 break;
347 364
365 case OPT_BLKSIZE:
366 if (!is_bdev || match_int(&args[0], &value))
367 return 0;
368 d->blksize = value;
369 break;
370
348 default: 371 default:
349 return 0; 372 return 0;
350 } 373 }
@@ -400,6 +423,8 @@ static struct fuse_conn *new_conn(void)
400void fuse_conn_put(struct fuse_conn *fc) 423void fuse_conn_put(struct fuse_conn *fc)
401{ 424{
402 if (atomic_dec_and_test(&fc->count)) { 425 if (atomic_dec_and_test(&fc->count)) {
426 if (fc->destroy_req)
427 fuse_request_free(fc->destroy_req);
403 mutex_destroy(&fc->inst_mutex); 428 mutex_destroy(&fc->inst_mutex);
404 kfree(fc); 429 kfree(fc);
405 } 430 }
@@ -456,6 +481,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
456 fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages); 481 fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages);
457 fc->minor = arg->minor; 482 fc->minor = arg->minor;
458 fc->max_write = arg->minor < 5 ? 4096 : arg->max_write; 483 fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
484 fc->conn_init = 1;
459 } 485 }
460 fuse_put_request(fc, req); 486 fuse_put_request(fc, req);
461 fc->blocked = 0; 487 fc->blocked = 0;
@@ -500,15 +526,23 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
500 struct dentry *root_dentry; 526 struct dentry *root_dentry;
501 struct fuse_req *init_req; 527 struct fuse_req *init_req;
502 int err; 528 int err;
529 int is_bdev = sb->s_bdev != NULL;
503 530
504 if (sb->s_flags & MS_MANDLOCK) 531 if (sb->s_flags & MS_MANDLOCK)
505 return -EINVAL; 532 return -EINVAL;
506 533
507 if (!parse_fuse_opt((char *) data, &d)) 534 if (!parse_fuse_opt((char *) data, &d, is_bdev))
508 return -EINVAL; 535 return -EINVAL;
509 536
510 sb->s_blocksize = PAGE_CACHE_SIZE; 537 if (is_bdev) {
511 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 538#ifdef CONFIG_BLOCK
539 if (!sb_set_blocksize(sb, d.blksize))
540 return -EINVAL;
541#endif
542 } else {
543 sb->s_blocksize = PAGE_CACHE_SIZE;
544 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
545 }
512 sb->s_magic = FUSE_SUPER_MAGIC; 546 sb->s_magic = FUSE_SUPER_MAGIC;
513 sb->s_op = &fuse_super_operations; 547 sb->s_op = &fuse_super_operations;
514 sb->s_maxbytes = MAX_LFS_FILESIZE; 548 sb->s_maxbytes = MAX_LFS_FILESIZE;
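For block-device mounts the hunk above drops the fixed PAGE_CACHE_SIZE and lets sb_set_blocksize() validate the user-supplied blksize=, which rejects sizes that are not a power of two in the usual 512-byte-to-page-size range. A rough userspace approximation of that check (the limits here are assumptions about the usual constraint, not code copied from the kernel):

```c
#include <stdbool.h>
#include <stdio.h>

#define MIN_BLKSIZE 512
#define PAGE_SIZE_ASSUMED 4096   /* stand-in for the real page size */

/* Accept only powers of two in [512, page size]. */
static bool blksize_ok(unsigned int size)
{
	if (size < MIN_BLKSIZE || size > PAGE_SIZE_ASSUMED)
		return false;
	return (size & (size - 1)) == 0;
}

int main(void)
{
	unsigned int candidates[] = { 256, 512, 1000, 1024, 4096, 8192 };
	for (unsigned int i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++)
		printf("%u -> %s\n", candidates[i],
		       blksize_ok(candidates[i]) ? "ok" : "rejected");
	return 0;
}
```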
@@ -547,6 +581,12 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
547 if (!init_req) 581 if (!init_req)
548 goto err_put_root; 582 goto err_put_root;
549 583
584 if (is_bdev) {
585 fc->destroy_req = fuse_request_alloc();
586 if (!fc->destroy_req)
587 goto err_put_root;
588 }
589
550 mutex_lock(&fuse_mutex); 590 mutex_lock(&fuse_mutex);
551 err = -EINVAL; 591 err = -EINVAL;
552 if (file->private_data) 592 if (file->private_data)
@@ -598,10 +638,47 @@ static struct file_system_type fuse_fs_type = {
598 .kill_sb = kill_anon_super, 638 .kill_sb = kill_anon_super,
599}; 639};
600 640
641#ifdef CONFIG_BLOCK
642static int fuse_get_sb_blk(struct file_system_type *fs_type,
643 int flags, const char *dev_name,
644 void *raw_data, struct vfsmount *mnt)
645{
646 return get_sb_bdev(fs_type, flags, dev_name, raw_data, fuse_fill_super,
647 mnt);
648}
649
650static struct file_system_type fuseblk_fs_type = {
651 .owner = THIS_MODULE,
652 .name = "fuseblk",
653 .get_sb = fuse_get_sb_blk,
654 .kill_sb = kill_block_super,
655 .fs_flags = FS_REQUIRES_DEV,
656};
657
658static inline int register_fuseblk(void)
659{
660 return register_filesystem(&fuseblk_fs_type);
661}
662
663static inline void unregister_fuseblk(void)
664{
665 unregister_filesystem(&fuseblk_fs_type);
666}
667#else
668static inline int register_fuseblk(void)
669{
670 return 0;
671}
672
673static inline void unregister_fuseblk(void)
674{
675}
676#endif
677
601static decl_subsys(fuse, NULL, NULL); 678static decl_subsys(fuse, NULL, NULL);
602static decl_subsys(connections, NULL, NULL); 679static decl_subsys(connections, NULL, NULL);
603 680
604static void fuse_inode_init_once(void *foo, kmem_cache_t *cachep, 681static void fuse_inode_init_once(void *foo, struct kmem_cache *cachep,
605 unsigned long flags) 682 unsigned long flags)
606{ 683{
607 struct inode * inode = foo; 684 struct inode * inode = foo;
@@ -617,24 +694,34 @@ static int __init fuse_fs_init(void)
617 694
618 err = register_filesystem(&fuse_fs_type); 695 err = register_filesystem(&fuse_fs_type);
619 if (err) 696 if (err)
620 printk("fuse: failed to register filesystem\n"); 697 goto out;
621 else {
622 fuse_inode_cachep = kmem_cache_create("fuse_inode",
623 sizeof(struct fuse_inode),
624 0, SLAB_HWCACHE_ALIGN,
625 fuse_inode_init_once, NULL);
626 if (!fuse_inode_cachep) {
627 unregister_filesystem(&fuse_fs_type);
628 err = -ENOMEM;
629 }
630 }
631 698
699 err = register_fuseblk();
700 if (err)
701 goto out_unreg;
702
703 fuse_inode_cachep = kmem_cache_create("fuse_inode",
704 sizeof(struct fuse_inode),
705 0, SLAB_HWCACHE_ALIGN,
706 fuse_inode_init_once, NULL);
707 err = -ENOMEM;
708 if (!fuse_inode_cachep)
709 goto out_unreg2;
710
711 return 0;
712
713 out_unreg2:
714 unregister_fuseblk();
715 out_unreg:
716 unregister_filesystem(&fuse_fs_type);
717 out:
632 return err; 718 return err;
633} 719}
634 720
635static void fuse_fs_cleanup(void) 721static void fuse_fs_cleanup(void)
636{ 722{
637 unregister_filesystem(&fuse_fs_type); 723 unregister_filesystem(&fuse_fs_type);
724 unregister_fuseblk();
638 kmem_cache_destroy(fuse_inode_cachep); 725 kmem_cache_destroy(fuse_inode_cachep);
639} 726}
640 727
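The rewritten fuse_fs_init() above also replaces the nested if/else with the usual kernel unwind style: each setup step that fails jumps to a label that undoes only the steps already completed (out_unreg2 -> out_unreg -> out). A compact sketch of that control flow with stubbed steps (step names are illustrative):

```c
#include <stdio.h>

static int step_a(void)  { puts("A up");   return 0; }
static void undo_a(void) { puts("A down"); }
static int step_b(void)  { puts("B up");   return 0; }
static void undo_b(void) { puts("B down"); }
static int step_c(void)  { puts("C up");   return -1; }  /* simulate failure */

static int init_all(void)
{
	int err;

	err = step_a();
	if (err)
		goto out;
	err = step_b();
	if (err)
		goto out_undo_a;
	err = step_c();
	if (err)
		goto out_undo_b;
	return 0;

out_undo_b:
	undo_b();           /* fall through: undo in reverse order */
out_undo_a:
	undo_a();
out:
	return err;
}

int main(void)
{
	printf("init_all() = %d\n", init_all());
	return 0;
}
```

Teardown (fuse_fs_cleanup) then simply runs the undo steps unconditionally, in reverse order of setup.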
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index 8c27de8b9568..c0791cbacad9 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -2,6 +2,7 @@ config GFS2_FS
2 tristate "GFS2 file system support" 2 tristate "GFS2 file system support"
3 depends on EXPERIMENTAL 3 depends on EXPERIMENTAL
4 select FS_POSIX_ACL 4 select FS_POSIX_ACL
5 select CRC32
5 help 6 help
6 A cluster filesystem. 7 A cluster filesystem.
7 8
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 5f959b8ce406..6e80844367ee 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -74,11 +74,11 @@ int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access)
74{ 74{
75 if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl) 75 if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl)
76 return -EOPNOTSUPP; 76 return -EOPNOTSUPP;
77 if (current->fsuid != ip->i_di.di_uid && !capable(CAP_FOWNER)) 77 if (current->fsuid != ip->i_inode.i_uid && !capable(CAP_FOWNER))
78 return -EPERM; 78 return -EPERM;
79 if (S_ISLNK(ip->i_di.di_mode)) 79 if (S_ISLNK(ip->i_inode.i_mode))
80 return -EOPNOTSUPP; 80 return -EOPNOTSUPP;
81 if (!access && !S_ISDIR(ip->i_di.di_mode)) 81 if (!access && !S_ISDIR(ip->i_inode.i_mode))
82 return -EACCES; 82 return -EACCES;
83 83
84 return 0; 84 return 0;
@@ -145,14 +145,14 @@ out:
145} 145}
146 146
147/** 147/**
148 * gfs2_check_acl_locked - Check an ACL to see if we're allowed to do something 148 * gfs2_check_acl - Check an ACL to see if we're allowed to do something
149 * @inode: the file we want to do something to 149 * @inode: the file we want to do something to
150 * @mask: what we want to do 150 * @mask: what we want to do
151 * 151 *
152 * Returns: errno 152 * Returns: errno
153 */ 153 */
154 154
155int gfs2_check_acl_locked(struct inode *inode, int mask) 155int gfs2_check_acl(struct inode *inode, int mask)
156{ 156{
157 struct posix_acl *acl = NULL; 157 struct posix_acl *acl = NULL;
158 int error; 158 int error;
@@ -170,21 +170,6 @@ int gfs2_check_acl_locked(struct inode *inode, int mask)
170 return -EAGAIN; 170 return -EAGAIN;
171} 171}
172 172
173int gfs2_check_acl(struct inode *inode, int mask)
174{
175 struct gfs2_inode *ip = GFS2_I(inode);
176 struct gfs2_holder i_gh;
177 int error;
178
179 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
180 if (!error) {
181 error = gfs2_check_acl_locked(inode, mask);
182 gfs2_glock_dq_uninit(&i_gh);
183 }
184
185 return error;
186}
187
188static int munge_mode(struct gfs2_inode *ip, mode_t mode) 173static int munge_mode(struct gfs2_inode *ip, mode_t mode)
189{ 174{
190 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 175 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
@@ -198,10 +183,10 @@ static int munge_mode(struct gfs2_inode *ip, mode_t mode)
198 error = gfs2_meta_inode_buffer(ip, &dibh); 183 error = gfs2_meta_inode_buffer(ip, &dibh);
199 if (!error) { 184 if (!error) {
200 gfs2_assert_withdraw(sdp, 185 gfs2_assert_withdraw(sdp,
201 (ip->i_di.di_mode & S_IFMT) == (mode & S_IFMT)); 186 (ip->i_inode.i_mode & S_IFMT) == (mode & S_IFMT));
202 ip->i_di.di_mode = mode; 187 ip->i_inode.i_mode = mode;
203 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 188 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
204 gfs2_dinode_out(&ip->i_di, dibh->b_data); 189 gfs2_dinode_out(ip, dibh->b_data);
205 brelse(dibh); 190 brelse(dibh);
206 } 191 }
207 192
@@ -215,12 +200,12 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
215 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); 200 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
216 struct posix_acl *acl = NULL, *clone; 201 struct posix_acl *acl = NULL, *clone;
217 struct gfs2_ea_request er; 202 struct gfs2_ea_request er;
218 mode_t mode = ip->i_di.di_mode; 203 mode_t mode = ip->i_inode.i_mode;
219 int error; 204 int error;
220 205
221 if (!sdp->sd_args.ar_posix_acl) 206 if (!sdp->sd_args.ar_posix_acl)
222 return 0; 207 return 0;
223 if (S_ISLNK(ip->i_di.di_mode)) 208 if (S_ISLNK(ip->i_inode.i_mode))
224 return 0; 209 return 0;
225 210
226 memset(&er, 0, sizeof(struct gfs2_ea_request)); 211 memset(&er, 0, sizeof(struct gfs2_ea_request));
@@ -232,7 +217,7 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
232 return error; 217 return error;
233 if (!acl) { 218 if (!acl) {
234 mode &= ~current->fs->umask; 219 mode &= ~current->fs->umask;
235 if (mode != ip->i_di.di_mode) 220 if (mode != ip->i_inode.i_mode)
236 error = munge_mode(ip, mode); 221 error = munge_mode(ip, mode);
237 return error; 222 return error;
238 } 223 }
@@ -244,7 +229,7 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
244 posix_acl_release(acl); 229 posix_acl_release(acl);
245 acl = clone; 230 acl = clone;
246 231
247 if (S_ISDIR(ip->i_di.di_mode)) { 232 if (S_ISDIR(ip->i_inode.i_mode)) {
248 er.er_name = GFS2_POSIX_ACL_DEFAULT; 233 er.er_name = GFS2_POSIX_ACL_DEFAULT;
249 er.er_name_len = GFS2_POSIX_ACL_DEFAULT_LEN; 234 er.er_name_len = GFS2_POSIX_ACL_DEFAULT_LEN;
250 error = gfs2_system_eaops.eo_set(ip, &er); 235 error = gfs2_system_eaops.eo_set(ip, &er);
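The acl.c changes above stop reading duplicated fields such as di_mode and di_uid from ip->i_di and use the embedded VFS inode instead, so there is a single authoritative copy of those attributes; the GFS2_I() accessor is essentially container_of() over that embedded inode. A generic illustration of the layout (plain C with stand-in struct names, not the kernel headers):

```c
#include <stddef.h>
#include <stdio.h>

/* Poor man's container_of(): recover the outer struct from a member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vfs_inode {               /* stand-in for struct inode */
	unsigned int i_mode;
	unsigned int i_uid;
};

struct gfs2_like_inode {         /* stand-in for struct gfs2_inode */
	unsigned long long no_addr;
	struct vfs_inode i_inode;    /* embedded, authoritative copy */
};

static struct gfs2_like_inode *GFS2_I_like(struct vfs_inode *inode)
{
	return container_of(inode, struct gfs2_like_inode, i_inode);
}

int main(void)
{
	struct gfs2_like_inode ip = {
		.no_addr = 1234,
		.i_inode = { .i_mode = 0100644, .i_uid = 1000 },
	};
	struct vfs_inode *inode = &ip.i_inode;

	/* Permission checks read the embedded inode, never a shadow copy. */
	printf("addr %llu mode %o uid %u\n",
	       GFS2_I_like(inode)->no_addr, inode->i_mode, inode->i_uid);
	return 0;
}
```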
diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h
index 05c294fe0d78..6751930bfb64 100644
--- a/fs/gfs2/acl.h
+++ b/fs/gfs2/acl.h
@@ -31,7 +31,6 @@ int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
31 struct gfs2_ea_request *er, 31 struct gfs2_ea_request *er,
32 int *remove, mode_t *mode); 32 int *remove, mode_t *mode);
33int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access); 33int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access);
34int gfs2_check_acl_locked(struct inode *inode, int mask);
35int gfs2_check_acl(struct inode *inode, int mask); 34int gfs2_check_acl(struct inode *inode, int mask);
36int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip); 35int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip);
37int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr); 36int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 06e9a8cb45e9..8240c1ff94f4 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -38,8 +38,8 @@ struct metapath {
38}; 38};
39 39
40typedef int (*block_call_t) (struct gfs2_inode *ip, struct buffer_head *dibh, 40typedef int (*block_call_t) (struct gfs2_inode *ip, struct buffer_head *dibh,
41 struct buffer_head *bh, u64 *top, 41 struct buffer_head *bh, __be64 *top,
42 u64 *bottom, unsigned int height, 42 __be64 *bottom, unsigned int height,
43 void *data); 43 void *data);
44 44
45struct strip_mine { 45struct strip_mine {
@@ -163,6 +163,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
163 if (ip->i_di.di_size) { 163 if (ip->i_di.di_size) {
164 *(__be64 *)(di + 1) = cpu_to_be64(block); 164 *(__be64 *)(di + 1) = cpu_to_be64(block);
165 ip->i_di.di_blocks++; 165 ip->i_di.di_blocks++;
166 gfs2_set_inode_blocks(&ip->i_inode);
166 di->di_blocks = cpu_to_be64(ip->i_di.di_blocks); 167 di->di_blocks = cpu_to_be64(ip->i_di.di_blocks);
167 } 168 }
168 169
@@ -230,7 +231,7 @@ static int build_height(struct inode *inode, unsigned height)
230 struct buffer_head *blocks[GFS2_MAX_META_HEIGHT]; 231 struct buffer_head *blocks[GFS2_MAX_META_HEIGHT];
231 struct gfs2_dinode *di; 232 struct gfs2_dinode *di;
232 int error; 233 int error;
233 u64 *bp; 234 __be64 *bp;
234 u64 bn; 235 u64 bn;
235 unsigned n; 236 unsigned n;
236 237
@@ -255,7 +256,7 @@ static int build_height(struct inode *inode, unsigned height)
255 GFS2_FORMAT_IN); 256 GFS2_FORMAT_IN);
256 gfs2_buffer_clear_tail(blocks[n], 257 gfs2_buffer_clear_tail(blocks[n],
257 sizeof(struct gfs2_meta_header)); 258 sizeof(struct gfs2_meta_header));
258 bp = (u64 *)(blocks[n]->b_data + 259 bp = (__be64 *)(blocks[n]->b_data +
259 sizeof(struct gfs2_meta_header)); 260 sizeof(struct gfs2_meta_header));
260 *bp = cpu_to_be64(blocks[n+1]->b_blocknr); 261 *bp = cpu_to_be64(blocks[n+1]->b_blocknr);
261 brelse(blocks[n]); 262 brelse(blocks[n]);
@@ -272,6 +273,7 @@ static int build_height(struct inode *inode, unsigned height)
272 *(__be64 *)(di + 1) = cpu_to_be64(bn); 273 *(__be64 *)(di + 1) = cpu_to_be64(bn);
273 ip->i_di.di_height += new_height; 274 ip->i_di.di_height += new_height;
274 ip->i_di.di_blocks += new_height; 275 ip->i_di.di_blocks += new_height;
276 gfs2_set_inode_blocks(&ip->i_inode);
275 di->di_height = cpu_to_be16(ip->i_di.di_height); 277 di->di_height = cpu_to_be16(ip->i_di.di_height);
276 di->di_blocks = cpu_to_be64(ip->i_di.di_blocks); 278 di->di_blocks = cpu_to_be64(ip->i_di.di_blocks);
277 brelse(dibh); 279 brelse(dibh);
@@ -360,15 +362,15 @@ static void find_metapath(struct gfs2_inode *ip, u64 block,
360 * metadata tree. 362 * metadata tree.
361 */ 363 */
362 364
363static inline u64 *metapointer(struct buffer_head *bh, int *boundary, 365static inline __be64 *metapointer(struct buffer_head *bh, int *boundary,
364 unsigned int height, const struct metapath *mp) 366 unsigned int height, const struct metapath *mp)
365{ 367{
366 unsigned int head_size = (height > 0) ? 368 unsigned int head_size = (height > 0) ?
367 sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode); 369 sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode);
368 u64 *ptr; 370 __be64 *ptr;
369 *boundary = 0; 371 *boundary = 0;
370 ptr = ((u64 *)(bh->b_data + head_size)) + mp->mp_list[height]; 372 ptr = ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
371 if (ptr + 1 == (u64 *)(bh->b_data + bh->b_size)) 373 if (ptr + 1 == (__be64 *)(bh->b_data + bh->b_size))
372 *boundary = 1; 374 *boundary = 1;
373 return ptr; 375 return ptr;
374} 376}
@@ -394,7 +396,7 @@ static int lookup_block(struct gfs2_inode *ip, struct buffer_head *bh,
394 int *new, u64 *block) 396 int *new, u64 *block)
395{ 397{
396 int boundary; 398 int boundary;
397 u64 *ptr = metapointer(bh, &boundary, height, mp); 399 __be64 *ptr = metapointer(bh, &boundary, height, mp);
398 400
399 if (*ptr) { 401 if (*ptr) {
400 *block = be64_to_cpu(*ptr); 402 *block = be64_to_cpu(*ptr);
@@ -415,17 +417,35 @@ static int lookup_block(struct gfs2_inode *ip, struct buffer_head *bh,
415 417
416 *ptr = cpu_to_be64(*block); 418 *ptr = cpu_to_be64(*block);
417 ip->i_di.di_blocks++; 419 ip->i_di.di_blocks++;
420 gfs2_set_inode_blocks(&ip->i_inode);
418 421
419 *new = 1; 422 *new = 1;
420 return 0; 423 return 0;
421} 424}
422 425
426static inline void bmap_lock(struct inode *inode, int create)
427{
428 struct gfs2_inode *ip = GFS2_I(inode);
429 if (create)
430 down_write(&ip->i_rw_mutex);
431 else
432 down_read(&ip->i_rw_mutex);
433}
434
435static inline void bmap_unlock(struct inode *inode, int create)
436{
437 struct gfs2_inode *ip = GFS2_I(inode);
438 if (create)
439 up_write(&ip->i_rw_mutex);
440 else
441 up_read(&ip->i_rw_mutex);
442}
443
423/** 444/**
424 * gfs2_block_pointers - Map a block from an inode to a disk block 445 * gfs2_block_map - Map a block from an inode to a disk block
425 * @inode: The inode 446 * @inode: The inode
426 * @lblock: The logical block number 447 * @lblock: The logical block number
427 * @map_bh: The bh to be mapped 448 * @bh_map: The bh to be mapped
428 * @mp: metapath to use
429 * 449 *
430 * Find the block number on the current device which corresponds to an 450 * Find the block number on the current device which corresponds to an
431 * inode's block. If the block had to be created, "new" will be set. 451 * inode's block. If the block had to be created, "new" will be set.
@@ -433,8 +453,8 @@ static int lookup_block(struct gfs2_inode *ip, struct buffer_head *bh,
433 * Returns: errno 453 * Returns: errno
434 */ 454 */
435 455
436static int gfs2_block_pointers(struct inode *inode, u64 lblock, int create, 456int gfs2_block_map(struct inode *inode, u64 lblock, int create,
437 struct buffer_head *bh_map, struct metapath *mp) 457 struct buffer_head *bh_map)
438{ 458{
439 struct gfs2_inode *ip = GFS2_I(inode); 459 struct gfs2_inode *ip = GFS2_I(inode);
440 struct gfs2_sbd *sdp = GFS2_SB(inode); 460 struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -448,57 +468,61 @@ static int gfs2_block_pointers(struct inode *inode, u64 lblock, int create,
448 u64 dblock = 0; 468 u64 dblock = 0;
449 int boundary; 469 int boundary;
450 unsigned int maxlen = bh_map->b_size >> inode->i_blkbits; 470 unsigned int maxlen = bh_map->b_size >> inode->i_blkbits;
471 struct metapath mp;
472 u64 size;
451 473
452 BUG_ON(maxlen == 0); 474 BUG_ON(maxlen == 0);
453 475
454 if (gfs2_assert_warn(sdp, !gfs2_is_stuffed(ip))) 476 if (gfs2_assert_warn(sdp, !gfs2_is_stuffed(ip)))
455 return 0; 477 return 0;
456 478
479 bmap_lock(inode, create);
480 clear_buffer_mapped(bh_map);
481 clear_buffer_new(bh_map);
482 clear_buffer_boundary(bh_map);
457 bsize = gfs2_is_dir(ip) ? sdp->sd_jbsize : sdp->sd_sb.sb_bsize; 483 bsize = gfs2_is_dir(ip) ? sdp->sd_jbsize : sdp->sd_sb.sb_bsize;
458 484 size = (lblock + 1) * bsize;
459 height = calc_tree_height(ip, (lblock + 1) * bsize); 485
460 if (ip->i_di.di_height < height) { 486 if (size > ip->i_di.di_size) {
461 if (!create) 487 height = calc_tree_height(ip, size);
462 return 0; 488 if (ip->i_di.di_height < height) {
463 489 if (!create)
464 error = build_height(inode, height); 490 goto out_ok;
465 if (error) 491
466 return error; 492 error = build_height(inode, height);
493 if (error)
494 goto out_fail;
495 }
467 } 496 }
468 497
469 find_metapath(ip, lblock, mp); 498 find_metapath(ip, lblock, &mp);
470 end_of_metadata = ip->i_di.di_height - 1; 499 end_of_metadata = ip->i_di.di_height - 1;
471
472 error = gfs2_meta_inode_buffer(ip, &bh); 500 error = gfs2_meta_inode_buffer(ip, &bh);
473 if (error) 501 if (error)
474 return error; 502 goto out_fail;
475 503
476 for (x = 0; x < end_of_metadata; x++) { 504 for (x = 0; x < end_of_metadata; x++) {
477 lookup_block(ip, bh, x, mp, create, &new, &dblock); 505 lookup_block(ip, bh, x, &mp, create, &new, &dblock);
478 brelse(bh); 506 brelse(bh);
479 if (!dblock) 507 if (!dblock)
480 return 0; 508 goto out_ok;
481 509
482 error = gfs2_meta_indirect_buffer(ip, x+1, dblock, new, &bh); 510 error = gfs2_meta_indirect_buffer(ip, x+1, dblock, new, &bh);
483 if (error) 511 if (error)
484 return error; 512 goto out_fail;
485 } 513 }
486 514
487 boundary = lookup_block(ip, bh, end_of_metadata, mp, create, &new, &dblock); 515 boundary = lookup_block(ip, bh, end_of_metadata, &mp, create, &new, &dblock);
488 clear_buffer_mapped(bh_map);
489 clear_buffer_new(bh_map);
490 clear_buffer_boundary(bh_map);
491
492 if (dblock) { 516 if (dblock) {
493 map_bh(bh_map, inode->i_sb, dblock); 517 map_bh(bh_map, inode->i_sb, dblock);
494 if (boundary) 518 if (boundary)
495 set_buffer_boundary(bh); 519 set_buffer_boundary(bh_map);
496 if (new) { 520 if (new) {
497 struct buffer_head *dibh; 521 struct buffer_head *dibh;
498 error = gfs2_meta_inode_buffer(ip, &dibh); 522 error = gfs2_meta_inode_buffer(ip, &dibh);
499 if (!error) { 523 if (!error) {
500 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 524 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
501 gfs2_dinode_out(&ip->i_di, dibh->b_data); 525 gfs2_dinode_out(ip, dibh->b_data);
502 brelse(dibh); 526 brelse(dibh);
503 } 527 }
504 set_buffer_new(bh_map); 528 set_buffer_new(bh_map);
@@ -507,8 +531,8 @@ static int gfs2_block_pointers(struct inode *inode, u64 lblock, int create,
507 while(--maxlen && !buffer_boundary(bh_map)) { 531 while(--maxlen && !buffer_boundary(bh_map)) {
508 u64 eblock; 532 u64 eblock;
509 533
510 mp->mp_list[end_of_metadata]++; 534 mp.mp_list[end_of_metadata]++;
511 boundary = lookup_block(ip, bh, end_of_metadata, mp, 0, &new, &eblock); 535 boundary = lookup_block(ip, bh, end_of_metadata, &mp, 0, &new, &eblock);
512 if (eblock != ++dblock) 536 if (eblock != ++dblock)
513 break; 537 break;
514 bh_map->b_size += (1 << inode->i_blkbits); 538 bh_map->b_size += (1 << inode->i_blkbits);
@@ -518,43 +542,15 @@ static int gfs2_block_pointers(struct inode *inode, u64 lblock, int create,
518 } 542 }
519out_brelse: 543out_brelse:
520 brelse(bh); 544 brelse(bh);
521 return 0; 545out_ok:
522} 546 error = 0;
523 547out_fail:
524
525static inline void bmap_lock(struct inode *inode, int create)
526{
527 struct gfs2_inode *ip = GFS2_I(inode);
528 if (create)
529 down_write(&ip->i_rw_mutex);
530 else
531 down_read(&ip->i_rw_mutex);
532}
533
534static inline void bmap_unlock(struct inode *inode, int create)
535{
536 struct gfs2_inode *ip = GFS2_I(inode);
537 if (create)
538 up_write(&ip->i_rw_mutex);
539 else
540 up_read(&ip->i_rw_mutex);
541}
542
543int gfs2_block_map(struct inode *inode, u64 lblock, int create,
544 struct buffer_head *bh)
545{
546 struct metapath mp;
547 int ret;
548
549 bmap_lock(inode, create);
550 ret = gfs2_block_pointers(inode, lblock, create, bh, &mp);
551 bmap_unlock(inode, create); 548 bmap_unlock(inode, create);
552 return ret; 549 return error;
553} 550}
554 551
555int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen) 552int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
556{ 553{
557 struct metapath mp;
558 struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 }; 554 struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
559 int ret; 555 int ret;
560 int create = *new; 556 int create = *new;
@@ -564,9 +560,7 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi
564 BUG_ON(!new); 560 BUG_ON(!new);
565 561
566 bh.b_size = 1 << (inode->i_blkbits + 5); 562 bh.b_size = 1 << (inode->i_blkbits + 5);
567 bmap_lock(inode, create); 563 ret = gfs2_block_map(inode, lblock, create, &bh);
568 ret = gfs2_block_pointers(inode, lblock, create, &bh, &mp);
569 bmap_unlock(inode, create);
570 *extlen = bh.b_size >> inode->i_blkbits; 564 *extlen = bh.b_size >> inode->i_blkbits;
571 *dblock = bh.b_blocknr; 565 *dblock = bh.b_blocknr;
572 if (buffer_new(&bh)) 566 if (buffer_new(&bh))
@@ -600,7 +594,7 @@ static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
600{ 594{
601 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 595 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
602 struct buffer_head *bh = NULL; 596 struct buffer_head *bh = NULL;
603 u64 *top, *bottom; 597 __be64 *top, *bottom;
604 u64 bn; 598 u64 bn;
605 int error; 599 int error;
606 int mh_size = sizeof(struct gfs2_meta_header); 600 int mh_size = sizeof(struct gfs2_meta_header);
@@ -611,17 +605,17 @@ static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
611 return error; 605 return error;
612 dibh = bh; 606 dibh = bh;
613 607
614 top = (u64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0]; 608 top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0];
615 bottom = (u64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs; 609 bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs;
616 } else { 610 } else {
617 error = gfs2_meta_indirect_buffer(ip, height, block, 0, &bh); 611 error = gfs2_meta_indirect_buffer(ip, height, block, 0, &bh);
618 if (error) 612 if (error)
619 return error; 613 return error;
620 614
621 top = (u64 *)(bh->b_data + mh_size) + 615 top = (__be64 *)(bh->b_data + mh_size) +
622 (first ? mp->mp_list[height] : 0); 616 (first ? mp->mp_list[height] : 0);
623 617
624 bottom = (u64 *)(bh->b_data + mh_size) + sdp->sd_inptrs; 618 bottom = (__be64 *)(bh->b_data + mh_size) + sdp->sd_inptrs;
625 } 619 }
626 620
627 error = bc(ip, dibh, bh, top, bottom, height, data); 621 error = bc(ip, dibh, bh, top, bottom, height, data);
@@ -660,7 +654,7 @@ out:
660 */ 654 */
661 655
662static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh, 656static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
663 struct buffer_head *bh, u64 *top, u64 *bottom, 657 struct buffer_head *bh, __be64 *top, __be64 *bottom,
664 unsigned int height, void *data) 658 unsigned int height, void *data)
665{ 659{
666 struct strip_mine *sm = data; 660 struct strip_mine *sm = data;
@@ -668,7 +662,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
668 struct gfs2_rgrp_list rlist; 662 struct gfs2_rgrp_list rlist;
669 u64 bn, bstart; 663 u64 bn, bstart;
670 u32 blen; 664 u32 blen;
671 u64 *p; 665 __be64 *p;
672 unsigned int rg_blocks = 0; 666 unsigned int rg_blocks = 0;
673 int metadata; 667 int metadata;
674 unsigned int revokes = 0; 668 unsigned int revokes = 0;
@@ -770,6 +764,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
770 if (!ip->i_di.di_blocks) 764 if (!ip->i_di.di_blocks)
771 gfs2_consist_inode(ip); 765 gfs2_consist_inode(ip);
772 ip->i_di.di_blocks--; 766 ip->i_di.di_blocks--;
767 gfs2_set_inode_blocks(&ip->i_inode);
773 } 768 }
774 if (bstart) { 769 if (bstart) {
775 if (metadata) 770 if (metadata)
@@ -778,9 +773,9 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
778 gfs2_free_data(ip, bstart, blen); 773 gfs2_free_data(ip, bstart, blen);
779 } 774 }
780 775
781 ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); 776 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
782 777
783 gfs2_dinode_out(&ip->i_di, dibh->b_data); 778 gfs2_dinode_out(ip, dibh->b_data);
784 779
785 up_write(&ip->i_rw_mutex); 780 up_write(&ip->i_rw_mutex);
786 781
@@ -819,7 +814,7 @@ static int do_grow(struct gfs2_inode *ip, u64 size)
819 if (error) 814 if (error)
820 goto out; 815 goto out;
821 816
822 error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid); 817 error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
823 if (error) 818 if (error)
824 goto out_gunlock_q; 819 goto out_gunlock_q;
825 820
@@ -853,14 +848,14 @@ static int do_grow(struct gfs2_inode *ip, u64 size)
853 } 848 }
854 849
855 ip->i_di.di_size = size; 850 ip->i_di.di_size = size;
856 ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); 851 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
857 852
858 error = gfs2_meta_inode_buffer(ip, &dibh); 853 error = gfs2_meta_inode_buffer(ip, &dibh);
859 if (error) 854 if (error)
860 goto out_end_trans; 855 goto out_end_trans;
861 856
862 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 857 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
863 gfs2_dinode_out(&ip->i_di, dibh->b_data); 858 gfs2_dinode_out(ip, dibh->b_data);
864 brelse(dibh); 859 brelse(dibh);
865 860
866out_end_trans: 861out_end_trans:
@@ -968,9 +963,9 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
968 963
969 if (gfs2_is_stuffed(ip)) { 964 if (gfs2_is_stuffed(ip)) {
970 ip->i_di.di_size = size; 965 ip->i_di.di_size = size;
971 ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); 966 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
972 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 967 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
973 gfs2_dinode_out(&ip->i_di, dibh->b_data); 968 gfs2_dinode_out(ip, dibh->b_data);
974 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + size); 969 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + size);
975 error = 1; 970 error = 1;
976 971
@@ -980,10 +975,10 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
980 975
981 if (!error) { 976 if (!error) {
982 ip->i_di.di_size = size; 977 ip->i_di.di_size = size;
983 ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); 978 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
984 ip->i_di.di_flags |= GFS2_DIF_TRUNC_IN_PROG; 979 ip->i_di.di_flags |= GFS2_DIF_TRUNC_IN_PROG;
985 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 980 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
986 gfs2_dinode_out(&ip->i_di, dibh->b_data); 981 gfs2_dinode_out(ip, dibh->b_data);
987 } 982 }
988 } 983 }
989 984
@@ -1053,11 +1048,11 @@ static int trunc_end(struct gfs2_inode *ip)
1053 ip->i_num.no_addr; 1048 ip->i_num.no_addr;
1054 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); 1049 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
1055 } 1050 }
1056 ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); 1051 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
1057 ip->i_di.di_flags &= ~GFS2_DIF_TRUNC_IN_PROG; 1052 ip->i_di.di_flags &= ~GFS2_DIF_TRUNC_IN_PROG;
1058 1053
1059 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1054 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1060 gfs2_dinode_out(&ip->i_di, dibh->b_data); 1055 gfs2_dinode_out(ip, dibh->b_data);
1061 brelse(dibh); 1056 brelse(dibh);
1062 1057
1063out: 1058out:
@@ -1109,7 +1104,7 @@ int gfs2_truncatei(struct gfs2_inode *ip, u64 size)
1109{ 1104{
1110 int error; 1105 int error;
1111 1106
1112 if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), S_ISREG(ip->i_di.di_mode))) 1107 if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), S_ISREG(ip->i_inode.i_mode)))
1113 return -EINVAL; 1108 return -EINVAL;
1114 1109
1115 if (size > ip->i_di.di_size) 1110 if (size > ip->i_di.di_size)
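With the bmap rework above, gfs2_block_map() takes i_rw_mutex itself, shared for plain lookups and exclusive when create is set, and every error path leaves through a single unlock. A pthread rwlock sketch of that create-dependent locking shape (illustrative only, with a fake mapping):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;

static void bmap_lock(int create)
{
	if (create)
		pthread_rwlock_wrlock(&rw);   /* exclusive: the tree may grow */
	else
		pthread_rwlock_rdlock(&rw);   /* shared: read-only lookup */
}

static void bmap_unlock(void)
{
	pthread_rwlock_unlock(&rw);
}

/* Map a logical block; all exits go through the single unlock at the end. */
static int block_map(unsigned long long lblock, int create,
		     unsigned long long *dblock)
{
	int error = 0;

	bmap_lock(create);
	if (lblock > 1000000) {          /* pretend this is "beyond the tree" */
		error = create ? 0 : -1;
		goto out;
	}
	*dblock = lblock + 100;          /* fake logical-to-physical mapping */
out:
	bmap_unlock();
	return error;
}

int main(void)
{
	unsigned long long dblock = 0;
	int err = block_map(42, 0, &dblock);
	printf("err=%d dblock=%llu\n", err, dblock);
	return 0;
}
```

Build with -lpthread. Folding the old gfs2_block_pointers() into the locked function is what lets gfs2_extent_map() drop its own bmap_lock/bmap_unlock calls in the same hunk.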
diff --git a/fs/gfs2/daemon.c b/fs/gfs2/daemon.c
index cab1f68d4685..683cb5bda870 100644
--- a/fs/gfs2/daemon.c
+++ b/fs/gfs2/daemon.c
@@ -112,6 +112,7 @@ int gfs2_logd(void *data)
112 struct gfs2_sbd *sdp = data; 112 struct gfs2_sbd *sdp = data;
113 struct gfs2_holder ji_gh; 113 struct gfs2_holder ji_gh;
114 unsigned long t; 114 unsigned long t;
115 int need_flush;
115 116
116 while (!kthread_should_stop()) { 117 while (!kthread_should_stop()) {
117 /* Advance the log tail */ 118 /* Advance the log tail */
@@ -120,8 +121,10 @@ int gfs2_logd(void *data)
120 gfs2_tune_get(sdp, gt_log_flush_secs) * HZ; 121 gfs2_tune_get(sdp, gt_log_flush_secs) * HZ;
121 122
122 gfs2_ail1_empty(sdp, DIO_ALL); 123 gfs2_ail1_empty(sdp, DIO_ALL);
123 124 gfs2_log_lock(sdp);
124 if (time_after_eq(jiffies, t)) { 125 need_flush = sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks);
126 gfs2_log_unlock(sdp);
127 if (need_flush || time_after_eq(jiffies, t)) {
125 gfs2_log_flush(sdp, NULL); 128 gfs2_log_flush(sdp, NULL);
126 sdp->sd_log_flush_time = jiffies; 129 sdp->sd_log_flush_time = jiffies;
127 } 130 }
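The gfs2_logd change above makes the log daemon flush not only on its timer but also as soon as the in-core log buffer count crosses a tunable threshold, sampling that counter under the log lock. A condensed, single-threaded sketch of the loop shape (constant names are stand-ins for the tunables):

```c
#include <stdbool.h>
#include <stdio.h>

#define INCORE_LIMIT   100        /* stand-in for gt_incore_log_blocks */
#define FLUSH_INTERVAL 10         /* stand-in for gt_log_flush_secs, in ticks */

static unsigned int log_num_buf;  /* would be sampled under the log lock */
static unsigned long last_flush;

static void log_flush(unsigned long now)
{
	printf("flush at tick %lu (%u buffers)\n", now, log_num_buf);
	log_num_buf = 0;
	last_flush = now;
}

static void logd_tick(unsigned long now)
{
	bool need_flush = log_num_buf > INCORE_LIMIT;

	if (need_flush || now - last_flush >= FLUSH_INTERVAL)
		log_flush(now);
}

int main(void)
{
	for (unsigned long tick = 1; tick <= 30; tick++) {
		log_num_buf += 15;    /* pretend work queued some log buffers */
		logd_tick(tick);
	}
	return 0;
}
```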
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index e24af28b1a12..0fdcb7713cd9 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -131,8 +131,8 @@ static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
131 memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size); 131 memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
132 if (ip->i_di.di_size < offset + size) 132 if (ip->i_di.di_size < offset + size)
133 ip->i_di.di_size = offset + size; 133 ip->i_di.di_size = offset + size;
134 ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); 134 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
135 gfs2_dinode_out(&ip->i_di, dibh->b_data); 135 gfs2_dinode_out(ip, dibh->b_data);
136 136
137 brelse(dibh); 137 brelse(dibh);
138 138
@@ -229,10 +229,10 @@ out:
229 229
230 if (ip->i_di.di_size < offset + copied) 230 if (ip->i_di.di_size < offset + copied)
231 ip->i_di.di_size = offset + copied; 231 ip->i_di.di_size = offset + copied;
232 ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); 232 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
233 233
234 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 234 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
235 gfs2_dinode_out(&ip->i_di, dibh->b_data); 235 gfs2_dinode_out(ip, dibh->b_data);
236 brelse(dibh); 236 brelse(dibh);
237 237
238 return copied; 238 return copied;
@@ -340,10 +340,15 @@ fail:
340 return (copied) ? copied : error; 340 return (copied) ? copied : error;
341} 341}
342 342
343static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent)
344{
345 return dent->de_inum.no_addr == 0 || dent->de_inum.no_formal_ino == 0;
346}
347
343static inline int __gfs2_dirent_find(const struct gfs2_dirent *dent, 348static inline int __gfs2_dirent_find(const struct gfs2_dirent *dent,
344 const struct qstr *name, int ret) 349 const struct qstr *name, int ret)
345{ 350{
346 if (dent->de_inum.no_addr != 0 && 351 if (!gfs2_dirent_sentinel(dent) &&
347 be32_to_cpu(dent->de_hash) == name->hash && 352 be32_to_cpu(dent->de_hash) == name->hash &&
348 be16_to_cpu(dent->de_name_len) == name->len && 353 be16_to_cpu(dent->de_name_len) == name->len &&
349 memcmp(dent+1, name->name, name->len) == 0) 354 memcmp(dent+1, name->name, name->len) == 0)
@@ -388,7 +393,7 @@ static int gfs2_dirent_find_space(const struct gfs2_dirent *dent,
388 unsigned actual = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len)); 393 unsigned actual = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
389 unsigned totlen = be16_to_cpu(dent->de_rec_len); 394 unsigned totlen = be16_to_cpu(dent->de_rec_len);
390 395
391 if (!dent->de_inum.no_addr) 396 if (gfs2_dirent_sentinel(dent))
392 actual = GFS2_DIRENT_SIZE(0); 397 actual = GFS2_DIRENT_SIZE(0);
393 if (totlen - actual >= required) 398 if (totlen - actual >= required)
394 return 1; 399 return 1;
@@ -405,7 +410,7 @@ static int gfs2_dirent_gather(const struct gfs2_dirent *dent,
405 void *opaque) 410 void *opaque)
406{ 411{
407 struct dirent_gather *g = opaque; 412 struct dirent_gather *g = opaque;
408 if (dent->de_inum.no_addr) { 413 if (!gfs2_dirent_sentinel(dent)) {
409 g->pdent[g->offset++] = dent; 414 g->pdent[g->offset++] = dent;
410 } 415 }
411 return 0; 416 return 0;
@@ -433,10 +438,10 @@ static int gfs2_check_dirent(struct gfs2_dirent *dent, unsigned int offset,
433 if (unlikely(offset + size > len)) 438 if (unlikely(offset + size > len))
434 goto error; 439 goto error;
435 msg = "zero inode number"; 440 msg = "zero inode number";
436 if (unlikely(!first && !dent->de_inum.no_addr)) 441 if (unlikely(!first && gfs2_dirent_sentinel(dent)))
437 goto error; 442 goto error;
438 msg = "name length is greater than space in dirent"; 443 msg = "name length is greater than space in dirent";
439 if (dent->de_inum.no_addr && 444 if (!gfs2_dirent_sentinel(dent) &&
440 unlikely(sizeof(struct gfs2_dirent)+be16_to_cpu(dent->de_name_len) > 445 unlikely(sizeof(struct gfs2_dirent)+be16_to_cpu(dent->de_name_len) >
441 size)) 446 size))
442 goto error; 447 goto error;
@@ -598,7 +603,7 @@ static int dirent_next(struct gfs2_inode *dip, struct buffer_head *bh,
598 return ret; 603 return ret;
599 604
600 /* Only the first dent could ever have de_inum.no_addr == 0 */ 605 /* Only the first dent could ever have de_inum.no_addr == 0 */
601 if (!tmp->de_inum.no_addr) { 606 if (gfs2_dirent_sentinel(tmp)) {
602 gfs2_consist_inode(dip); 607 gfs2_consist_inode(dip);
603 return -EIO; 608 return -EIO;
604 } 609 }
@@ -621,7 +626,7 @@ static void dirent_del(struct gfs2_inode *dip, struct buffer_head *bh,
621{ 626{
622 u16 cur_rec_len, prev_rec_len; 627 u16 cur_rec_len, prev_rec_len;
623 628
624 if (!cur->de_inum.no_addr) { 629 if (gfs2_dirent_sentinel(cur)) {
625 gfs2_consist_inode(dip); 630 gfs2_consist_inode(dip);
626 return; 631 return;
627 } 632 }
@@ -633,7 +638,8 @@ static void dirent_del(struct gfs2_inode *dip, struct buffer_head *bh,
633 out the inode number and return. */ 638 out the inode number and return. */
634 639
635 if (!prev) { 640 if (!prev) {
636 cur->de_inum.no_addr = 0; /* No endianess worries */ 641 cur->de_inum.no_addr = 0;
642 cur->de_inum.no_formal_ino = 0;
637 return; 643 return;
638 } 644 }
639 645
@@ -664,7 +670,7 @@ static struct gfs2_dirent *gfs2_init_dirent(struct inode *inode,
664 struct gfs2_dirent *ndent; 670 struct gfs2_dirent *ndent;
665 unsigned offset = 0, totlen; 671 unsigned offset = 0, totlen;
666 672
667 if (dent->de_inum.no_addr) 673 if (!gfs2_dirent_sentinel(dent))
668 offset = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len)); 674 offset = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
669 totlen = be16_to_cpu(dent->de_rec_len); 675 totlen = be16_to_cpu(dent->de_rec_len);
670 BUG_ON(offset + name->len > totlen); 676 BUG_ON(offset + name->len > totlen);
@@ -713,12 +719,12 @@ static int get_leaf(struct gfs2_inode *dip, u64 leaf_no,
713static int get_leaf_nr(struct gfs2_inode *dip, u32 index, 719static int get_leaf_nr(struct gfs2_inode *dip, u32 index,
714 u64 *leaf_out) 720 u64 *leaf_out)
715{ 721{
716 u64 leaf_no; 722 __be64 leaf_no;
717 int error; 723 int error;
718 724
719 error = gfs2_dir_read_data(dip, (char *)&leaf_no, 725 error = gfs2_dir_read_data(dip, (char *)&leaf_no,
720 index * sizeof(u64), 726 index * sizeof(__be64),
721 sizeof(u64), 0); 727 sizeof(__be64), 0);
722 if (error != sizeof(u64)) 728 if (error != sizeof(u64))
723 return (error < 0) ? error : -EIO; 729 return (error < 0) ? error : -EIO;
724 730
@@ -837,7 +843,8 @@ static int dir_make_exhash(struct inode *inode)
837 struct gfs2_leaf *leaf; 843 struct gfs2_leaf *leaf;
838 int y; 844 int y;
839 u32 x; 845 u32 x;
840 u64 *lp, bn; 846 __be64 *lp;
847 u64 bn;
841 int error; 848 int error;
842 849
843 error = gfs2_meta_inode_buffer(dip, &dibh); 850 error = gfs2_meta_inode_buffer(dip, &dibh);
@@ -893,20 +900,20 @@ static int dir_make_exhash(struct inode *inode)
893 gfs2_trans_add_bh(dip->i_gl, dibh, 1); 900 gfs2_trans_add_bh(dip->i_gl, dibh, 1);
894 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); 901 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
895 902
896 lp = (u64 *)(dibh->b_data + sizeof(struct gfs2_dinode)); 903 lp = (__be64 *)(dibh->b_data + sizeof(struct gfs2_dinode));
897 904
898 for (x = sdp->sd_hash_ptrs; x--; lp++) 905 for (x = sdp->sd_hash_ptrs; x--; lp++)
899 *lp = cpu_to_be64(bn); 906 *lp = cpu_to_be64(bn);
900 907
901 dip->i_di.di_size = sdp->sd_sb.sb_bsize / 2; 908 dip->i_di.di_size = sdp->sd_sb.sb_bsize / 2;
902 dip->i_di.di_blocks++; 909 dip->i_di.di_blocks++;
910 gfs2_set_inode_blocks(&dip->i_inode);
903 dip->i_di.di_flags |= GFS2_DIF_EXHASH; 911 dip->i_di.di_flags |= GFS2_DIF_EXHASH;
904 dip->i_di.di_payload_format = 0;
905 912
906 for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ; 913 for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ;
907 dip->i_di.di_depth = y; 914 dip->i_di.di_depth = y;
908 915
909 gfs2_dinode_out(&dip->i_di, dibh->b_data); 916 gfs2_dinode_out(dip, dibh->b_data);
910 917
911 brelse(dibh); 918 brelse(dibh);
912 919
@@ -929,7 +936,8 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
929 struct gfs2_leaf *nleaf, *oleaf; 936 struct gfs2_leaf *nleaf, *oleaf;
930 struct gfs2_dirent *dent = NULL, *prev = NULL, *next = NULL, *new; 937 struct gfs2_dirent *dent = NULL, *prev = NULL, *next = NULL, *new;
931 u32 start, len, half_len, divider; 938 u32 start, len, half_len, divider;
932 u64 bn, *lp, leaf_no; 939 u64 bn, leaf_no;
940 __be64 *lp;
933 u32 index; 941 u32 index;
934 int x, moved = 0; 942 int x, moved = 0;
935 int error; 943 int error;
@@ -974,7 +982,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
974 /* Change the pointers. 982 /* Change the pointers.
975 Don't bother distinguishing stuffed from non-stuffed. 983 Don't bother distinguishing stuffed from non-stuffed.
976 This code is complicated enough already. */ 984 This code is complicated enough already. */
977 lp = kmalloc(half_len * sizeof(u64), GFP_NOFS | __GFP_NOFAIL); 985 lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS | __GFP_NOFAIL);
978 /* Change the pointers */ 986 /* Change the pointers */
979 for (x = 0; x < half_len; x++) 987 for (x = 0; x < half_len; x++)
980 lp[x] = cpu_to_be64(bn); 988 lp[x] = cpu_to_be64(bn);
@@ -1000,7 +1008,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
1000 if (dirent_next(dip, obh, &next)) 1008 if (dirent_next(dip, obh, &next))
1001 next = NULL; 1009 next = NULL;
1002 1010
1003 if (dent->de_inum.no_addr && 1011 if (!gfs2_dirent_sentinel(dent) &&
1004 be32_to_cpu(dent->de_hash) < divider) { 1012 be32_to_cpu(dent->de_hash) < divider) {
1005 struct qstr str; 1013 struct qstr str;
1006 str.name = (char*)(dent+1); 1014 str.name = (char*)(dent+1);
@@ -1037,7 +1045,8 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
1037 error = gfs2_meta_inode_buffer(dip, &dibh); 1045 error = gfs2_meta_inode_buffer(dip, &dibh);
1038 if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) { 1046 if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) {
1039 dip->i_di.di_blocks++; 1047 dip->i_di.di_blocks++;
1040 gfs2_dinode_out(&dip->i_di, dibh->b_data); 1048 gfs2_set_inode_blocks(&dip->i_inode);
1049 gfs2_dinode_out(dip, dibh->b_data);
1041 brelse(dibh); 1050 brelse(dibh);
1042 } 1051 }
1043 1052
@@ -1117,7 +1126,7 @@ static int dir_double_exhash(struct gfs2_inode *dip)
1117 error = gfs2_meta_inode_buffer(dip, &dibh); 1126 error = gfs2_meta_inode_buffer(dip, &dibh);
1118 if (!gfs2_assert_withdraw(sdp, !error)) { 1127 if (!gfs2_assert_withdraw(sdp, !error)) {
1119 dip->i_di.di_depth++; 1128 dip->i_di.di_depth++;
1120 gfs2_dinode_out(&dip->i_di, dibh->b_data); 1129 gfs2_dinode_out(dip, dibh->b_data);
1121 brelse(dibh); 1130 brelse(dibh);
1122 } 1131 }
1123 1132
@@ -1194,7 +1203,7 @@ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
1194 int *copied) 1203 int *copied)
1195{ 1204{
1196 const struct gfs2_dirent *dent, *dent_next; 1205 const struct gfs2_dirent *dent, *dent_next;
1197 struct gfs2_inum inum; 1206 struct gfs2_inum_host inum;
1198 u64 off, off_next; 1207 u64 off, off_next;
1199 unsigned int x, y; 1208 unsigned int x, y;
1200 int run = 0; 1209 int run = 0;
@@ -1341,7 +1350,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
1341 u32 hsize, len = 0; 1350 u32 hsize, len = 0;
1342 u32 ht_offset, lp_offset, ht_offset_cur = -1; 1351 u32 ht_offset, lp_offset, ht_offset_cur = -1;
1343 u32 hash, index; 1352 u32 hash, index;
1344 u64 *lp; 1353 __be64 *lp;
1345 int copied = 0; 1354 int copied = 0;
1346 int error = 0; 1355 int error = 0;
1347 unsigned depth = 0; 1356 unsigned depth = 0;
@@ -1365,7 +1374,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
1365 1374
1366 if (ht_offset_cur != ht_offset) { 1375 if (ht_offset_cur != ht_offset) {
1367 error = gfs2_dir_read_data(dip, (char *)lp, 1376 error = gfs2_dir_read_data(dip, (char *)lp,
1368 ht_offset * sizeof(u64), 1377 ht_offset * sizeof(__be64),
1369 sdp->sd_hash_bsize, 1); 1378 sdp->sd_hash_bsize, 1);
1370 if (error != sdp->sd_hash_bsize) { 1379 if (error != sdp->sd_hash_bsize) {
1371 if (error >= 0) 1380 if (error >= 0)
@@ -1456,7 +1465,7 @@ out:
1456 */ 1465 */
1457 1466
1458int gfs2_dir_search(struct inode *dir, const struct qstr *name, 1467int gfs2_dir_search(struct inode *dir, const struct qstr *name,
1459 struct gfs2_inum *inum, unsigned int *type) 1468 struct gfs2_inum_host *inum, unsigned int *type)
1460{ 1469{
1461 struct buffer_head *bh; 1470 struct buffer_head *bh;
1462 struct gfs2_dirent *dent; 1471 struct gfs2_dirent *dent;
@@ -1515,7 +1524,8 @@ static int dir_new_leaf(struct inode *inode, const struct qstr *name)
1515 return error; 1524 return error;
1516 gfs2_trans_add_bh(ip->i_gl, bh, 1); 1525 gfs2_trans_add_bh(ip->i_gl, bh, 1);
1517 ip->i_di.di_blocks++; 1526 ip->i_di.di_blocks++;
1518 gfs2_dinode_out(&ip->i_di, bh->b_data); 1527 gfs2_set_inode_blocks(&ip->i_inode);
1528 gfs2_dinode_out(ip, bh->b_data);
1519 brelse(bh); 1529 brelse(bh);
1520 return 0; 1530 return 0;
1521} 1531}
@@ -1531,7 +1541,7 @@ static int dir_new_leaf(struct inode *inode, const struct qstr *name)
1531 */ 1541 */
1532 1542
1533int gfs2_dir_add(struct inode *inode, const struct qstr *name, 1543int gfs2_dir_add(struct inode *inode, const struct qstr *name,
1534 const struct gfs2_inum *inum, unsigned type) 1544 const struct gfs2_inum_host *inum, unsigned type)
1535{ 1545{
1536 struct gfs2_inode *ip = GFS2_I(inode); 1546 struct gfs2_inode *ip = GFS2_I(inode);
1537 struct buffer_head *bh; 1547 struct buffer_head *bh;
@@ -1558,8 +1568,8 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
1558 break; 1568 break;
1559 gfs2_trans_add_bh(ip->i_gl, bh, 1); 1569 gfs2_trans_add_bh(ip->i_gl, bh, 1);
1560 ip->i_di.di_entries++; 1570 ip->i_di.di_entries++;
1561 ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); 1571 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
1562 gfs2_dinode_out(&ip->i_di, bh->b_data); 1572 gfs2_dinode_out(ip, bh->b_data);
1563 brelse(bh); 1573 brelse(bh);
1564 error = 0; 1574 error = 0;
1565 break; 1575 break;
@@ -1644,8 +1654,8 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *name)
1644 gfs2_consist_inode(dip); 1654 gfs2_consist_inode(dip);
1645 gfs2_trans_add_bh(dip->i_gl, bh, 1); 1655 gfs2_trans_add_bh(dip->i_gl, bh, 1);
1646 dip->i_di.di_entries--; 1656 dip->i_di.di_entries--;
1647 dip->i_di.di_mtime = dip->i_di.di_ctime = get_seconds(); 1657 dip->i_inode.i_mtime.tv_sec = dip->i_inode.i_ctime.tv_sec = get_seconds();
1648 gfs2_dinode_out(&dip->i_di, bh->b_data); 1658 gfs2_dinode_out(dip, bh->b_data);
1649 brelse(bh); 1659 brelse(bh);
1650 mark_inode_dirty(&dip->i_inode); 1660 mark_inode_dirty(&dip->i_inode);
1651 1661
@@ -1666,7 +1676,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *name)
1666 */ 1676 */
1667 1677
1668int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename, 1678int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
1669 struct gfs2_inum *inum, unsigned int new_type) 1679 struct gfs2_inum_host *inum, unsigned int new_type)
1670{ 1680{
1671 struct buffer_head *bh; 1681 struct buffer_head *bh;
1672 struct gfs2_dirent *dent; 1682 struct gfs2_dirent *dent;
@@ -1692,8 +1702,8 @@ int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
1692 gfs2_trans_add_bh(dip->i_gl, bh, 1); 1702 gfs2_trans_add_bh(dip->i_gl, bh, 1);
1693 } 1703 }
1694 1704
1695 dip->i_di.di_mtime = dip->i_di.di_ctime = get_seconds(); 1705 dip->i_inode.i_mtime.tv_sec = dip->i_inode.i_ctime.tv_sec = get_seconds();
1696 gfs2_dinode_out(&dip->i_di, bh->b_data); 1706 gfs2_dinode_out(dip, bh->b_data);
1697 brelse(bh); 1707 brelse(bh);
1698 return 0; 1708 return 0;
1699} 1709}
@@ -1715,7 +1725,7 @@ static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data)
1715 u32 hsize, len; 1725 u32 hsize, len;
1716 u32 ht_offset, lp_offset, ht_offset_cur = -1; 1726 u32 ht_offset, lp_offset, ht_offset_cur = -1;
1717 u32 index = 0; 1727 u32 index = 0;
1718 u64 *lp; 1728 __be64 *lp;
1719 u64 leaf_no; 1729 u64 leaf_no;
1720 int error = 0; 1730 int error = 0;
1721 1731
@@ -1735,7 +1745,7 @@ static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data)
1735 1745
1736 if (ht_offset_cur != ht_offset) { 1746 if (ht_offset_cur != ht_offset) {
1737 error = gfs2_dir_read_data(dip, (char *)lp, 1747 error = gfs2_dir_read_data(dip, (char *)lp,
1738 ht_offset * sizeof(u64), 1748 ht_offset * sizeof(__be64),
1739 sdp->sd_hash_bsize, 1); 1749 sdp->sd_hash_bsize, 1);
1740 if (error != sdp->sd_hash_bsize) { 1750 if (error != sdp->sd_hash_bsize) {
1741 if (error >= 0) 1751 if (error >= 0)
@@ -1859,6 +1869,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
1859 if (!dip->i_di.di_blocks) 1869 if (!dip->i_di.di_blocks)
1860 gfs2_consist_inode(dip); 1870 gfs2_consist_inode(dip);
1861 dip->i_di.di_blocks--; 1871 dip->i_di.di_blocks--;
1872 gfs2_set_inode_blocks(&dip->i_inode);
1862 } 1873 }
1863 1874
1864 error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size); 1875 error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size);
@@ -1873,7 +1884,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
1873 goto out_end_trans; 1884 goto out_end_trans;
1874 1885
1875 gfs2_trans_add_bh(dip->i_gl, dibh, 1); 1886 gfs2_trans_add_bh(dip->i_gl, dibh, 1);
1876 gfs2_dinode_out(&dip->i_di, dibh->b_data); 1887 gfs2_dinode_out(dip, dibh->b_data);
1877 brelse(dibh); 1888 brelse(dibh);
1878 1889
1879out_end_trans: 1890out_end_trans:
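The dir.c hunks above replace the scattered de_inum.no_addr != 0 tests with a single helper, gfs2_dirent_sentinel(), and when the first entry in a block is deleted they now clear both no_addr and no_formal_ino so the sentinel test stays consistent. A standalone illustration of that refactor (the struct layout is simplified for the example):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dirent_like {
	uint64_t no_addr;          /* 0 in either field marks an unused slot */
	uint64_t no_formal_ino;
	char name[32];
};

/* One place to define what an "empty slot" means. */
static bool dirent_sentinel(const struct dirent_like *d)
{
	return d->no_addr == 0 || d->no_formal_ino == 0;
}

static void dirent_clear(struct dirent_like *d)
{
	/* Clear both fields so every reader agrees the slot is free. */
	d->no_addr = 0;
	d->no_formal_ino = 0;
}

int main(void)
{
	struct dirent_like d = { .no_addr = 7, .no_formal_ino = 7, .name = "foo" };

	printf("before: %s\n", dirent_sentinel(&d) ? "empty" : "in use");
	dirent_clear(&d);
	printf("after:  %s\n", dirent_sentinel(&d) ? "empty" : "in use");
	return 0;
}
```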
diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h
index 371233419b07..b21b33668a5b 100644
--- a/fs/gfs2/dir.h
+++ b/fs/gfs2/dir.h
@@ -31,17 +31,17 @@ struct gfs2_inum;
31typedef int (*gfs2_filldir_t) (void *opaque, 31typedef int (*gfs2_filldir_t) (void *opaque,
32 const char *name, unsigned int length, 32 const char *name, unsigned int length,
33 u64 offset, 33 u64 offset,
34 struct gfs2_inum *inum, unsigned int type); 34 struct gfs2_inum_host *inum, unsigned int type);
35 35
36int gfs2_dir_search(struct inode *dir, const struct qstr *filename, 36int gfs2_dir_search(struct inode *dir, const struct qstr *filename,
37 struct gfs2_inum *inum, unsigned int *type); 37 struct gfs2_inum_host *inum, unsigned int *type);
38int gfs2_dir_add(struct inode *inode, const struct qstr *filename, 38int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
39 const struct gfs2_inum *inum, unsigned int type); 39 const struct gfs2_inum_host *inum, unsigned int type);
40int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename); 40int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename);
41int gfs2_dir_read(struct inode *inode, u64 * offset, void *opaque, 41int gfs2_dir_read(struct inode *inode, u64 * offset, void *opaque,
42 gfs2_filldir_t filldir); 42 gfs2_filldir_t filldir);
43int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename, 43int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
44 struct gfs2_inum *new_inum, unsigned int new_type); 44 struct gfs2_inum_host *new_inum, unsigned int new_type);
45 45
46int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip); 46int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);
47 47
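The dir.h prototypes above move from struct gfs2_inum to struct gfs2_inum_host, part of separating on-disk structures (whose fields carry big-endian annotations like __be64) from native-endian in-memory counterparts that are filled in by explicit conversion helpers. A generic sketch of that split using the glibc endian helpers (struct and function names here are illustrative, not the GFS2 ones):

```c
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct inum_ondisk {            /* as stored on disk: always big-endian */
	uint64_t no_formal_ino;     /* holds a be64 value */
	uint64_t no_addr;
};

struct inum_host {              /* in memory: native endianness */
	uint64_t no_formal_ino;
	uint64_t no_addr;
};

static void inum_in(struct inum_host *h, const struct inum_ondisk *d)
{
	h->no_formal_ino = be64toh(d->no_formal_ino);
	h->no_addr = be64toh(d->no_addr);
}

static void inum_out(const struct inum_host *h, struct inum_ondisk *d)
{
	d->no_formal_ino = htobe64(h->no_formal_ino);
	d->no_addr = htobe64(h->no_addr);
}

int main(void)
{
	struct inum_host h = { .no_formal_ino = 1, .no_addr = 0x1234 };
	struct inum_ondisk d;

	inum_out(&h, &d);           /* host -> disk */
	inum_in(&h, &d);            /* disk -> host, should round-trip */
	printf("round trip: %llu %llx\n",
	       (unsigned long long)h.no_formal_ino,
	       (unsigned long long)h.no_addr);
	return 0;
}
```

Keeping the annotated types on the disk side is also what allows a checker such as sparse to flag missing byte swaps, which is why so many of the gfs2 hunks above change u64 pointers into __be64 pointers.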
diff --git a/fs/gfs2/eaops.c b/fs/gfs2/eaops.c
index 92c54e9b0dc3..cd747c00f670 100644
--- a/fs/gfs2/eaops.c
+++ b/fs/gfs2/eaops.c
@@ -120,7 +120,7 @@ static int system_eo_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
120 120
121 if (GFS2_ACL_IS_ACCESS(er->er_name, er->er_name_len)) { 121 if (GFS2_ACL_IS_ACCESS(er->er_name, er->er_name_len)) {
122 if (!(er->er_flags & GFS2_ERF_MODE)) { 122 if (!(er->er_flags & GFS2_ERF_MODE)) {
123 er->er_mode = ip->i_di.di_mode; 123 er->er_mode = ip->i_inode.i_mode;
124 er->er_flags |= GFS2_ERF_MODE; 124 er->er_flags |= GFS2_ERF_MODE;
125 } 125 }
126 error = gfs2_acl_validate_set(ip, 1, er, 126 error = gfs2_acl_validate_set(ip, 1, er,
diff --git a/fs/gfs2/eattr.c b/fs/gfs2/eattr.c
index a65a4ccfd4dd..ebebbdcd7057 100644
--- a/fs/gfs2/eattr.c
+++ b/fs/gfs2/eattr.c
@@ -112,7 +112,7 @@ fail:
112static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data) 112static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
113{ 113{
114 struct buffer_head *bh, *eabh; 114 struct buffer_head *bh, *eabh;
115 u64 *eablk, *end; 115 __be64 *eablk, *end;
116 int error; 116 int error;
117 117
118 error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &bh); 118 error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &bh);
@@ -129,7 +129,7 @@ static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
129 goto out; 129 goto out;
130 } 130 }
131 131
132 eablk = (u64 *)(bh->b_data + sizeof(struct gfs2_meta_header)); 132 eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
133 end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs; 133 end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;
134 134
135 for (; eablk < end; eablk++) { 135 for (; eablk < end; eablk++) {
@@ -224,7 +224,8 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
224 struct gfs2_rgrpd *rgd; 224 struct gfs2_rgrpd *rgd;
225 struct gfs2_holder rg_gh; 225 struct gfs2_holder rg_gh;
226 struct buffer_head *dibh; 226 struct buffer_head *dibh;
227 u64 *dataptrs, bn = 0; 227 __be64 *dataptrs;
228 u64 bn = 0;
228 u64 bstart = 0; 229 u64 bstart = 0;
229 unsigned int blen = 0; 230 unsigned int blen = 0;
230 unsigned int blks = 0; 231 unsigned int blks = 0;
@@ -280,6 +281,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
280 if (!ip->i_di.di_blocks) 281 if (!ip->i_di.di_blocks)
281 gfs2_consist_inode(ip); 282 gfs2_consist_inode(ip);
282 ip->i_di.di_blocks--; 283 ip->i_di.di_blocks--;
284 gfs2_set_inode_blocks(&ip->i_inode);
283 } 285 }
284 if (bstart) 286 if (bstart)
285 gfs2_free_meta(ip, bstart, blen); 287 gfs2_free_meta(ip, bstart, blen);
@@ -299,9 +301,9 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
299 301
300 error = gfs2_meta_inode_buffer(ip, &dibh); 302 error = gfs2_meta_inode_buffer(ip, &dibh);
301 if (!error) { 303 if (!error) {
302 ip->i_di.di_ctime = get_seconds(); 304 ip->i_inode.i_ctime.tv_sec = get_seconds();
303 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 305 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
304 gfs2_dinode_out(&ip->i_di, dibh->b_data); 306 gfs2_dinode_out(ip, dibh->b_data);
305 brelse(dibh); 307 brelse(dibh);
306 } 308 }
307 309
@@ -444,7 +446,7 @@ static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
444 struct buffer_head **bh; 446 struct buffer_head **bh;
445 unsigned int amount = GFS2_EA_DATA_LEN(ea); 447 unsigned int amount = GFS2_EA_DATA_LEN(ea);
446 unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize); 448 unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
447 u64 *dataptrs = GFS2_EA2DATAPTRS(ea); 449 __be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
448 unsigned int x; 450 unsigned int x;
449 int error = 0; 451 int error = 0;
450 452
@@ -597,6 +599,7 @@ static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
597 ea->ea_num_ptrs = 0; 599 ea->ea_num_ptrs = 0;
598 600
599 ip->i_di.di_blocks++; 601 ip->i_di.di_blocks++;
602 gfs2_set_inode_blocks(&ip->i_inode);
600 603
601 return 0; 604 return 0;
602} 605}
@@ -629,7 +632,7 @@ static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
629 ea->ea_num_ptrs = 0; 632 ea->ea_num_ptrs = 0;
630 memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len); 633 memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
631 } else { 634 } else {
632 u64 *dataptr = GFS2_EA2DATAPTRS(ea); 635 __be64 *dataptr = GFS2_EA2DATAPTRS(ea);
633 const char *data = er->er_data; 636 const char *data = er->er_data;
634 unsigned int data_len = er->er_data_len; 637 unsigned int data_len = er->er_data_len;
635 unsigned int copy; 638 unsigned int copy;
@@ -648,6 +651,7 @@ static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
648 gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED); 651 gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);
649 652
650 ip->i_di.di_blocks++; 653 ip->i_di.di_blocks++;
654 gfs2_set_inode_blocks(&ip->i_inode);
651 655
652 copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize : 656 copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
653 data_len; 657 data_len;
@@ -686,7 +690,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
686 if (error) 690 if (error)
687 goto out; 691 goto out;
688 692
689 error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid); 693 error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
690 if (error) 694 if (error)
691 goto out_gunlock_q; 695 goto out_gunlock_q;
692 696
@@ -710,13 +714,13 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
710 if (!error) { 714 if (!error) {
711 if (er->er_flags & GFS2_ERF_MODE) { 715 if (er->er_flags & GFS2_ERF_MODE) {
712 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), 716 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
713 (ip->i_di.di_mode & S_IFMT) == 717 (ip->i_inode.i_mode & S_IFMT) ==
714 (er->er_mode & S_IFMT)); 718 (er->er_mode & S_IFMT));
715 ip->i_di.di_mode = er->er_mode; 719 ip->i_inode.i_mode = er->er_mode;
716 } 720 }
717 ip->i_di.di_ctime = get_seconds(); 721 ip->i_inode.i_ctime.tv_sec = get_seconds();
718 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 722 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
719 gfs2_dinode_out(&ip->i_di, dibh->b_data); 723 gfs2_dinode_out(ip, dibh->b_data);
720 brelse(dibh); 724 brelse(dibh);
721 } 725 }
722 726
@@ -846,12 +850,12 @@ static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
846 850
847 if (er->er_flags & GFS2_ERF_MODE) { 851 if (er->er_flags & GFS2_ERF_MODE) {
848 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), 852 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
849 (ip->i_di.di_mode & S_IFMT) == (er->er_mode & S_IFMT)); 853 (ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT));
850 ip->i_di.di_mode = er->er_mode; 854 ip->i_inode.i_mode = er->er_mode;
851 } 855 }
852 ip->i_di.di_ctime = get_seconds(); 856 ip->i_inode.i_ctime.tv_sec = get_seconds();
853 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 857 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
854 gfs2_dinode_out(&ip->i_di, dibh->b_data); 858 gfs2_dinode_out(ip, dibh->b_data);
855 brelse(dibh); 859 brelse(dibh);
856out: 860out:
857 gfs2_trans_end(GFS2_SB(&ip->i_inode)); 861 gfs2_trans_end(GFS2_SB(&ip->i_inode));
@@ -931,12 +935,12 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
931{ 935{
932 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 936 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
933 struct buffer_head *indbh, *newbh; 937 struct buffer_head *indbh, *newbh;
934 u64 *eablk; 938 __be64 *eablk;
935 int error; 939 int error;
936 int mh_size = sizeof(struct gfs2_meta_header); 940 int mh_size = sizeof(struct gfs2_meta_header);
937 941
938 if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) { 942 if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
939 u64 *end; 943 __be64 *end;
940 944
941 error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, 945 error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT,
942 &indbh); 946 &indbh);
@@ -948,7 +952,7 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
948 goto out; 952 goto out;
949 } 953 }
950 954
951 eablk = (u64 *)(indbh->b_data + mh_size); 955 eablk = (__be64 *)(indbh->b_data + mh_size);
952 end = eablk + sdp->sd_inptrs; 956 end = eablk + sdp->sd_inptrs;
953 957
954 for (; eablk < end; eablk++) 958 for (; eablk < end; eablk++)
@@ -971,11 +975,12 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
971 gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN); 975 gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
972 gfs2_buffer_clear_tail(indbh, mh_size); 976 gfs2_buffer_clear_tail(indbh, mh_size);
973 977
974 eablk = (u64 *)(indbh->b_data + mh_size); 978 eablk = (__be64 *)(indbh->b_data + mh_size);
975 *eablk = cpu_to_be64(ip->i_di.di_eattr); 979 *eablk = cpu_to_be64(ip->i_di.di_eattr);
976 ip->i_di.di_eattr = blk; 980 ip->i_di.di_eattr = blk;
977 ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT; 981 ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
978 ip->i_di.di_blocks++; 982 ip->i_di.di_blocks++;
983 gfs2_set_inode_blocks(&ip->i_inode);
979 984
980 eablk++; 985 eablk++;
981 } 986 }
@@ -1129,9 +1134,9 @@ static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
1129 1134
1130 error = gfs2_meta_inode_buffer(ip, &dibh); 1135 error = gfs2_meta_inode_buffer(ip, &dibh);
1131 if (!error) { 1136 if (!error) {
1132 ip->i_di.di_ctime = get_seconds(); 1137 ip->i_inode.i_ctime.tv_sec = get_seconds();
1133 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1138 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1134 gfs2_dinode_out(&ip->i_di, dibh->b_data); 1139 gfs2_dinode_out(ip, dibh->b_data);
1135 brelse(dibh); 1140 brelse(dibh);
1136 } 1141 }
1137 1142
@@ -1202,7 +1207,7 @@ static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
1202 struct buffer_head **bh; 1207 struct buffer_head **bh;
1203 unsigned int amount = GFS2_EA_DATA_LEN(ea); 1208 unsigned int amount = GFS2_EA_DATA_LEN(ea);
1204 unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize); 1209 unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
1205 u64 *dataptrs = GFS2_EA2DATAPTRS(ea); 1210 __be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
1206 unsigned int x; 1211 unsigned int x;
1207 int error; 1212 int error;
1208 1213
@@ -1284,9 +1289,8 @@ int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
1284 if (!error) { 1289 if (!error) {
1285 error = inode_setattr(&ip->i_inode, attr); 1290 error = inode_setattr(&ip->i_inode, attr);
1286 gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error); 1291 gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
1287 gfs2_inode_attr_out(ip);
1288 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1292 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1289 gfs2_dinode_out(&ip->i_di, dibh->b_data); 1293 gfs2_dinode_out(ip, dibh->b_data);
1290 brelse(dibh); 1294 brelse(dibh);
1291 } 1295 }
1292 1296
@@ -1300,7 +1304,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
1300 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1304 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1301 struct gfs2_rgrp_list rlist; 1305 struct gfs2_rgrp_list rlist;
1302 struct buffer_head *indbh, *dibh; 1306 struct buffer_head *indbh, *dibh;
1303 u64 *eablk, *end; 1307 __be64 *eablk, *end;
1304 unsigned int rg_blocks = 0; 1308 unsigned int rg_blocks = 0;
1305 u64 bstart = 0; 1309 u64 bstart = 0;
1306 unsigned int blen = 0; 1310 unsigned int blen = 0;
@@ -1319,7 +1323,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
1319 goto out; 1323 goto out;
1320 } 1324 }
1321 1325
1322 eablk = (u64 *)(indbh->b_data + sizeof(struct gfs2_meta_header)); 1326 eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
1323 end = eablk + sdp->sd_inptrs; 1327 end = eablk + sdp->sd_inptrs;
1324 1328
1325 for (; eablk < end; eablk++) { 1329 for (; eablk < end; eablk++) {
@@ -1363,7 +1367,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
1363 1367
1364 gfs2_trans_add_bh(ip->i_gl, indbh, 1); 1368 gfs2_trans_add_bh(ip->i_gl, indbh, 1);
1365 1369
1366 eablk = (u64 *)(indbh->b_data + sizeof(struct gfs2_meta_header)); 1370 eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
1367 bstart = 0; 1371 bstart = 0;
1368 blen = 0; 1372 blen = 0;
1369 1373
@@ -1387,6 +1391,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
1387 if (!ip->i_di.di_blocks) 1391 if (!ip->i_di.di_blocks)
1388 gfs2_consist_inode(ip); 1392 gfs2_consist_inode(ip);
1389 ip->i_di.di_blocks--; 1393 ip->i_di.di_blocks--;
1394 gfs2_set_inode_blocks(&ip->i_inode);
1390 } 1395 }
1391 if (bstart) 1396 if (bstart)
1392 gfs2_free_meta(ip, bstart, blen); 1397 gfs2_free_meta(ip, bstart, blen);
@@ -1396,7 +1401,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
1396 error = gfs2_meta_inode_buffer(ip, &dibh); 1401 error = gfs2_meta_inode_buffer(ip, &dibh);
1397 if (!error) { 1402 if (!error) {
1398 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1403 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1399 gfs2_dinode_out(&ip->i_di, dibh->b_data); 1404 gfs2_dinode_out(ip, dibh->b_data);
1400 brelse(dibh); 1405 brelse(dibh);
1401 } 1406 }
1402 1407
@@ -1441,11 +1446,12 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
1441 if (!ip->i_di.di_blocks) 1446 if (!ip->i_di.di_blocks)
1442 gfs2_consist_inode(ip); 1447 gfs2_consist_inode(ip);
1443 ip->i_di.di_blocks--; 1448 ip->i_di.di_blocks--;
1449 gfs2_set_inode_blocks(&ip->i_inode);
1444 1450
1445 error = gfs2_meta_inode_buffer(ip, &dibh); 1451 error = gfs2_meta_inode_buffer(ip, &dibh);
1446 if (!error) { 1452 if (!error) {
1447 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1453 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1448 gfs2_dinode_out(&ip->i_di, dibh->b_data); 1454 gfs2_dinode_out(ip, dibh->b_data);
1449 brelse(dibh); 1455 brelse(dibh);
1450 } 1456 }
1451 1457
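
Note: the eattr.c hunks above repeatedly replace the raw di_ctime seconds field with the VFS inode's ctime (i_inode.i_ctime.tv_sec), leaving the nanosecond part at zero. A minimal stand-alone sketch of that representation, using libc's struct timespec rather than any GFS2 type:

#include <stdio.h>
#include <time.h>

int main(void)
{
	/* Illustration only: second-granularity ctime stored in a timespec. */
	struct timespec ctime = { .tv_sec = time(NULL), .tv_nsec = 0 };

	/* tv_nsec stays 0, matching the tv_nsec = 0 assignments in the diff. */
	printf("ctime: %ld.%09ld\n", (long)ctime.tv_sec, (long)ctime.tv_nsec);
	return 0;
}
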
diff --git a/fs/gfs2/eattr.h b/fs/gfs2/eattr.h
index ffa65947d686..c82dbe01d713 100644
--- a/fs/gfs2/eattr.h
+++ b/fs/gfs2/eattr.h
@@ -19,7 +19,7 @@ struct iattr;
19#define GFS2_EA_SIZE(ea) \ 19#define GFS2_EA_SIZE(ea) \
20ALIGN(sizeof(struct gfs2_ea_header) + (ea)->ea_name_len + \ 20ALIGN(sizeof(struct gfs2_ea_header) + (ea)->ea_name_len + \
21 ((GFS2_EA_IS_STUFFED(ea)) ? GFS2_EA_DATA_LEN(ea) : \ 21 ((GFS2_EA_IS_STUFFED(ea)) ? GFS2_EA_DATA_LEN(ea) : \
22 (sizeof(u64) * (ea)->ea_num_ptrs)), 8) 22 (sizeof(__be64) * (ea)->ea_num_ptrs)), 8)
23 23
24#define GFS2_EA_IS_STUFFED(ea) (!(ea)->ea_num_ptrs) 24#define GFS2_EA_IS_STUFFED(ea) (!(ea)->ea_num_ptrs)
25#define GFS2_EA_IS_LAST(ea) ((ea)->ea_flags & GFS2_EAFLAG_LAST) 25#define GFS2_EA_IS_LAST(ea) ((ea)->ea_flags & GFS2_EAFLAG_LAST)
@@ -29,13 +29,13 @@ ALIGN(sizeof(struct gfs2_ea_header) + (er)->er_name_len + (er)->er_data_len, 8)
29 29
30#define GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er) \ 30#define GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er) \
31ALIGN(sizeof(struct gfs2_ea_header) + (er)->er_name_len + \ 31ALIGN(sizeof(struct gfs2_ea_header) + (er)->er_name_len + \
32 sizeof(u64) * DIV_ROUND_UP((er)->er_data_len, (sdp)->sd_jbsize), 8) 32 sizeof(__be64) * DIV_ROUND_UP((er)->er_data_len, (sdp)->sd_jbsize), 8)
33 33
34#define GFS2_EA2NAME(ea) ((char *)((struct gfs2_ea_header *)(ea) + 1)) 34#define GFS2_EA2NAME(ea) ((char *)((struct gfs2_ea_header *)(ea) + 1))
35#define GFS2_EA2DATA(ea) (GFS2_EA2NAME(ea) + (ea)->ea_name_len) 35#define GFS2_EA2DATA(ea) (GFS2_EA2NAME(ea) + (ea)->ea_name_len)
36 36
37#define GFS2_EA2DATAPTRS(ea) \ 37#define GFS2_EA2DATAPTRS(ea) \
38((u64 *)(GFS2_EA2NAME(ea) + ALIGN((ea)->ea_name_len, 8))) 38((__be64 *)(GFS2_EA2NAME(ea) + ALIGN((ea)->ea_name_len, 8)))
39 39
40#define GFS2_EA2NEXT(ea) \ 40#define GFS2_EA2NEXT(ea) \
41((struct gfs2_ea_header *)((char *)(ea) + GFS2_EA_REC_LEN(ea))) 41((struct gfs2_ea_header *)((char *)(ea) + GFS2_EA_REC_LEN(ea)))
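
Note: the eattr changes above switch every pointer into on-disk data from u64 * to __be64 *, so a missing byte-order conversion can be flagged by static checking. A stand-alone userspace analogue of that pattern, assuming glibc's <endian.h> helpers in place of the kernel's cpu_to_be64()/be64_to_cpu():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cpu_block  = 123456;              /* in-memory, CPU byte order */
	uint64_t disk_block = htobe64(cpu_block);  /* what would be written to disk */

	/* Every read of an on-disk big-endian value goes through the reverse conversion. */
	printf("%llu\n", (unsigned long long)be64toh(disk_block));
	return 0;
}
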
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 55f5333dae99..438146904b58 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -96,7 +96,7 @@ static inline rwlock_t *gl_lock_addr(unsigned int x)
96 return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)]; 96 return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
97} 97}
98#else /* not SMP, so no spinlocks required */ 98#else /* not SMP, so no spinlocks required */
99static inline rwlock_t *gl_lock_addr(x) 99static inline rwlock_t *gl_lock_addr(unsigned int x)
100{ 100{
101 return NULL; 101 return NULL;
102} 102}
@@ -769,7 +769,7 @@ restart:
769 } else { 769 } else {
770 spin_unlock(&gl->gl_spin); 770 spin_unlock(&gl->gl_spin);
771 771
772 new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_KERNEL); 772 new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_NOFS);
773 if (!new_gh) 773 if (!new_gh)
774 return; 774 return;
775 set_bit(HIF_DEMOTE, &new_gh->gh_iflags); 775 set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
@@ -785,21 +785,6 @@ out:
785 gfs2_holder_put(new_gh); 785 gfs2_holder_put(new_gh);
786} 786}
787 787
788void gfs2_glock_inode_squish(struct inode *inode)
789{
790 struct gfs2_holder gh;
791 struct gfs2_glock *gl = GFS2_I(inode)->i_gl;
792 gfs2_holder_init(gl, LM_ST_UNLOCKED, 0, &gh);
793 set_bit(HIF_DEMOTE, &gh.gh_iflags);
794 spin_lock(&gl->gl_spin);
795 gfs2_assert(inode->i_sb->s_fs_info, list_empty(&gl->gl_holders));
796 list_add_tail(&gh.gh_list, &gl->gl_waiters2);
797 run_queue(gl);
798 spin_unlock(&gl->gl_spin);
799 wait_for_completion(&gh.gh_wait);
800 gfs2_holder_uninit(&gh);
801}
802
803/** 788/**
804 * state_change - record that the glock is now in a different state 789 * state_change - record that the glock is now in a different state
805 * @gl: the glock 790 * @gl: the glock
@@ -847,12 +832,12 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
847 832
848 if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) { 833 if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
849 if (glops->go_inval) 834 if (glops->go_inval)
850 glops->go_inval(gl, DIO_METADATA | DIO_DATA); 835 glops->go_inval(gl, DIO_METADATA);
851 } else if (gl->gl_state == LM_ST_DEFERRED) { 836 } else if (gl->gl_state == LM_ST_DEFERRED) {
852 /* We might not want to do this here. 837 /* We might not want to do this here.
853 Look at moving to the inode glops. */ 838 Look at moving to the inode glops. */
854 if (glops->go_inval) 839 if (glops->go_inval)
855 glops->go_inval(gl, DIO_DATA); 840 glops->go_inval(gl, 0);
856 } 841 }
857 842
858 /* Deal with each possible exit condition */ 843 /* Deal with each possible exit condition */
@@ -954,7 +939,7 @@ void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
954 gfs2_assert_warn(sdp, state != gl->gl_state); 939 gfs2_assert_warn(sdp, state != gl->gl_state);
955 940
956 if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync) 941 if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
957 glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE); 942 glops->go_sync(gl);
958 943
959 gfs2_glock_hold(gl); 944 gfs2_glock_hold(gl);
960 gl->gl_req_bh = xmote_bh; 945 gl->gl_req_bh = xmote_bh;
@@ -995,7 +980,7 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
995 state_change(gl, LM_ST_UNLOCKED); 980 state_change(gl, LM_ST_UNLOCKED);
996 981
997 if (glops->go_inval) 982 if (glops->go_inval)
998 glops->go_inval(gl, DIO_METADATA | DIO_DATA); 983 glops->go_inval(gl, DIO_METADATA);
999 984
1000 if (gh) { 985 if (gh) {
1001 spin_lock(&gl->gl_spin); 986 spin_lock(&gl->gl_spin);
@@ -1041,7 +1026,7 @@ void gfs2_glock_drop_th(struct gfs2_glock *gl)
1041 gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED); 1026 gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
1042 1027
1043 if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync) 1028 if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
1044 glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE); 1029 glops->go_sync(gl);
1045 1030
1046 gfs2_glock_hold(gl); 1031 gfs2_glock_hold(gl);
1047 gl->gl_req_bh = drop_bh; 1032 gl->gl_req_bh = drop_bh;
@@ -1244,9 +1229,6 @@ restart:
1244 1229
1245 clear_bit(GLF_PREFETCH, &gl->gl_flags); 1230 clear_bit(GLF_PREFETCH, &gl->gl_flags);
1246 1231
1247 if (error == GLR_TRYFAILED && (gh->gh_flags & GL_DUMP))
1248 dump_glock(gl);
1249
1250 return error; 1232 return error;
1251} 1233}
1252 1234
@@ -1923,7 +1905,7 @@ out:
1923 1905
1924static void scan_glock(struct gfs2_glock *gl) 1906static void scan_glock(struct gfs2_glock *gl)
1925{ 1907{
1926 if (gl->gl_ops == &gfs2_inode_glops) 1908 if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
1927 return; 1909 return;
1928 1910
1929 if (gfs2_glmutex_trylock(gl)) { 1911 if (gfs2_glmutex_trylock(gl)) {
@@ -2078,7 +2060,7 @@ static int dump_inode(struct gfs2_inode *ip)
2078 printk(KERN_INFO " num = %llu %llu\n", 2060 printk(KERN_INFO " num = %llu %llu\n",
2079 (unsigned long long)ip->i_num.no_formal_ino, 2061 (unsigned long long)ip->i_num.no_formal_ino,
2080 (unsigned long long)ip->i_num.no_addr); 2062 (unsigned long long)ip->i_num.no_addr);
2081 printk(KERN_INFO " type = %u\n", IF2DT(ip->i_di.di_mode)); 2063 printk(KERN_INFO " type = %u\n", IF2DT(ip->i_inode.i_mode));
2082 printk(KERN_INFO " i_flags ="); 2064 printk(KERN_INFO " i_flags =");
2083 for (x = 0; x < 32; x++) 2065 for (x = 0; x < 32; x++)
2084 if (test_bit(x, &ip->i_flags)) 2066 if (test_bit(x, &ip->i_flags))
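
Note: one small fix in the glock.c hunk above gives the non-SMP gl_lock_addr() stub an explicit unsigned int parameter; the old form relied on an implicit int, which newer compilers warn about or reject. A minimal sketch of the corrected stub shape, with purely illustrative local types (not the kernel's):

#include <stdio.h>

struct fake_rwlock { int dummy; };   /* stand-in for rwlock_t */

/* UP build: there are no hash-bucket locks, so the helper returns NULL. */
static inline struct fake_rwlock *lock_addr(unsigned int x)
{
	(void)x;
	return NULL;
}

int main(void)
{
	printf("lock for bucket 5: %p\n", (void *)lock_addr(5));
	return 0;
}
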
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 2b2a889ee2cc..fb39108fc05c 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -27,8 +27,6 @@
27#define GL_ATIME 0x00000200 27#define GL_ATIME 0x00000200
28#define GL_NOCACHE 0x00000400 28#define GL_NOCACHE 0x00000400
29#define GL_NOCANCEL 0x00001000 29#define GL_NOCANCEL 0x00001000
30#define GL_AOP 0x00004000
31#define GL_DUMP 0x00008000
32 30
33#define GLR_TRYFAILED 13 31#define GLR_TRYFAILED 13
34#define GLR_CANCELED 14 32#define GLR_CANCELED 14
@@ -108,7 +106,6 @@ void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
108void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number, 106void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number,
109 const struct gfs2_glock_operations *glops, 107 const struct gfs2_glock_operations *glops,
110 unsigned int state, int flags); 108 unsigned int state, int flags);
111void gfs2_glock_inode_squish(struct inode *inode);
112 109
113/** 110/**
114 * gfs2_glock_nq_init - intialize a holder and enqueue it on a glock 111 * gfs2_glock_nq_init - intialize a holder and enqueue it on a glock
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 41a6b6818a50..b068d10bcb6e 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -92,7 +92,7 @@ static void gfs2_pte_inval(struct gfs2_glock *gl)
92 92
93 ip = gl->gl_object; 93 ip = gl->gl_object;
94 inode = &ip->i_inode; 94 inode = &ip->i_inode;
95 if (!ip || !S_ISREG(ip->i_di.di_mode)) 95 if (!ip || !S_ISREG(inode->i_mode))
96 return; 96 return;
97 97
98 if (!test_bit(GIF_PAGED, &ip->i_flags)) 98 if (!test_bit(GIF_PAGED, &ip->i_flags))
@@ -107,89 +107,20 @@ static void gfs2_pte_inval(struct gfs2_glock *gl)
107} 107}
108 108
109/** 109/**
110 * gfs2_page_inval - Invalidate all pages associated with a glock
111 * @gl: the glock
112 *
113 */
114
115static void gfs2_page_inval(struct gfs2_glock *gl)
116{
117 struct gfs2_inode *ip;
118 struct inode *inode;
119
120 ip = gl->gl_object;
121 inode = &ip->i_inode;
122 if (!ip || !S_ISREG(ip->i_di.di_mode))
123 return;
124
125 truncate_inode_pages(inode->i_mapping, 0);
126 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !inode->i_mapping->nrpages);
127 clear_bit(GIF_PAGED, &ip->i_flags);
128}
129
130/**
131 * gfs2_page_wait - Wait for writeback of data
132 * @gl: the glock
133 *
134 * Syncs data (not metadata) for a regular file.
135 * No-op for all other types.
136 */
137
138static void gfs2_page_wait(struct gfs2_glock *gl)
139{
140 struct gfs2_inode *ip = gl->gl_object;
141 struct inode *inode = &ip->i_inode;
142 struct address_space *mapping = inode->i_mapping;
143 int error;
144
145 if (!S_ISREG(ip->i_di.di_mode))
146 return;
147
148 error = filemap_fdatawait(mapping);
149
150 /* Put back any errors cleared by filemap_fdatawait()
151 so they can be caught by someone who can pass them
152 up to user space. */
153
154 if (error == -ENOSPC)
155 set_bit(AS_ENOSPC, &mapping->flags);
156 else if (error)
157 set_bit(AS_EIO, &mapping->flags);
158
159}
160
161static void gfs2_page_writeback(struct gfs2_glock *gl)
162{
163 struct gfs2_inode *ip = gl->gl_object;
164 struct inode *inode = &ip->i_inode;
165 struct address_space *mapping = inode->i_mapping;
166
167 if (!S_ISREG(ip->i_di.di_mode))
168 return;
169
170 filemap_fdatawrite(mapping);
171}
172
173/**
174 * meta_go_sync - sync out the metadata for this glock 110 * meta_go_sync - sync out the metadata for this glock
175 * @gl: the glock 111 * @gl: the glock
176 * @flags: DIO_*
177 * 112 *
178 * Called when demoting or unlocking an EX glock. We must flush 113 * Called when demoting or unlocking an EX glock. We must flush
179 * to disk all dirty buffers/pages relating to this glock, and must not 114 * to disk all dirty buffers/pages relating to this glock, and must not
180 * not return to caller to demote/unlock the glock until I/O is complete. 115 * not return to caller to demote/unlock the glock until I/O is complete.
181 */ 116 */
182 117
183static void meta_go_sync(struct gfs2_glock *gl, int flags) 118static void meta_go_sync(struct gfs2_glock *gl)
184{ 119{
185 if (!(flags & DIO_METADATA))
186 return;
187
188 if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) { 120 if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
189 gfs2_log_flush(gl->gl_sbd, gl); 121 gfs2_log_flush(gl->gl_sbd, gl);
190 gfs2_meta_sync(gl); 122 gfs2_meta_sync(gl);
191 if (flags & DIO_RELEASE) 123 gfs2_ail_empty_gl(gl);
192 gfs2_ail_empty_gl(gl);
193 } 124 }
194 125
195} 126}
@@ -264,31 +195,31 @@ static void inode_go_drop_th(struct gfs2_glock *gl)
264/** 195/**
265 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock 196 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
266 * @gl: the glock protecting the inode 197 * @gl: the glock protecting the inode
267 * @flags:
268 * 198 *
269 */ 199 */
270 200
271static void inode_go_sync(struct gfs2_glock *gl, int flags) 201static void inode_go_sync(struct gfs2_glock *gl)
272{ 202{
273 int meta = (flags & DIO_METADATA); 203 struct gfs2_inode *ip = gl->gl_object;
274 int data = (flags & DIO_DATA); 204
205 if (ip && !S_ISREG(ip->i_inode.i_mode))
206 ip = NULL;
275 207
276 if (test_bit(GLF_DIRTY, &gl->gl_flags)) { 208 if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
277 if (meta && data) { 209 gfs2_log_flush(gl->gl_sbd, gl);
278 gfs2_page_writeback(gl); 210 if (ip)
279 gfs2_log_flush(gl->gl_sbd, gl); 211 filemap_fdatawrite(ip->i_inode.i_mapping);
280 gfs2_meta_sync(gl); 212 gfs2_meta_sync(gl);
281 gfs2_page_wait(gl); 213 if (ip) {
282 clear_bit(GLF_DIRTY, &gl->gl_flags); 214 struct address_space *mapping = ip->i_inode.i_mapping;
283 } else if (meta) { 215 int error = filemap_fdatawait(mapping);
284 gfs2_log_flush(gl->gl_sbd, gl); 216 if (error == -ENOSPC)
285 gfs2_meta_sync(gl); 217 set_bit(AS_ENOSPC, &mapping->flags);
286 } else if (data) { 218 else if (error)
287 gfs2_page_writeback(gl); 219 set_bit(AS_EIO, &mapping->flags);
288 gfs2_page_wait(gl);
289 } 220 }
290 if (flags & DIO_RELEASE) 221 clear_bit(GLF_DIRTY, &gl->gl_flags);
291 gfs2_ail_empty_gl(gl); 222 gfs2_ail_empty_gl(gl);
292 } 223 }
293} 224}
294 225
@@ -301,15 +232,20 @@ static void inode_go_sync(struct gfs2_glock *gl, int flags)
301 232
302static void inode_go_inval(struct gfs2_glock *gl, int flags) 233static void inode_go_inval(struct gfs2_glock *gl, int flags)
303{ 234{
235 struct gfs2_inode *ip = gl->gl_object;
304 int meta = (flags & DIO_METADATA); 236 int meta = (flags & DIO_METADATA);
305 int data = (flags & DIO_DATA);
306 237
307 if (meta) { 238 if (meta) {
308 gfs2_meta_inval(gl); 239 gfs2_meta_inval(gl);
309 gl->gl_vn++; 240 if (ip)
241 set_bit(GIF_INVALID, &ip->i_flags);
242 }
243
244 if (ip && S_ISREG(ip->i_inode.i_mode)) {
245 truncate_inode_pages(ip->i_inode.i_mapping, 0);
246 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !ip->i_inode.i_mapping->nrpages);
247 clear_bit(GIF_PAGED, &ip->i_flags);
310 } 248 }
311 if (data)
312 gfs2_page_inval(gl);
313} 249}
314 250
315/** 251/**
@@ -351,11 +287,10 @@ static int inode_go_lock(struct gfs2_holder *gh)
351 if (!ip) 287 if (!ip)
352 return 0; 288 return 0;
353 289
354 if (ip->i_vn != gl->gl_vn) { 290 if (test_bit(GIF_INVALID, &ip->i_flags)) {
355 error = gfs2_inode_refresh(ip); 291 error = gfs2_inode_refresh(ip);
356 if (error) 292 if (error)
357 return error; 293 return error;
358 gfs2_inode_attr_in(ip);
359 } 294 }
360 295
361 if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) && 296 if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
@@ -379,11 +314,8 @@ static void inode_go_unlock(struct gfs2_holder *gh)
379 struct gfs2_glock *gl = gh->gh_gl; 314 struct gfs2_glock *gl = gh->gh_gl;
380 struct gfs2_inode *ip = gl->gl_object; 315 struct gfs2_inode *ip = gl->gl_object;
381 316
382 if (ip == NULL) 317 if (ip)
383 return; 318 gfs2_meta_cache_flush(ip);
384 if (test_bit(GLF_DIRTY, &gl->gl_flags))
385 gfs2_inode_attr_in(ip);
386 gfs2_meta_cache_flush(ip);
387} 319}
388 320
389/** 321/**
@@ -491,13 +423,13 @@ static void trans_go_xmote_bh(struct gfs2_glock *gl)
491 struct gfs2_sbd *sdp = gl->gl_sbd; 423 struct gfs2_sbd *sdp = gl->gl_sbd;
492 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); 424 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
493 struct gfs2_glock *j_gl = ip->i_gl; 425 struct gfs2_glock *j_gl = ip->i_gl;
494 struct gfs2_log_header head; 426 struct gfs2_log_header_host head;
495 int error; 427 int error;
496 428
497 if (gl->gl_state != LM_ST_UNLOCKED && 429 if (gl->gl_state != LM_ST_UNLOCKED &&
498 test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { 430 test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
499 gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode)); 431 gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode));
500 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA); 432 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
501 433
502 error = gfs2_find_jhead(sdp->sd_jdesc, &head); 434 error = gfs2_find_jhead(sdp->sd_jdesc, &head);
503 if (error) 435 if (error)
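
Note: the glops.c rewrite above folds the old gfs2_page_writeback()/gfs2_page_wait() helpers into inode_go_sync(), keeping the detail that an error returned by filemap_fdatawait() is put back on the mapping (AS_ENOSPC/AS_EIO) so a later sync caller still sees it. A stand-alone sketch of that "sticky error" idea, with made-up types that only illustrate the flow:

#include <errno.h>
#include <stdio.h>

struct fake_mapping {
	int enospc_pending;
	int eio_pending;
};

/* An error seen while waiting for writeback is parked on the mapping
 * so a later caller (e.g. fsync) still observes it. */
static void record_wait_error(struct fake_mapping *m, int error)
{
	if (error == -ENOSPC)
		m->enospc_pending = 1;
	else if (error)
		m->eio_pending = 1;
}

int main(void)
{
	struct fake_mapping m = { 0, 0 };

	record_wait_error(&m, -ENOSPC);
	printf("ENOSPC pending: %d, EIO pending: %d\n",
	       m.enospc_pending, m.eio_pending);
	return 0;
}
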
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 118dc693d111..734421edae85 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -14,8 +14,6 @@
14 14
15#define DIO_WAIT 0x00000010 15#define DIO_WAIT 0x00000010
16#define DIO_METADATA 0x00000020 16#define DIO_METADATA 0x00000020
17#define DIO_DATA 0x00000040
18#define DIO_RELEASE 0x00000080
19#define DIO_ALL 0x00000100 17#define DIO_ALL 0x00000100
20 18
21struct gfs2_log_operations; 19struct gfs2_log_operations;
@@ -41,7 +39,7 @@ struct gfs2_log_operations {
41 void (*lo_before_commit) (struct gfs2_sbd *sdp); 39 void (*lo_before_commit) (struct gfs2_sbd *sdp);
42 void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai); 40 void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai);
43 void (*lo_before_scan) (struct gfs2_jdesc *jd, 41 void (*lo_before_scan) (struct gfs2_jdesc *jd,
44 struct gfs2_log_header *head, int pass); 42 struct gfs2_log_header_host *head, int pass);
45 int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start, 43 int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
46 struct gfs2_log_descriptor *ld, __be64 *ptr, 44 struct gfs2_log_descriptor *ld, __be64 *ptr,
47 int pass); 45 int pass);
@@ -67,8 +65,8 @@ struct gfs2_rgrpd {
67 struct list_head rd_list_mru; 65 struct list_head rd_list_mru;
68 struct list_head rd_recent; /* Recently used rgrps */ 66 struct list_head rd_recent; /* Recently used rgrps */
69 struct gfs2_glock *rd_gl; /* Glock for this rgrp */ 67 struct gfs2_glock *rd_gl; /* Glock for this rgrp */
70 struct gfs2_rindex rd_ri; 68 struct gfs2_rindex_host rd_ri;
71 struct gfs2_rgrp rd_rg; 69 struct gfs2_rgrp_host rd_rg;
72 u64 rd_rg_vn; 70 u64 rd_rg_vn;
73 struct gfs2_bitmap *rd_bits; 71 struct gfs2_bitmap *rd_bits;
74 unsigned int rd_bh_count; 72 unsigned int rd_bh_count;
@@ -103,18 +101,17 @@ struct gfs2_bufdata {
103}; 101};
104 102
105struct gfs2_glock_operations { 103struct gfs2_glock_operations {
106 void (*go_xmote_th) (struct gfs2_glock * gl, unsigned int state, 104 void (*go_xmote_th) (struct gfs2_glock *gl, unsigned int state, int flags);
107 int flags); 105 void (*go_xmote_bh) (struct gfs2_glock *gl);
108 void (*go_xmote_bh) (struct gfs2_glock * gl); 106 void (*go_drop_th) (struct gfs2_glock *gl);
109 void (*go_drop_th) (struct gfs2_glock * gl); 107 void (*go_drop_bh) (struct gfs2_glock *gl);
110 void (*go_drop_bh) (struct gfs2_glock * gl); 108 void (*go_sync) (struct gfs2_glock *gl);
111 void (*go_sync) (struct gfs2_glock * gl, int flags); 109 void (*go_inval) (struct gfs2_glock *gl, int flags);
112 void (*go_inval) (struct gfs2_glock * gl, int flags); 110 int (*go_demote_ok) (struct gfs2_glock *gl);
113 int (*go_demote_ok) (struct gfs2_glock * gl); 111 int (*go_lock) (struct gfs2_holder *gh);
114 int (*go_lock) (struct gfs2_holder * gh); 112 void (*go_unlock) (struct gfs2_holder *gh);
115 void (*go_unlock) (struct gfs2_holder * gh); 113 void (*go_callback) (struct gfs2_glock *gl, unsigned int state);
116 void (*go_callback) (struct gfs2_glock * gl, unsigned int state); 114 void (*go_greedy) (struct gfs2_glock *gl);
117 void (*go_greedy) (struct gfs2_glock * gl);
118 const int go_type; 115 const int go_type;
119}; 116};
120 117
@@ -217,6 +214,7 @@ struct gfs2_alloc {
217}; 214};
218 215
219enum { 216enum {
217 GIF_INVALID = 0,
220 GIF_QD_LOCKED = 1, 218 GIF_QD_LOCKED = 1,
221 GIF_PAGED = 2, 219 GIF_PAGED = 2,
222 GIF_SW_PAGED = 3, 220 GIF_SW_PAGED = 3,
@@ -224,12 +222,11 @@ enum {
224 222
225struct gfs2_inode { 223struct gfs2_inode {
226 struct inode i_inode; 224 struct inode i_inode;
227 struct gfs2_inum i_num; 225 struct gfs2_inum_host i_num;
228 226
229 unsigned long i_flags; /* GIF_... */ 227 unsigned long i_flags; /* GIF_... */
230 228
231 u64 i_vn; 229 struct gfs2_dinode_host i_di; /* To be replaced by ref to block */
232 struct gfs2_dinode i_di; /* To be replaced by ref to block */
233 230
234 struct gfs2_glock *i_gl; /* Move into i_gh? */ 231 struct gfs2_glock *i_gl; /* Move into i_gh? */
235 struct gfs2_holder i_iopen_gh; 232 struct gfs2_holder i_iopen_gh;
@@ -450,7 +447,7 @@ struct gfs2_sbd {
450 struct super_block *sd_vfs_meta; 447 struct super_block *sd_vfs_meta;
451 struct kobject sd_kobj; 448 struct kobject sd_kobj;
452 unsigned long sd_flags; /* SDF_... */ 449 unsigned long sd_flags; /* SDF_... */
453 struct gfs2_sb sd_sb; 450 struct gfs2_sb_host sd_sb;
454 451
455 /* Constants computed on mount */ 452 /* Constants computed on mount */
456 453
@@ -503,8 +500,8 @@ struct gfs2_sbd {
503 500
504 spinlock_t sd_statfs_spin; 501 spinlock_t sd_statfs_spin;
505 struct mutex sd_statfs_mutex; 502 struct mutex sd_statfs_mutex;
506 struct gfs2_statfs_change sd_statfs_master; 503 struct gfs2_statfs_change_host sd_statfs_master;
507 struct gfs2_statfs_change sd_statfs_local; 504 struct gfs2_statfs_change_host sd_statfs_local;
508 unsigned long sd_statfs_sync_time; 505 unsigned long sd_statfs_sync_time;
509 506
510 /* Resource group stuff */ 507 /* Resource group stuff */
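
Note: in the incore.h hunk above, the per-inode version number i_vn disappears and a GIF_INVALID flag takes its place: go_inval() sets it, and inode_go_lock() triggers gfs2_inode_refresh(), which clears it after re-reading the dinode. A stand-alone sketch of that validity-bit pattern (illustrative names only, not kernel code):

#include <stdio.h>

#define GIF_INVALID (1u << 0)

struct fake_inode { unsigned flags; };

static void invalidate(struct fake_inode *ip) { ip->flags |= GIF_INVALID; }
static void refresh(struct fake_inode *ip)    { ip->flags &= ~GIF_INVALID; }

static int lock_inode(struct fake_inode *ip)
{
	if (ip->flags & GIF_INVALID)
		refresh(ip);   /* re-read the dinode before using cached fields */
	return 0;
}

int main(void)
{
	struct fake_inode ip = { 0 };

	invalidate(&ip);
	lock_inode(&ip);
	printf("flags after lock: %u\n", ip.flags);
	return 0;
}
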
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index d470e5286ecd..d122074c45e1 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -38,83 +38,12 @@
38#include "trans.h" 38#include "trans.h"
39#include "util.h" 39#include "util.h"
40 40
41/**
42 * gfs2_inode_attr_in - Copy attributes from the dinode into the VFS inode
43 * @ip: The GFS2 inode (with embedded disk inode data)
44 * @inode: The Linux VFS inode
45 *
46 */
47
48void gfs2_inode_attr_in(struct gfs2_inode *ip)
49{
50 struct inode *inode = &ip->i_inode;
51 struct gfs2_dinode *di = &ip->i_di;
52
53 inode->i_ino = ip->i_num.no_addr;
54
55 switch (di->di_mode & S_IFMT) {
56 case S_IFBLK:
57 case S_IFCHR:
58 inode->i_rdev = MKDEV(di->di_major, di->di_minor);
59 break;
60 default:
61 inode->i_rdev = 0;
62 break;
63 };
64
65 inode->i_mode = di->di_mode;
66 inode->i_nlink = di->di_nlink;
67 inode->i_uid = di->di_uid;
68 inode->i_gid = di->di_gid;
69 i_size_write(inode, di->di_size);
70 inode->i_atime.tv_sec = di->di_atime;
71 inode->i_mtime.tv_sec = di->di_mtime;
72 inode->i_ctime.tv_sec = di->di_ctime;
73 inode->i_atime.tv_nsec = 0;
74 inode->i_mtime.tv_nsec = 0;
75 inode->i_ctime.tv_nsec = 0;
76 inode->i_blocks = di->di_blocks <<
77 (GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
78
79 if (di->di_flags & GFS2_DIF_IMMUTABLE)
80 inode->i_flags |= S_IMMUTABLE;
81 else
82 inode->i_flags &= ~S_IMMUTABLE;
83
84 if (di->di_flags & GFS2_DIF_APPENDONLY)
85 inode->i_flags |= S_APPEND;
86 else
87 inode->i_flags &= ~S_APPEND;
88}
89
90/**
91 * gfs2_inode_attr_out - Copy attributes from VFS inode into the dinode
92 * @ip: The GFS2 inode
93 *
94 * Only copy out the attributes that we want the VFS layer
95 * to be able to modify.
96 */
97
98void gfs2_inode_attr_out(struct gfs2_inode *ip)
99{
100 struct inode *inode = &ip->i_inode;
101 struct gfs2_dinode *di = &ip->i_di;
102 gfs2_assert_withdraw(GFS2_SB(inode),
103 (di->di_mode & S_IFMT) == (inode->i_mode & S_IFMT));
104 di->di_mode = inode->i_mode;
105 di->di_uid = inode->i_uid;
106 di->di_gid = inode->i_gid;
107 di->di_atime = inode->i_atime.tv_sec;
108 di->di_mtime = inode->i_mtime.tv_sec;
109 di->di_ctime = inode->i_ctime.tv_sec;
110}
111
112static int iget_test(struct inode *inode, void *opaque) 41static int iget_test(struct inode *inode, void *opaque)
113{ 42{
114 struct gfs2_inode *ip = GFS2_I(inode); 43 struct gfs2_inode *ip = GFS2_I(inode);
115 struct gfs2_inum *inum = opaque; 44 struct gfs2_inum_host *inum = opaque;
116 45
117 if (ip && ip->i_num.no_addr == inum->no_addr) 46 if (ip->i_num.no_addr == inum->no_addr)
118 return 1; 47 return 1;
119 48
120 return 0; 49 return 0;
@@ -123,19 +52,20 @@ static int iget_test(struct inode *inode, void *opaque)
123static int iget_set(struct inode *inode, void *opaque) 52static int iget_set(struct inode *inode, void *opaque)
124{ 53{
125 struct gfs2_inode *ip = GFS2_I(inode); 54 struct gfs2_inode *ip = GFS2_I(inode);
126 struct gfs2_inum *inum = opaque; 55 struct gfs2_inum_host *inum = opaque;
127 56
128 ip->i_num = *inum; 57 ip->i_num = *inum;
58 inode->i_ino = inum->no_addr;
129 return 0; 59 return 0;
130} 60}
131 61
132struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum *inum) 62struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum_host *inum)
133{ 63{
134 return ilookup5(sb, (unsigned long)inum->no_formal_ino, 64 return ilookup5(sb, (unsigned long)inum->no_formal_ino,
135 iget_test, inum); 65 iget_test, inum);
136} 66}
137 67
138static struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum *inum) 68static struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum_host *inum)
139{ 69{
140 return iget5_locked(sb, (unsigned long)inum->no_formal_ino, 70 return iget5_locked(sb, (unsigned long)inum->no_formal_ino,
141 iget_test, iget_set, inum); 71 iget_test, iget_set, inum);
@@ -150,7 +80,7 @@ static struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum *inum)
150 * Returns: A VFS inode, or an error 80 * Returns: A VFS inode, or an error
151 */ 81 */
152 82
153struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum *inum, unsigned int type) 83struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum_host *inum, unsigned int type)
154{ 84{
155 struct inode *inode = gfs2_iget(sb, inum); 85 struct inode *inode = gfs2_iget(sb, inum);
156 struct gfs2_inode *ip = GFS2_I(inode); 86 struct gfs2_inode *ip = GFS2_I(inode);
@@ -188,7 +118,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum *inum,
188 if (unlikely(error)) 118 if (unlikely(error))
189 goto fail_put; 119 goto fail_put;
190 120
191 ip->i_vn = ip->i_gl->gl_vn - 1; 121 set_bit(GIF_INVALID, &ip->i_flags);
192 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh); 122 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
193 if (unlikely(error)) 123 if (unlikely(error))
194 goto fail_iopen; 124 goto fail_iopen;
@@ -208,6 +138,63 @@ fail:
208 return ERR_PTR(error); 138 return ERR_PTR(error);
209} 139}
210 140
141static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
142{
143 struct gfs2_dinode_host *di = &ip->i_di;
144 const struct gfs2_dinode *str = buf;
145
146 if (ip->i_num.no_addr != be64_to_cpu(str->di_num.no_addr)) {
147 if (gfs2_consist_inode(ip))
148 gfs2_dinode_print(ip);
149 return -EIO;
150 }
151 if (ip->i_num.no_formal_ino != be64_to_cpu(str->di_num.no_formal_ino))
152 return -ESTALE;
153
154 ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
155 ip->i_inode.i_rdev = 0;
156 switch (ip->i_inode.i_mode & S_IFMT) {
157 case S_IFBLK:
158 case S_IFCHR:
159 ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
160 be32_to_cpu(str->di_minor));
161 break;
162 };
163
164 ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
165 ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
166 /*
167 * We will need to review setting the nlink count here in the
168 * light of the forthcoming ro bind mount work. This is a reminder
169 * to do that.
170 */
171 ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
172 di->di_size = be64_to_cpu(str->di_size);
173 i_size_write(&ip->i_inode, di->di_size);
174 di->di_blocks = be64_to_cpu(str->di_blocks);
175 gfs2_set_inode_blocks(&ip->i_inode);
176 ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime);
177 ip->i_inode.i_atime.tv_nsec = 0;
178 ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
179 ip->i_inode.i_mtime.tv_nsec = 0;
180 ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
181 ip->i_inode.i_ctime.tv_nsec = 0;
182
183 di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
184 di->di_goal_data = be64_to_cpu(str->di_goal_data);
185 di->di_generation = be64_to_cpu(str->di_generation);
186
187 di->di_flags = be32_to_cpu(str->di_flags);
188 gfs2_set_inode_flags(&ip->i_inode);
189 di->di_height = be16_to_cpu(str->di_height);
190
191 di->di_depth = be16_to_cpu(str->di_depth);
192 di->di_entries = be32_to_cpu(str->di_entries);
193
194 di->di_eattr = be64_to_cpu(str->di_eattr);
195 return 0;
196}
197
211/** 198/**
212 * gfs2_inode_refresh - Refresh the incore copy of the dinode 199 * gfs2_inode_refresh - Refresh the incore copy of the dinode
213 * @ip: The GFS2 inode 200 * @ip: The GFS2 inode
@@ -229,21 +216,11 @@ int gfs2_inode_refresh(struct gfs2_inode *ip)
229 return -EIO; 216 return -EIO;
230 } 217 }
231 218
232 gfs2_dinode_in(&ip->i_di, dibh->b_data); 219 error = gfs2_dinode_in(ip, dibh->b_data);
233
234 brelse(dibh); 220 brelse(dibh);
221 clear_bit(GIF_INVALID, &ip->i_flags);
235 222
236 if (ip->i_num.no_addr != ip->i_di.di_num.no_addr) { 223 return error;
237 if (gfs2_consist_inode(ip))
238 gfs2_dinode_print(&ip->i_di);
239 return -EIO;
240 }
241 if (ip->i_num.no_formal_ino != ip->i_di.di_num.no_formal_ino)
242 return -ESTALE;
243
244 ip->i_vn = ip->i_gl->gl_vn;
245
246 return 0;
247} 224}
248 225
249int gfs2_dinode_dealloc(struct gfs2_inode *ip) 226int gfs2_dinode_dealloc(struct gfs2_inode *ip)
@@ -255,7 +232,7 @@ int gfs2_dinode_dealloc(struct gfs2_inode *ip)
255 232
256 if (ip->i_di.di_blocks != 1) { 233 if (ip->i_di.di_blocks != 1) {
257 if (gfs2_consist_inode(ip)) 234 if (gfs2_consist_inode(ip))
258 gfs2_dinode_print(&ip->i_di); 235 gfs2_dinode_print(ip);
259 return -EIO; 236 return -EIO;
260 } 237 }
261 238
@@ -318,14 +295,14 @@ int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
318 u32 nlink; 295 u32 nlink;
319 int error; 296 int error;
320 297
321 BUG_ON(ip->i_di.di_nlink != ip->i_inode.i_nlink); 298 BUG_ON(diff != 1 && diff != -1);
322 nlink = ip->i_di.di_nlink + diff; 299 nlink = ip->i_inode.i_nlink + diff;
323 300
324 /* If we are reducing the nlink count, but the new value ends up being 301 /* If we are reducing the nlink count, but the new value ends up being
325 bigger than the old one, we must have underflowed. */ 302 bigger than the old one, we must have underflowed. */
326 if (diff < 0 && nlink > ip->i_di.di_nlink) { 303 if (diff < 0 && nlink > ip->i_inode.i_nlink) {
327 if (gfs2_consist_inode(ip)) 304 if (gfs2_consist_inode(ip))
328 gfs2_dinode_print(&ip->i_di); 305 gfs2_dinode_print(ip);
329 return -EIO; 306 return -EIO;
330 } 307 }
331 308
@@ -333,16 +310,19 @@ int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
333 if (error) 310 if (error)
334 return error; 311 return error;
335 312
336 ip->i_di.di_nlink = nlink; 313 if (diff > 0)
337 ip->i_di.di_ctime = get_seconds(); 314 inc_nlink(&ip->i_inode);
338 ip->i_inode.i_nlink = nlink; 315 else
316 drop_nlink(&ip->i_inode);
317
318 ip->i_inode.i_ctime.tv_sec = get_seconds();
339 319
340 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 320 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
341 gfs2_dinode_out(&ip->i_di, dibh->b_data); 321 gfs2_dinode_out(ip, dibh->b_data);
342 brelse(dibh); 322 brelse(dibh);
343 mark_inode_dirty(&ip->i_inode); 323 mark_inode_dirty(&ip->i_inode);
344 324
345 if (ip->i_di.di_nlink == 0) { 325 if (ip->i_inode.i_nlink == 0) {
346 struct gfs2_rgrpd *rgd; 326 struct gfs2_rgrpd *rgd;
347 struct gfs2_holder ri_gh, rg_gh; 327 struct gfs2_holder ri_gh, rg_gh;
348 328
@@ -357,7 +337,6 @@ int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
357 if (error) 337 if (error)
358 goto out_norgrp; 338 goto out_norgrp;
359 339
360 clear_nlink(&ip->i_inode);
361 gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */ 340 gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */
362 gfs2_glock_dq_uninit(&rg_gh); 341 gfs2_glock_dq_uninit(&rg_gh);
363out_norgrp: 342out_norgrp:
@@ -394,7 +373,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
394 struct super_block *sb = dir->i_sb; 373 struct super_block *sb = dir->i_sb;
395 struct gfs2_inode *dip = GFS2_I(dir); 374 struct gfs2_inode *dip = GFS2_I(dir);
396 struct gfs2_holder d_gh; 375 struct gfs2_holder d_gh;
397 struct gfs2_inum inum; 376 struct gfs2_inum_host inum;
398 unsigned int type; 377 unsigned int type;
399 int error = 0; 378 int error = 0;
400 struct inode *inode = NULL; 379 struct inode *inode = NULL;
@@ -436,7 +415,7 @@ static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
436{ 415{
437 struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode); 416 struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
438 struct buffer_head *bh; 417 struct buffer_head *bh;
439 struct gfs2_inum_range ir; 418 struct gfs2_inum_range_host ir;
440 int error; 419 int error;
441 420
442 error = gfs2_trans_begin(sdp, RES_DINODE, 0); 421 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
@@ -479,7 +458,7 @@ static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
479 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode); 458 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
480 struct gfs2_holder gh; 459 struct gfs2_holder gh;
481 struct buffer_head *bh; 460 struct buffer_head *bh;
482 struct gfs2_inum_range ir; 461 struct gfs2_inum_range_host ir;
483 int error; 462 int error;
484 463
485 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); 464 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
@@ -500,21 +479,22 @@ static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
500 if (!ir.ir_length) { 479 if (!ir.ir_length) {
501 struct buffer_head *m_bh; 480 struct buffer_head *m_bh;
502 u64 x, y; 481 u64 x, y;
482 __be64 z;
503 483
504 error = gfs2_meta_inode_buffer(m_ip, &m_bh); 484 error = gfs2_meta_inode_buffer(m_ip, &m_bh);
505 if (error) 485 if (error)
506 goto out_brelse; 486 goto out_brelse;
507 487
508 x = *(u64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)); 488 z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
509 x = y = be64_to_cpu(x); 489 x = y = be64_to_cpu(z);
510 ir.ir_start = x; 490 ir.ir_start = x;
511 ir.ir_length = GFS2_INUM_QUANTUM; 491 ir.ir_length = GFS2_INUM_QUANTUM;
512 x += GFS2_INUM_QUANTUM; 492 x += GFS2_INUM_QUANTUM;
513 if (x < y) 493 if (x < y)
514 gfs2_consist_inode(m_ip); 494 gfs2_consist_inode(m_ip);
515 x = cpu_to_be64(x); 495 z = cpu_to_be64(x);
516 gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1); 496 gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
517 *(u64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = x; 497 *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;
518 498
519 brelse(m_bh); 499 brelse(m_bh);
520 } 500 }
@@ -567,7 +547,7 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
567 return error; 547 return error;
568 548
569 /* Don't create entries in an unlinked directory */ 549 /* Don't create entries in an unlinked directory */
570 if (!dip->i_di.di_nlink) 550 if (!dip->i_inode.i_nlink)
571 return -EPERM; 551 return -EPERM;
572 552
573 error = gfs2_dir_search(&dip->i_inode, name, NULL, NULL); 553 error = gfs2_dir_search(&dip->i_inode, name, NULL, NULL);
@@ -583,7 +563,7 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
583 563
584 if (dip->i_di.di_entries == (u32)-1) 564 if (dip->i_di.di_entries == (u32)-1)
585 return -EFBIG; 565 return -EFBIG;
586 if (S_ISDIR(mode) && dip->i_di.di_nlink == (u32)-1) 566 if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
587 return -EMLINK; 567 return -EMLINK;
588 568
589 return 0; 569 return 0;
@@ -593,24 +573,24 @@ static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
593 unsigned int *uid, unsigned int *gid) 573 unsigned int *uid, unsigned int *gid)
594{ 574{
595 if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir && 575 if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
596 (dip->i_di.di_mode & S_ISUID) && dip->i_di.di_uid) { 576 (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
597 if (S_ISDIR(*mode)) 577 if (S_ISDIR(*mode))
598 *mode |= S_ISUID; 578 *mode |= S_ISUID;
599 else if (dip->i_di.di_uid != current->fsuid) 579 else if (dip->i_inode.i_uid != current->fsuid)
600 *mode &= ~07111; 580 *mode &= ~07111;
601 *uid = dip->i_di.di_uid; 581 *uid = dip->i_inode.i_uid;
602 } else 582 } else
603 *uid = current->fsuid; 583 *uid = current->fsuid;
604 584
605 if (dip->i_di.di_mode & S_ISGID) { 585 if (dip->i_inode.i_mode & S_ISGID) {
606 if (S_ISDIR(*mode)) 586 if (S_ISDIR(*mode))
607 *mode |= S_ISGID; 587 *mode |= S_ISGID;
608 *gid = dip->i_di.di_gid; 588 *gid = dip->i_inode.i_gid;
609 } else 589 } else
610 *gid = current->fsgid; 590 *gid = current->fsgid;
611} 591}
612 592
613static int alloc_dinode(struct gfs2_inode *dip, struct gfs2_inum *inum, 593static int alloc_dinode(struct gfs2_inode *dip, struct gfs2_inum_host *inum,
614 u64 *generation) 594 u64 *generation)
615{ 595{
616 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); 596 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
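
Note: munge_mode_uid_gid() above now reads the parent's mode, uid and gid from the VFS inode, but the inheritance rule itself is unchanged: a setgid directory passes its group (and, for subdirectories, the setgid bit) to new inodes. A stand-alone libc-level sketch of that rule, with hypothetical helper names:

#include <stdio.h>
#include <sys/stat.h>

static void inherit_gid(mode_t dir_mode, gid_t dir_gid,
			mode_t *mode, gid_t *gid, gid_t fsgid)
{
	if (dir_mode & S_ISGID) {
		if (S_ISDIR(*mode))
			*mode |= S_ISGID;   /* subdirectories stay setgid */
		*gid = dir_gid;             /* inherit the directory's group */
	} else {
		*gid = fsgid;               /* otherwise use the creator's fsgid */
	}
}

int main(void)
{
	mode_t mode = S_IFDIR | 0755;
	gid_t gid = 0;

	inherit_gid(S_IFDIR | S_ISGID | 0775, 100, &mode, &gid, 1000);
	printf("mode %o gid %d\n", (unsigned)mode, (int)gid);
	return 0;
}
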
@@ -650,9 +630,9 @@ out:
650 */ 630 */
651 631
652static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl, 632static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
653 const struct gfs2_inum *inum, unsigned int mode, 633 const struct gfs2_inum_host *inum, unsigned int mode,
654 unsigned int uid, unsigned int gid, 634 unsigned int uid, unsigned int gid,
655 const u64 *generation) 635 const u64 *generation, dev_t dev)
656{ 636{
657 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); 637 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
658 struct gfs2_dinode *di; 638 struct gfs2_dinode *di;
@@ -669,14 +649,15 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
669 di->di_mode = cpu_to_be32(mode); 649 di->di_mode = cpu_to_be32(mode);
670 di->di_uid = cpu_to_be32(uid); 650 di->di_uid = cpu_to_be32(uid);
671 di->di_gid = cpu_to_be32(gid); 651 di->di_gid = cpu_to_be32(gid);
672 di->di_nlink = cpu_to_be32(0); 652 di->di_nlink = 0;
673 di->di_size = cpu_to_be64(0); 653 di->di_size = 0;
674 di->di_blocks = cpu_to_be64(1); 654 di->di_blocks = cpu_to_be64(1);
675 di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(get_seconds()); 655 di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(get_seconds());
676 di->di_major = di->di_minor = cpu_to_be32(0); 656 di->di_major = cpu_to_be32(MAJOR(dev));
657 di->di_minor = cpu_to_be32(MINOR(dev));
677 di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr); 658 di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
678 di->di_generation = cpu_to_be64(*generation); 659 di->di_generation = cpu_to_be64(*generation);
679 di->di_flags = cpu_to_be32(0); 660 di->di_flags = 0;
680 661
681 if (S_ISREG(mode)) { 662 if (S_ISREG(mode)) {
682 if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) || 663 if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
@@ -693,22 +674,22 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
693 } 674 }
694 675
695 di->__pad1 = 0; 676 di->__pad1 = 0;
696 di->di_payload_format = cpu_to_be32(0); 677 di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
697 di->di_height = cpu_to_be32(0); 678 di->di_height = 0;
698 di->__pad2 = 0; 679 di->__pad2 = 0;
699 di->__pad3 = 0; 680 di->__pad3 = 0;
700 di->di_depth = cpu_to_be16(0); 681 di->di_depth = 0;
701 di->di_entries = cpu_to_be32(0); 682 di->di_entries = 0;
702 memset(&di->__pad4, 0, sizeof(di->__pad4)); 683 memset(&di->__pad4, 0, sizeof(di->__pad4));
703 di->di_eattr = cpu_to_be64(0); 684 di->di_eattr = 0;
704 memset(&di->di_reserved, 0, sizeof(di->di_reserved)); 685 memset(&di->di_reserved, 0, sizeof(di->di_reserved));
705 686
706 brelse(dibh); 687 brelse(dibh);
707} 688}
708 689
709static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl, 690static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
710 unsigned int mode, const struct gfs2_inum *inum, 691 unsigned int mode, const struct gfs2_inum_host *inum,
711 const u64 *generation) 692 const u64 *generation, dev_t dev)
712{ 693{
713 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); 694 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
714 unsigned int uid, gid; 695 unsigned int uid, gid;
@@ -729,7 +710,7 @@ static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
729 if (error) 710 if (error)
730 goto out_quota; 711 goto out_quota;
731 712
732 init_dinode(dip, gl, inum, mode, uid, gid, generation); 713 init_dinode(dip, gl, inum, mode, uid, gid, generation, dev);
733 gfs2_quota_change(dip, +1, uid, gid); 714 gfs2_quota_change(dip, +1, uid, gid);
734 gfs2_trans_end(sdp); 715 gfs2_trans_end(sdp);
735 716
@@ -759,8 +740,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
759 if (alloc_required < 0) 740 if (alloc_required < 0)
760 goto fail; 741 goto fail;
761 if (alloc_required) { 742 if (alloc_required) {
762 error = gfs2_quota_check(dip, dip->i_di.di_uid, 743 error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
763 dip->i_di.di_gid);
764 if (error) 744 if (error)
765 goto fail_quota_locks; 745 goto fail_quota_locks;
766 746
@@ -782,16 +762,16 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
782 goto fail_quota_locks; 762 goto fail_quota_locks;
783 } 763 }
784 764
785 error = gfs2_dir_add(&dip->i_inode, name, &ip->i_num, IF2DT(ip->i_di.di_mode)); 765 error = gfs2_dir_add(&dip->i_inode, name, &ip->i_num, IF2DT(ip->i_inode.i_mode));
786 if (error) 766 if (error)
787 goto fail_end_trans; 767 goto fail_end_trans;
788 768
789 error = gfs2_meta_inode_buffer(ip, &dibh); 769 error = gfs2_meta_inode_buffer(ip, &dibh);
790 if (error) 770 if (error)
791 goto fail_end_trans; 771 goto fail_end_trans;
792 ip->i_di.di_nlink = 1; 772 ip->i_inode.i_nlink = 1;
793 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 773 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
794 gfs2_dinode_out(&ip->i_di, dibh->b_data); 774 gfs2_dinode_out(ip, dibh->b_data);
795 brelse(dibh); 775 brelse(dibh);
796 return 0; 776 return 0;
797 777
@@ -860,13 +840,13 @@ static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
860 */ 840 */
861 841
862struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name, 842struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
863 unsigned int mode) 843 unsigned int mode, dev_t dev)
864{ 844{
865 struct inode *inode; 845 struct inode *inode;
866 struct gfs2_inode *dip = ghs->gh_gl->gl_object; 846 struct gfs2_inode *dip = ghs->gh_gl->gl_object;
867 struct inode *dir = &dip->i_inode; 847 struct inode *dir = &dip->i_inode;
868 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); 848 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
869 struct gfs2_inum inum; 849 struct gfs2_inum_host inum;
870 int error; 850 int error;
871 u64 generation; 851 u64 generation;
872 852
@@ -890,35 +870,12 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
890 if (error) 870 if (error)
891 goto fail_gunlock; 871 goto fail_gunlock;
892 872
893 if (inum.no_addr < dip->i_num.no_addr) { 873 error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops,
894 gfs2_glock_dq(ghs); 874 LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
895 875 if (error)
896 error = gfs2_glock_nq_num(sdp, inum.no_addr, 876 goto fail_gunlock;
897 &gfs2_inode_glops, LM_ST_EXCLUSIVE,
898 GL_SKIP, ghs + 1);
899 if (error) {
900 return ERR_PTR(error);
901 }
902
903 gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
904 error = gfs2_glock_nq(ghs);
905 if (error) {
906 gfs2_glock_dq_uninit(ghs + 1);
907 return ERR_PTR(error);
908 }
909
910 error = create_ok(dip, name, mode);
911 if (error)
912 goto fail_gunlock2;
913 } else {
914 error = gfs2_glock_nq_num(sdp, inum.no_addr,
915 &gfs2_inode_glops, LM_ST_EXCLUSIVE,
916 GL_SKIP, ghs + 1);
917 if (error)
918 goto fail_gunlock;
919 }
920 877
921 error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation); 878 error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev);
922 if (error) 879 if (error)
923 goto fail_gunlock2; 880 goto fail_gunlock2;
924 881
@@ -975,7 +932,7 @@ int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
975 932
976 if (ip->i_di.di_entries != 2) { 933 if (ip->i_di.di_entries != 2) {
977 if (gfs2_consist_inode(ip)) 934 if (gfs2_consist_inode(ip))
978 gfs2_dinode_print(&ip->i_di); 935 gfs2_dinode_print(ip);
979 return -EIO; 936 return -EIO;
980 } 937 }
981 938
@@ -997,7 +954,12 @@ int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
997 if (error) 954 if (error)
998 return error; 955 return error;
999 956
1000 error = gfs2_change_nlink(ip, -2); 957 /* It looks odd, but it really should be done twice */
958 error = gfs2_change_nlink(ip, -1);
959 if (error)
960 return error;
961
962 error = gfs2_change_nlink(ip, -1);
1001 if (error) 963 if (error)
1002 return error; 964 return error;
1003 965
@@ -1018,16 +980,16 @@ int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
1018int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name, 980int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
1019 struct gfs2_inode *ip) 981 struct gfs2_inode *ip)
1020{ 982{
1021 struct gfs2_inum inum; 983 struct gfs2_inum_host inum;
1022 unsigned int type; 984 unsigned int type;
1023 int error; 985 int error;
1024 986
1025 if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode)) 987 if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
1026 return -EPERM; 988 return -EPERM;
1027 989
1028 if ((dip->i_di.di_mode & S_ISVTX) && 990 if ((dip->i_inode.i_mode & S_ISVTX) &&
1029 dip->i_di.di_uid != current->fsuid && 991 dip->i_inode.i_uid != current->fsuid &&
1030 ip->i_di.di_uid != current->fsuid && !capable(CAP_FOWNER)) 992 ip->i_inode.i_uid != current->fsuid && !capable(CAP_FOWNER))
1031 return -EPERM; 993 return -EPERM;
1032 994
1033 if (IS_APPEND(&dip->i_inode)) 995 if (IS_APPEND(&dip->i_inode))
@@ -1044,7 +1006,7 @@ int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
1044 if (!gfs2_inum_equal(&inum, &ip->i_num)) 1006 if (!gfs2_inum_equal(&inum, &ip->i_num))
1045 return -ENOENT; 1007 return -ENOENT;
1046 1008
1047 if (IF2DT(ip->i_di.di_mode) != type) { 1009 if (IF2DT(ip->i_inode.i_mode) != type) {
1048 gfs2_consist_inode(dip); 1010 gfs2_consist_inode(dip);
1049 return -EIO; 1011 return -EIO;
1050 } 1012 }
@@ -1194,7 +1156,7 @@ int gfs2_glock_nq_atime(struct gfs2_holder *gh)
1194 return 0; 1156 return 0;
1195 1157
1196 curtime = get_seconds(); 1158 curtime = get_seconds();
1197 if (curtime - ip->i_di.di_atime >= quantum) { 1159 if (curtime - ip->i_inode.i_atime.tv_sec >= quantum) {
1198 gfs2_glock_dq(gh); 1160 gfs2_glock_dq(gh);
1199 gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY, 1161 gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY,
1200 gh); 1162 gh);
@@ -1206,7 +1168,7 @@ int gfs2_glock_nq_atime(struct gfs2_holder *gh)
1206 trying to get exclusive lock. */ 1168 trying to get exclusive lock. */
1207 1169
1208 curtime = get_seconds(); 1170 curtime = get_seconds();
1209 if (curtime - ip->i_di.di_atime >= quantum) { 1171 if (curtime - ip->i_inode.i_atime.tv_sec >= quantum) {
1210 struct buffer_head *dibh; 1172 struct buffer_head *dibh;
1211 struct gfs2_dinode *di; 1173 struct gfs2_dinode *di;
1212 1174
@@ -1220,11 +1182,11 @@ int gfs2_glock_nq_atime(struct gfs2_holder *gh)
1220 if (error) 1182 if (error)
1221 goto fail_end_trans; 1183 goto fail_end_trans;
1222 1184
1223 ip->i_di.di_atime = curtime; 1185 ip->i_inode.i_atime.tv_sec = curtime;
1224 1186
1225 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1187 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1226 di = (struct gfs2_dinode *)dibh->b_data; 1188 di = (struct gfs2_dinode *)dibh->b_data;
1227 di->di_atime = cpu_to_be64(ip->i_di.di_atime); 1189 di->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
1228 brelse(dibh); 1190 brelse(dibh);
1229 1191
1230 gfs2_trans_end(sdp); 1192 gfs2_trans_end(sdp);
@@ -1249,92 +1211,6 @@ fail:
1249 return error; 1211 return error;
1250} 1212}
1251 1213
1252/**
1253 * glock_compare_atime - Compare two struct gfs2_glock structures for sort
1254 * @arg_a: the first structure
1255 * @arg_b: the second structure
1256 *
1257 * Returns: 1 if A > B
1258 * -1 if A < B
1259 * 0 if A == B
1260 */
1261
1262static int glock_compare_atime(const void *arg_a, const void *arg_b)
1263{
1264 const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1265 const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1266 const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1267 const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1268
1269 if (a->ln_number > b->ln_number)
1270 return 1;
1271 if (a->ln_number < b->ln_number)
1272 return -1;
1273 if (gh_a->gh_state == LM_ST_SHARED && gh_b->gh_state == LM_ST_EXCLUSIVE)
1274 return 1;
1275 if (gh_a->gh_state == LM_ST_SHARED && (gh_b->gh_flags & GL_ATIME))
1276 return 1;
1277
1278 return 0;
1279}
1280
1281/**
1282 * gfs2_glock_nq_m_atime - acquire multiple glocks where one may need an
1283 * atime update
1284 * @num_gh: the number of structures
1285 * @ghs: an array of struct gfs2_holder structures
1286 *
1287 * Returns: 0 on success (all glocks acquired),
1288 * errno on failure (no glocks acquired)
1289 */
1290
1291int gfs2_glock_nq_m_atime(unsigned int num_gh, struct gfs2_holder *ghs)
1292{
1293 struct gfs2_holder **p;
1294 unsigned int x;
1295 int error = 0;
1296
1297 if (!num_gh)
1298 return 0;
1299
1300 if (num_gh == 1) {
1301 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1302 if (ghs->gh_flags & GL_ATIME)
1303 error = gfs2_glock_nq_atime(ghs);
1304 else
1305 error = gfs2_glock_nq(ghs);
1306 return error;
1307 }
1308
1309 p = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
1310 if (!p)
1311 return -ENOMEM;
1312
1313 for (x = 0; x < num_gh; x++)
1314 p[x] = &ghs[x];
1315
1316 sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare_atime,NULL);
1317
1318 for (x = 0; x < num_gh; x++) {
1319 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1320
1321 if (p[x]->gh_flags & GL_ATIME)
1322 error = gfs2_glock_nq_atime(p[x]);
1323 else
1324 error = gfs2_glock_nq(p[x]);
1325
1326 if (error) {
1327 while (x--)
1328 gfs2_glock_dq(p[x]);
1329 break;
1330 }
1331 }
1332
1333 kfree(p);
1334 return error;
1335}
1336
1337
1338static int 1214static int
1339__gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr) 1215__gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
1340{ 1216{
@@ -1345,10 +1221,8 @@ __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
1345 if (!error) { 1221 if (!error) {
1346 error = inode_setattr(&ip->i_inode, attr); 1222 error = inode_setattr(&ip->i_inode, attr);
1347 gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error); 1223 gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
1348 gfs2_inode_attr_out(ip);
1349
1350 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1224 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1351 gfs2_dinode_out(&ip->i_di, dibh->b_data); 1225 gfs2_dinode_out(ip, dibh->b_data);
1352 brelse(dibh); 1226 brelse(dibh);
1353 } 1227 }
1354 return error; 1228 return error;
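
The atime hunks above drop the duplicated di_atime field in favour of the VFS inode's i_atime.tv_sec, but the deferral logic is unchanged: the shared glock is only traded for an exclusive one when the recorded atime has fallen at least one quantum behind the clock, and the test is repeated once the exclusive lock is held in case another holder refreshed atime in the meantime. A minimal standalone sketch of that freshness test, with simplified types and an invented quantum value (not the kernel's tunable):

#include <stdbool.h>
#include <time.h>

/* Stand-in for the gt_atime_quantum tunable (seconds); value invented. */
#define ATIME_QUANTUM 3600

struct demo_inode {
    time_t atime;   /* last access time recorded on disk */
};

/* Decide whether the shared lock should be traded for an exclusive one
 * so a fresh atime can be written back. */
static bool atime_needs_update(const struct demo_inode *ip, time_t now)
{
    return now - ip->atime >= ATIME_QUANTUM;
}
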
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index f5d861760579..b57f448b15bc 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -22,13 +22,19 @@ static inline int gfs2_is_jdata(struct gfs2_inode *ip)
22 22
23static inline int gfs2_is_dir(struct gfs2_inode *ip) 23static inline int gfs2_is_dir(struct gfs2_inode *ip)
24{ 24{
25 return S_ISDIR(ip->i_di.di_mode); 25 return S_ISDIR(ip->i_inode.i_mode);
26}
27
28static inline void gfs2_set_inode_blocks(struct inode *inode)
29{
30 struct gfs2_inode *ip = GFS2_I(inode);
31 inode->i_blocks = ip->i_di.di_blocks <<
32 (GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
26} 33}
27 34
28void gfs2_inode_attr_in(struct gfs2_inode *ip); 35void gfs2_inode_attr_in(struct gfs2_inode *ip);
29void gfs2_inode_attr_out(struct gfs2_inode *ip); 36struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum_host *inum, unsigned type);
30struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum *inum, unsigned type); 37struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum_host *inum);
31struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum *inum);
32 38
33int gfs2_inode_refresh(struct gfs2_inode *ip); 39int gfs2_inode_refresh(struct gfs2_inode *ip);
34 40
@@ -37,19 +43,15 @@ int gfs2_change_nlink(struct gfs2_inode *ip, int diff);
37struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name, 43struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
38 int is_root, struct nameidata *nd); 44 int is_root, struct nameidata *nd);
39struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name, 45struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
40 unsigned int mode); 46 unsigned int mode, dev_t dev);
41int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name, 47int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
42 struct gfs2_inode *ip); 48 struct gfs2_inode *ip);
43int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name, 49int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
44 struct gfs2_inode *ip); 50 struct gfs2_inode *ip);
45int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to); 51int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to);
46int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len); 52int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len);
47
48int gfs2_glock_nq_atime(struct gfs2_holder *gh); 53int gfs2_glock_nq_atime(struct gfs2_holder *gh);
49int gfs2_glock_nq_m_atime(unsigned int num_gh, struct gfs2_holder *ghs);
50
51int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr); 54int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr);
52
53struct inode *gfs2_lookup_simple(struct inode *dip, const char *name); 55struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
54 56
55#endif /* __INODE_DOT_H__ */ 57#endif /* __INODE_DOT_H__ */
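
The new gfs2_set_inode_blocks() inline derives the VFS i_blocks value (counted in 512-byte sectors) from the on-disk block count by shifting with the difference between the superblock's block-size shift and the basic-block shift. The same unit conversion in isolation, as a hedged sketch with invented names:

#include <stdint.h>

#define BASIC_BLOCK_SHIFT 9   /* i_blocks counts 512-byte sectors */

/* Convert a filesystem block count to 512-byte sectors; bsize_shift is
 * log2 of the filesystem block size (e.g. 12 for 4KB blocks). */
static uint64_t fs_blocks_to_sectors(uint64_t fs_blocks, unsigned int bsize_shift)
{
    return fs_blocks << (bsize_shift - BASIC_BLOCK_SHIFT);
}
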
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 0cace3da9dbb..291415ddfe51 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -15,6 +15,7 @@
15#include <linux/gfs2_ondisk.h> 15#include <linux/gfs2_ondisk.h>
16#include <linux/crc32.h> 16#include <linux/crc32.h>
17#include <linux/lm_interface.h> 17#include <linux/lm_interface.h>
18#include <linux/delay.h>
18 19
19#include "gfs2.h" 20#include "gfs2.h"
20#include "incore.h" 21#include "incore.h"
@@ -142,7 +143,7 @@ static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int fl
142 return list_empty(&ai->ai_ail1_list); 143 return list_empty(&ai->ai_ail1_list);
143} 144}
144 145
145void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags) 146static void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
146{ 147{
147 struct list_head *head = &sdp->sd_ail1_list; 148 struct list_head *head = &sdp->sd_ail1_list;
148 u64 sync_gen; 149 u64 sync_gen;
@@ -261,6 +262,12 @@ static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
261 * @sdp: The GFS2 superblock 262 * @sdp: The GFS2 superblock
262 * @blks: The number of blocks to reserve 263 * @blks: The number of blocks to reserve
263 * 264 *
 265 * Note that we never give out the last 6 blocks of the journal. That's
 266 * because there are a small number of header blocks
267 * associated with each log flush. The exact number can't be known until
268 * flush time, so we ensure that we have just enough free blocks at all
269 * times to avoid running out during a log flush.
270 *
264 * Returns: errno 271 * Returns: errno
265 */ 272 */
266 273
@@ -274,7 +281,7 @@ int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
274 281
275 mutex_lock(&sdp->sd_log_reserve_mutex); 282 mutex_lock(&sdp->sd_log_reserve_mutex);
276 gfs2_log_lock(sdp); 283 gfs2_log_lock(sdp);
277 while(sdp->sd_log_blks_free <= blks) { 284 while(sdp->sd_log_blks_free <= (blks + 6)) {
278 gfs2_log_unlock(sdp); 285 gfs2_log_unlock(sdp);
279 gfs2_ail1_empty(sdp, 0); 286 gfs2_ail1_empty(sdp, 0);
280 gfs2_log_flush(sdp, NULL); 287 gfs2_log_flush(sdp, NULL);
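
The added comment and the (blks + 6) test above give the journal a fixed headroom: a reservation succeeds only while granting it would still leave six blocks free for the log-flush headers, whose exact count isn't known in advance. Expressed as a standalone predicate (constant and names invented for illustration):

#include <stdbool.h>

#define LOG_FLUSH_HEADROOM 6   /* worst-case header blocks per log flush */

/* A reservation of blks journal blocks may only be granted while the
 * flush headroom would still remain free afterwards. */
static bool log_reserve_ok(unsigned int free_blks, unsigned int blks)
{
    return free_blks > blks + LOG_FLUSH_HEADROOM;
}
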
@@ -319,7 +326,8 @@ static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
319 bh_map.b_size = 1 << inode->i_blkbits; 326 bh_map.b_size = 1 << inode->i_blkbits;
320 error = gfs2_block_map(inode, lbn, 0, &bh_map); 327 error = gfs2_block_map(inode, lbn, 0, &bh_map);
321 if (error || !bh_map.b_blocknr) 328 if (error || !bh_map.b_blocknr)
322 printk(KERN_INFO "error=%d, dbn=%llu lbn=%u", error, bh_map.b_blocknr, lbn); 329 printk(KERN_INFO "error=%d, dbn=%llu lbn=%u", error,
330 (unsigned long long)bh_map.b_blocknr, lbn);
323 gfs2_assert_withdraw(sdp, !error && bh_map.b_blocknr); 331 gfs2_assert_withdraw(sdp, !error && bh_map.b_blocknr);
324 332
325 return bh_map.b_blocknr; 333 return bh_map.b_blocknr;
@@ -643,12 +651,9 @@ void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
643 up_read(&sdp->sd_log_flush_lock); 651 up_read(&sdp->sd_log_flush_lock);
644 652
645 gfs2_log_lock(sdp); 653 gfs2_log_lock(sdp);
646 if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks)) { 654 if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks))
647 gfs2_log_unlock(sdp); 655 wake_up_process(sdp->sd_logd_process);
648 gfs2_log_flush(sdp, NULL); 656 gfs2_log_unlock(sdp);
649 } else {
650 gfs2_log_unlock(sdp);
651 }
652} 657}
653 658
654/** 659/**
@@ -686,3 +691,21 @@ void gfs2_log_shutdown(struct gfs2_sbd *sdp)
686 up_write(&sdp->sd_log_flush_lock); 691 up_write(&sdp->sd_log_flush_lock);
687} 692}
688 693
694
695/**
696 * gfs2_meta_syncfs - sync all the buffers in a filesystem
697 * @sdp: the filesystem
698 *
699 */
700
701void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
702{
703 gfs2_log_flush(sdp, NULL);
704 for (;;) {
705 gfs2_ail1_start(sdp, DIO_ALL);
706 if (gfs2_ail1_empty(sdp, DIO_ALL))
707 break;
708 msleep(10);
709 }
710}
711
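
gfs2_meta_syncfs(), moved here so it can call the now-static gfs2_ail1_start(), is a flush-then-poll loop: flush the log, kick AIL writeback, and retry with a short sleep until the list drains. A userspace-flavoured sketch of the same loop shape, with nanosleep standing in for msleep() and the two helpers reduced to stubs:

#include <stdbool.h>
#include <time.h>

static void start_writeback(void) { /* stands in for gfs2_ail1_start() */ }
static bool writeback_done(void)  { return true; /* stands in for gfs2_ail1_empty() */ }

/* Poll until the AIL-style list drains, yielding roughly 10ms between
 * attempts instead of spinning. */
static void sync_all_meta(void)
{
    for (;;) {
        start_writeback();
        if (writeback_done())
            break;
        struct timespec ts = { 0, 10 * 1000 * 1000 };   /* ~10ms, like msleep(10) */
        nanosleep(&ts, NULL);
    }
}
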
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index 7f5737d55612..8e7aa0f29109 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -48,7 +48,6 @@ static inline void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
48unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct, 48unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
49 unsigned int ssize); 49 unsigned int ssize);
50 50
51void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags);
52int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags); 51int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags);
53 52
54int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks); 53int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
@@ -61,5 +60,6 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl);
61void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans); 60void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
62 61
63void gfs2_log_shutdown(struct gfs2_sbd *sdp); 62void gfs2_log_shutdown(struct gfs2_sbd *sdp);
63void gfs2_meta_syncfs(struct gfs2_sbd *sdp);
64 64
65#endif /* __LOG_DOT_H__ */ 65#endif /* __LOG_DOT_H__ */
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index ab6d1115f95d..4d7f94d8c7bd 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -182,7 +182,7 @@ static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
182} 182}
183 183
184static void buf_lo_before_scan(struct gfs2_jdesc *jd, 184static void buf_lo_before_scan(struct gfs2_jdesc *jd,
185 struct gfs2_log_header *head, int pass) 185 struct gfs2_log_header_host *head, int pass)
186{ 186{
187 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); 187 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
188 188
@@ -328,7 +328,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
328} 328}
329 329
330static void revoke_lo_before_scan(struct gfs2_jdesc *jd, 330static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
331 struct gfs2_log_header *head, int pass) 331 struct gfs2_log_header_host *head, int pass)
332{ 332{
333 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); 333 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
334 334
@@ -509,7 +509,7 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
509{ 509{
510 LIST_HEAD(started); 510 LIST_HEAD(started);
511 struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt; 511 struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
512 struct buffer_head *bh = NULL; 512 struct buffer_head *bh = NULL,*bh1 = NULL;
513 unsigned int offset = sizeof(struct gfs2_log_descriptor); 513 unsigned int offset = sizeof(struct gfs2_log_descriptor);
514 struct gfs2_log_descriptor *ld; 514 struct gfs2_log_descriptor *ld;
515 unsigned int limit; 515 unsigned int limit;
@@ -537,8 +537,13 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
537 list_for_each_entry_safe_continue(bd1, bdt, 537 list_for_each_entry_safe_continue(bd1, bdt,
538 &sdp->sd_log_le_databuf, 538 &sdp->sd_log_le_databuf,
539 bd_le.le_list) { 539 bd_le.le_list) {
540 /* store off the buffer head in a local ptr since
541 * gfs2_bufdata might change when we drop the log lock
542 */
543 bh1 = bd1->bd_bh;
544
540 /* An ordered write buffer */ 545 /* An ordered write buffer */
541 if (bd1->bd_bh && !buffer_pinned(bd1->bd_bh)) { 546 if (bh1 && !buffer_pinned(bh1)) {
542 list_move(&bd1->bd_le.le_list, &started); 547 list_move(&bd1->bd_le.le_list, &started);
543 if (bd1 == bd2) { 548 if (bd1 == bd2) {
544 bd2 = NULL; 549 bd2 = NULL;
@@ -547,20 +552,21 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
547 bd_le.le_list); 552 bd_le.le_list);
548 } 553 }
549 total_dbuf--; 554 total_dbuf--;
550 if (bd1->bd_bh) { 555 if (bh1) {
551 get_bh(bd1->bd_bh); 556 if (buffer_dirty(bh1)) {
552 if (buffer_dirty(bd1->bd_bh)) { 557 get_bh(bh1);
558
553 gfs2_log_unlock(sdp); 559 gfs2_log_unlock(sdp);
554 wait_on_buffer(bd1->bd_bh); 560
555 ll_rw_block(WRITE, 1, 561 ll_rw_block(SWRITE, 1, &bh1);
556 &bd1->bd_bh); 562 brelse(bh1);
563
557 gfs2_log_lock(sdp); 564 gfs2_log_lock(sdp);
558 } 565 }
559 brelse(bd1->bd_bh);
560 continue; 566 continue;
561 } 567 }
562 continue; 568 continue;
563 } else if (bd1->bd_bh) { /* A journaled buffer */ 569 } else if (bh1) { /* A journaled buffer */
564 int magic; 570 int magic;
565 gfs2_log_unlock(sdp); 571 gfs2_log_unlock(sdp);
566 if (!bh) { 572 if (!bh) {
@@ -582,16 +588,16 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
582 ld->ld_data2 = cpu_to_be32(0); 588 ld->ld_data2 = cpu_to_be32(0);
583 memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved)); 589 memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
584 } 590 }
585 magic = gfs2_check_magic(bd1->bd_bh); 591 magic = gfs2_check_magic(bh1);
586 *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr); 592 *ptr++ = cpu_to_be64(bh1->b_blocknr);
587 *ptr++ = cpu_to_be64((__u64)magic); 593 *ptr++ = cpu_to_be64((__u64)magic);
588 clear_buffer_escaped(bd1->bd_bh); 594 clear_buffer_escaped(bh1);
589 if (unlikely(magic != 0)) 595 if (unlikely(magic != 0))
590 set_buffer_escaped(bd1->bd_bh); 596 set_buffer_escaped(bh1);
591 gfs2_log_lock(sdp); 597 gfs2_log_lock(sdp);
592 if (n++ > num) 598 if (n++ > num)
593 break; 599 break;
594 } else if (!bd1->bd_bh) { 600 } else if (!bh1) {
595 total_dbuf--; 601 total_dbuf--;
596 sdp->sd_log_num_databuf--; 602 sdp->sd_log_num_databuf--;
597 list_del_init(&bd1->bd_le.le_list); 603 list_del_init(&bd1->bd_le.le_list);
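
The bh1 changes in databuf_lo_before_commit() above follow a standard locking pattern: snapshot the buffer-head pointer while the log lock is held, then drop the lock around the potentially sleeping write so the code only ever uses the snapshot, not a field that may change once the lock is released. A generic sketch of that pattern, with a pthread mutex standing in for gfs2_log_lock() and invented types:

#include <pthread.h>
#include <stddef.h>

struct demo_bufdata { void *bh; int dirty; };

static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;

static void slow_write(void *bh) { (void)bh; /* stands in for ll_rw_block(); may sleep */ }

/* Handle one ordered-write buffer: take a local copy of bd->bh under the
 * lock, then drop the lock around the sleeping write so only the snapshot
 * is dereferenced afterwards. */
static void write_ordered(struct demo_bufdata *bd)
{
    pthread_mutex_lock(&log_lock);
    void *bh = bd->bh;                  /* snapshot taken under the lock */
    if (bh && bd->dirty) {
        pthread_mutex_unlock(&log_lock);
        slow_write(bh);
        pthread_mutex_lock(&log_lock);
    }
    pthread_mutex_unlock(&log_lock);
}
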
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index 5839c05ae6be..965bc65c7c64 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -60,7 +60,7 @@ static inline void lops_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
60} 60}
61 61
62static inline void lops_before_scan(struct gfs2_jdesc *jd, 62static inline void lops_before_scan(struct gfs2_jdesc *jd,
63 struct gfs2_log_header *head, 63 struct gfs2_log_header_host *head,
64 unsigned int pass) 64 unsigned int pass)
65{ 65{
66 int x; 66 int x;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 9889c1eacec1..7c1a9e22a526 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -25,7 +25,7 @@
25#include "util.h" 25#include "util.h"
26#include "glock.h" 26#include "glock.h"
27 27
28static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long flags) 28static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
29{ 29{
30 struct gfs2_inode *ip = foo; 30 struct gfs2_inode *ip = foo;
31 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == 31 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
@@ -37,7 +37,7 @@ static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long
37 } 37 }
38} 38}
39 39
40static void gfs2_init_glock_once(void *foo, kmem_cache_t *cachep, unsigned long flags) 40static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
41{ 41{
42 struct gfs2_glock *gl = foo; 42 struct gfs2_glock *gl = foo;
43 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == 43 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 3912d6a4b1e6..0e34d9918973 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -127,17 +127,17 @@ void gfs2_meta_sync(struct gfs2_glock *gl)
127 127
128/** 128/**
129 * getbuf - Get a buffer with a given address space 129 * getbuf - Get a buffer with a given address space
130 * @sdp: the filesystem 130 * @gl: the glock
131 * @aspace: the address space
132 * @blkno: the block number (filesystem scope) 131 * @blkno: the block number (filesystem scope)
133 * @create: 1 if the buffer should be created 132 * @create: 1 if the buffer should be created
134 * 133 *
135 * Returns: the buffer 134 * Returns: the buffer
136 */ 135 */
137 136
138static struct buffer_head *getbuf(struct gfs2_sbd *sdp, struct inode *aspace, 137static struct buffer_head *getbuf(struct gfs2_glock *gl, u64 blkno, int create)
139 u64 blkno, int create)
140{ 138{
139 struct address_space *mapping = gl->gl_aspace->i_mapping;
140 struct gfs2_sbd *sdp = gl->gl_sbd;
141 struct page *page; 141 struct page *page;
142 struct buffer_head *bh; 142 struct buffer_head *bh;
143 unsigned int shift; 143 unsigned int shift;
@@ -150,13 +150,13 @@ static struct buffer_head *getbuf(struct gfs2_sbd *sdp, struct inode *aspace,
150 150
151 if (create) { 151 if (create) {
152 for (;;) { 152 for (;;) {
153 page = grab_cache_page(aspace->i_mapping, index); 153 page = grab_cache_page(mapping, index);
154 if (page) 154 if (page)
155 break; 155 break;
156 yield(); 156 yield();
157 } 157 }
158 } else { 158 } else {
159 page = find_lock_page(aspace->i_mapping, index); 159 page = find_lock_page(mapping, index);
160 if (!page) 160 if (!page)
161 return NULL; 161 return NULL;
162 } 162 }
@@ -202,7 +202,7 @@ static void meta_prep_new(struct buffer_head *bh)
202struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno) 202struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
203{ 203{
204 struct buffer_head *bh; 204 struct buffer_head *bh;
205 bh = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE); 205 bh = getbuf(gl, blkno, CREATE);
206 meta_prep_new(bh); 206 meta_prep_new(bh);
207 return bh; 207 return bh;
208} 208}
@@ -220,7 +220,7 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
220int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, 220int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
221 struct buffer_head **bhp) 221 struct buffer_head **bhp)
222{ 222{
223 *bhp = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE); 223 *bhp = getbuf(gl, blkno, CREATE);
224 if (!buffer_uptodate(*bhp)) 224 if (!buffer_uptodate(*bhp))
225 ll_rw_block(READ_META, 1, bhp); 225 ll_rw_block(READ_META, 1, bhp);
226 if (flags & DIO_WAIT) { 226 if (flags & DIO_WAIT) {
@@ -379,11 +379,10 @@ void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
379void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen) 379void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
380{ 380{
381 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 381 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
382 struct inode *aspace = ip->i_gl->gl_aspace;
383 struct buffer_head *bh; 382 struct buffer_head *bh;
384 383
385 while (blen) { 384 while (blen) {
386 bh = getbuf(sdp, aspace, bstart, NO_CREATE); 385 bh = getbuf(ip->i_gl, bstart, NO_CREATE);
387 if (bh) { 386 if (bh) {
388 struct gfs2_bufdata *bd = bh->b_private; 387 struct gfs2_bufdata *bd = bh->b_private;
389 388
@@ -472,6 +471,9 @@ int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
472 struct buffer_head *bh = NULL, **bh_slot = ip->i_cache + height; 471 struct buffer_head *bh = NULL, **bh_slot = ip->i_cache + height;
473 int in_cache = 0; 472 int in_cache = 0;
474 473
474 BUG_ON(!gl);
475 BUG_ON(!sdp);
476
475 spin_lock(&ip->i_spin); 477 spin_lock(&ip->i_spin);
476 if (*bh_slot && (*bh_slot)->b_blocknr == num) { 478 if (*bh_slot && (*bh_slot)->b_blocknr == num) {
477 bh = *bh_slot; 479 bh = *bh_slot;
@@ -481,7 +483,7 @@ int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
481 spin_unlock(&ip->i_spin); 483 spin_unlock(&ip->i_spin);
482 484
483 if (!bh) 485 if (!bh)
484 bh = getbuf(gl->gl_sbd, gl->gl_aspace, num, CREATE); 486 bh = getbuf(gl, num, CREATE);
485 487
486 if (!bh) 488 if (!bh)
487 return -ENOBUFS; 489 return -ENOBUFS;
@@ -532,7 +534,6 @@ err:
532struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen) 534struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
533{ 535{
534 struct gfs2_sbd *sdp = gl->gl_sbd; 536 struct gfs2_sbd *sdp = gl->gl_sbd;
535 struct inode *aspace = gl->gl_aspace;
536 struct buffer_head *first_bh, *bh; 537 struct buffer_head *first_bh, *bh;
537 u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >> 538 u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
538 sdp->sd_sb.sb_bsize_shift; 539 sdp->sd_sb.sb_bsize_shift;
@@ -544,7 +545,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
544 if (extlen > max_ra) 545 if (extlen > max_ra)
545 extlen = max_ra; 546 extlen = max_ra;
546 547
547 first_bh = getbuf(sdp, aspace, dblock, CREATE); 548 first_bh = getbuf(gl, dblock, CREATE);
548 549
549 if (buffer_uptodate(first_bh)) 550 if (buffer_uptodate(first_bh))
550 goto out; 551 goto out;
@@ -555,7 +556,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
555 extlen--; 556 extlen--;
556 557
557 while (extlen) { 558 while (extlen) {
558 bh = getbuf(sdp, aspace, dblock, CREATE); 559 bh = getbuf(gl, dblock, CREATE);
559 560
560 if (!buffer_uptodate(bh) && !buffer_locked(bh)) 561 if (!buffer_uptodate(bh) && !buffer_locked(bh))
561 ll_rw_block(READA, 1, &bh); 562 ll_rw_block(READA, 1, &bh);
@@ -571,20 +572,3 @@ out:
571 return first_bh; 572 return first_bh;
572} 573}
573 574
574/**
575 * gfs2_meta_syncfs - sync all the buffers in a filesystem
576 * @sdp: the filesystem
577 *
578 */
579
580void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
581{
582 gfs2_log_flush(sdp, NULL);
583 for (;;) {
584 gfs2_ail1_start(sdp, DIO_ALL);
585 if (gfs2_ail1_empty(sdp, DIO_ALL))
586 break;
587 msleep(10);
588 }
589}
590
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index 3ec939e20dff..e037425bc042 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -67,7 +67,6 @@ static inline int gfs2_meta_inode_buffer(struct gfs2_inode *ip,
67} 67}
68 68
69struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen); 69struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen);
70void gfs2_meta_syncfs(struct gfs2_sbd *sdp);
71 70
72#define buffer_busy(bh) \ 71#define buffer_busy(bh) \
73((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock) | (1ul << BH_Pinned))) 72((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock) | (1ul << BH_Pinned)))
diff --git a/fs/gfs2/ondisk.c b/fs/gfs2/ondisk.c
index 1025960b0e6e..f2495f1e21ad 100644
--- a/fs/gfs2/ondisk.c
+++ b/fs/gfs2/ondisk.c
@@ -15,6 +15,8 @@
15 15
16#include "gfs2.h" 16#include "gfs2.h"
17#include <linux/gfs2_ondisk.h> 17#include <linux/gfs2_ondisk.h>
18#include <linux/lm_interface.h>
19#include "incore.h"
18 20
19#define pv(struct, member, fmt) printk(KERN_INFO " "#member" = "fmt"\n", \ 21#define pv(struct, member, fmt) printk(KERN_INFO " "#member" = "fmt"\n", \
20 struct->member); 22 struct->member);
@@ -32,7 +34,7 @@
32 * first arg: the cpu-order structure 34 * first arg: the cpu-order structure
33 */ 35 */
34 36
35void gfs2_inum_in(struct gfs2_inum *no, const void *buf) 37void gfs2_inum_in(struct gfs2_inum_host *no, const void *buf)
36{ 38{
37 const struct gfs2_inum *str = buf; 39 const struct gfs2_inum *str = buf;
38 40
@@ -40,7 +42,7 @@ void gfs2_inum_in(struct gfs2_inum *no, const void *buf)
40 no->no_addr = be64_to_cpu(str->no_addr); 42 no->no_addr = be64_to_cpu(str->no_addr);
41} 43}
42 44
43void gfs2_inum_out(const struct gfs2_inum *no, void *buf) 45void gfs2_inum_out(const struct gfs2_inum_host *no, void *buf)
44{ 46{
45 struct gfs2_inum *str = buf; 47 struct gfs2_inum *str = buf;
46 48
@@ -48,13 +50,13 @@ void gfs2_inum_out(const struct gfs2_inum *no, void *buf)
48 str->no_addr = cpu_to_be64(no->no_addr); 50 str->no_addr = cpu_to_be64(no->no_addr);
49} 51}
50 52
51static void gfs2_inum_print(const struct gfs2_inum *no) 53static void gfs2_inum_print(const struct gfs2_inum_host *no)
52{ 54{
53 printk(KERN_INFO " no_formal_ino = %llu\n", (unsigned long long)no->no_formal_ino); 55 printk(KERN_INFO " no_formal_ino = %llu\n", (unsigned long long)no->no_formal_ino);
54 printk(KERN_INFO " no_addr = %llu\n", (unsigned long long)no->no_addr); 56 printk(KERN_INFO " no_addr = %llu\n", (unsigned long long)no->no_addr);
55} 57}
56 58
57static void gfs2_meta_header_in(struct gfs2_meta_header *mh, const void *buf) 59static void gfs2_meta_header_in(struct gfs2_meta_header_host *mh, const void *buf)
58{ 60{
59 const struct gfs2_meta_header *str = buf; 61 const struct gfs2_meta_header *str = buf;
60 62
@@ -63,23 +65,7 @@ static void gfs2_meta_header_in(struct gfs2_meta_header *mh, const void *buf)
63 mh->mh_format = be32_to_cpu(str->mh_format); 65 mh->mh_format = be32_to_cpu(str->mh_format);
64} 66}
65 67
66static void gfs2_meta_header_out(const struct gfs2_meta_header *mh, void *buf) 68void gfs2_sb_in(struct gfs2_sb_host *sb, const void *buf)
67{
68 struct gfs2_meta_header *str = buf;
69
70 str->mh_magic = cpu_to_be32(mh->mh_magic);
71 str->mh_type = cpu_to_be32(mh->mh_type);
72 str->mh_format = cpu_to_be32(mh->mh_format);
73}
74
75static void gfs2_meta_header_print(const struct gfs2_meta_header *mh)
76{
77 pv(mh, mh_magic, "0x%.8X");
78 pv(mh, mh_type, "%u");
79 pv(mh, mh_format, "%u");
80}
81
82void gfs2_sb_in(struct gfs2_sb *sb, const void *buf)
83{ 69{
84 const struct gfs2_sb *str = buf; 70 const struct gfs2_sb *str = buf;
85 71
@@ -97,7 +83,7 @@ void gfs2_sb_in(struct gfs2_sb *sb, const void *buf)
97 memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN); 83 memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
98} 84}
99 85
100void gfs2_rindex_in(struct gfs2_rindex *ri, const void *buf) 86void gfs2_rindex_in(struct gfs2_rindex_host *ri, const void *buf)
101{ 87{
102 const struct gfs2_rindex *str = buf; 88 const struct gfs2_rindex *str = buf;
103 89
@@ -109,7 +95,7 @@ void gfs2_rindex_in(struct gfs2_rindex *ri, const void *buf)
109 95
110} 96}
111 97
112void gfs2_rindex_print(const struct gfs2_rindex *ri) 98void gfs2_rindex_print(const struct gfs2_rindex_host *ri)
113{ 99{
114 printk(KERN_INFO " ri_addr = %llu\n", (unsigned long long)ri->ri_addr); 100 printk(KERN_INFO " ri_addr = %llu\n", (unsigned long long)ri->ri_addr);
115 pv(ri, ri_length, "%u"); 101 pv(ri, ri_length, "%u");
@@ -120,22 +106,20 @@ void gfs2_rindex_print(const struct gfs2_rindex *ri)
120 pv(ri, ri_bitbytes, "%u"); 106 pv(ri, ri_bitbytes, "%u");
121} 107}
122 108
123void gfs2_rgrp_in(struct gfs2_rgrp *rg, const void *buf) 109void gfs2_rgrp_in(struct gfs2_rgrp_host *rg, const void *buf)
124{ 110{
125 const struct gfs2_rgrp *str = buf; 111 const struct gfs2_rgrp *str = buf;
126 112
127 gfs2_meta_header_in(&rg->rg_header, buf);
128 rg->rg_flags = be32_to_cpu(str->rg_flags); 113 rg->rg_flags = be32_to_cpu(str->rg_flags);
129 rg->rg_free = be32_to_cpu(str->rg_free); 114 rg->rg_free = be32_to_cpu(str->rg_free);
130 rg->rg_dinodes = be32_to_cpu(str->rg_dinodes); 115 rg->rg_dinodes = be32_to_cpu(str->rg_dinodes);
131 rg->rg_igeneration = be64_to_cpu(str->rg_igeneration); 116 rg->rg_igeneration = be64_to_cpu(str->rg_igeneration);
132} 117}
133 118
134void gfs2_rgrp_out(const struct gfs2_rgrp *rg, void *buf) 119void gfs2_rgrp_out(const struct gfs2_rgrp_host *rg, void *buf)
135{ 120{
136 struct gfs2_rgrp *str = buf; 121 struct gfs2_rgrp *str = buf;
137 122
138 gfs2_meta_header_out(&rg->rg_header, buf);
139 str->rg_flags = cpu_to_be32(rg->rg_flags); 123 str->rg_flags = cpu_to_be32(rg->rg_flags);
140 str->rg_free = cpu_to_be32(rg->rg_free); 124 str->rg_free = cpu_to_be32(rg->rg_free);
141 str->rg_dinodes = cpu_to_be32(rg->rg_dinodes); 125 str->rg_dinodes = cpu_to_be32(rg->rg_dinodes);
@@ -144,7 +128,7 @@ void gfs2_rgrp_out(const struct gfs2_rgrp *rg, void *buf)
144 memset(&str->rg_reserved, 0, sizeof(str->rg_reserved)); 128 memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
145} 129}
146 130
147void gfs2_quota_in(struct gfs2_quota *qu, const void *buf) 131void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
148{ 132{
149 const struct gfs2_quota *str = buf; 133 const struct gfs2_quota *str = buf;
150 134
@@ -153,96 +137,56 @@ void gfs2_quota_in(struct gfs2_quota *qu, const void *buf)
153 qu->qu_value = be64_to_cpu(str->qu_value); 137 qu->qu_value = be64_to_cpu(str->qu_value);
154} 138}
155 139
156void gfs2_dinode_in(struct gfs2_dinode *di, const void *buf) 140void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
157{
158 const struct gfs2_dinode *str = buf;
159
160 gfs2_meta_header_in(&di->di_header, buf);
161 gfs2_inum_in(&di->di_num, &str->di_num);
162
163 di->di_mode = be32_to_cpu(str->di_mode);
164 di->di_uid = be32_to_cpu(str->di_uid);
165 di->di_gid = be32_to_cpu(str->di_gid);
166 di->di_nlink = be32_to_cpu(str->di_nlink);
167 di->di_size = be64_to_cpu(str->di_size);
168 di->di_blocks = be64_to_cpu(str->di_blocks);
169 di->di_atime = be64_to_cpu(str->di_atime);
170 di->di_mtime = be64_to_cpu(str->di_mtime);
171 di->di_ctime = be64_to_cpu(str->di_ctime);
172 di->di_major = be32_to_cpu(str->di_major);
173 di->di_minor = be32_to_cpu(str->di_minor);
174
175 di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
176 di->di_goal_data = be64_to_cpu(str->di_goal_data);
177 di->di_generation = be64_to_cpu(str->di_generation);
178
179 di->di_flags = be32_to_cpu(str->di_flags);
180 di->di_payload_format = be32_to_cpu(str->di_payload_format);
181 di->di_height = be16_to_cpu(str->di_height);
182
183 di->di_depth = be16_to_cpu(str->di_depth);
184 di->di_entries = be32_to_cpu(str->di_entries);
185
186 di->di_eattr = be64_to_cpu(str->di_eattr);
187
188}
189
190void gfs2_dinode_out(const struct gfs2_dinode *di, void *buf)
191{ 141{
142 const struct gfs2_dinode_host *di = &ip->i_di;
192 struct gfs2_dinode *str = buf; 143 struct gfs2_dinode *str = buf;
193 144
194 gfs2_meta_header_out(&di->di_header, buf); 145 str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
195 gfs2_inum_out(&di->di_num, (char *)&str->di_num); 146 str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
147 str->di_header.__pad0 = 0;
148 str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
149 str->di_header.__pad1 = 0;
196 150
197 str->di_mode = cpu_to_be32(di->di_mode); 151 gfs2_inum_out(&ip->i_num, &str->di_num);
198 str->di_uid = cpu_to_be32(di->di_uid); 152
199 str->di_gid = cpu_to_be32(di->di_gid); 153 str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
200 str->di_nlink = cpu_to_be32(di->di_nlink); 154 str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
155 str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
156 str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
201 str->di_size = cpu_to_be64(di->di_size); 157 str->di_size = cpu_to_be64(di->di_size);
202 str->di_blocks = cpu_to_be64(di->di_blocks); 158 str->di_blocks = cpu_to_be64(di->di_blocks);
203 str->di_atime = cpu_to_be64(di->di_atime); 159 str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
204 str->di_mtime = cpu_to_be64(di->di_mtime); 160 str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
205 str->di_ctime = cpu_to_be64(di->di_ctime); 161 str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);
206 str->di_major = cpu_to_be32(di->di_major);
207 str->di_minor = cpu_to_be32(di->di_minor);
208 162
209 str->di_goal_meta = cpu_to_be64(di->di_goal_meta); 163 str->di_goal_meta = cpu_to_be64(di->di_goal_meta);
210 str->di_goal_data = cpu_to_be64(di->di_goal_data); 164 str->di_goal_data = cpu_to_be64(di->di_goal_data);
211 str->di_generation = cpu_to_be64(di->di_generation); 165 str->di_generation = cpu_to_be64(di->di_generation);
212 166
213 str->di_flags = cpu_to_be32(di->di_flags); 167 str->di_flags = cpu_to_be32(di->di_flags);
214 str->di_payload_format = cpu_to_be32(di->di_payload_format);
215 str->di_height = cpu_to_be16(di->di_height); 168 str->di_height = cpu_to_be16(di->di_height);
216 169 str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
170 !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ?
171 GFS2_FORMAT_DE : 0);
217 str->di_depth = cpu_to_be16(di->di_depth); 172 str->di_depth = cpu_to_be16(di->di_depth);
218 str->di_entries = cpu_to_be32(di->di_entries); 173 str->di_entries = cpu_to_be32(di->di_entries);
219 174
220 str->di_eattr = cpu_to_be64(di->di_eattr); 175 str->di_eattr = cpu_to_be64(di->di_eattr);
221
222} 176}
223 177
224void gfs2_dinode_print(const struct gfs2_dinode *di) 178void gfs2_dinode_print(const struct gfs2_inode *ip)
225{ 179{
226 gfs2_meta_header_print(&di->di_header); 180 const struct gfs2_dinode_host *di = &ip->i_di;
227 gfs2_inum_print(&di->di_num); 181
182 gfs2_inum_print(&ip->i_num);
228 183
229 pv(di, di_mode, "0%o");
230 pv(di, di_uid, "%u");
231 pv(di, di_gid, "%u");
232 pv(di, di_nlink, "%u");
233 printk(KERN_INFO " di_size = %llu\n", (unsigned long long)di->di_size); 184 printk(KERN_INFO " di_size = %llu\n", (unsigned long long)di->di_size);
234 printk(KERN_INFO " di_blocks = %llu\n", (unsigned long long)di->di_blocks); 185 printk(KERN_INFO " di_blocks = %llu\n", (unsigned long long)di->di_blocks);
235 printk(KERN_INFO " di_atime = %lld\n", (long long)di->di_atime);
236 printk(KERN_INFO " di_mtime = %lld\n", (long long)di->di_mtime);
237 printk(KERN_INFO " di_ctime = %lld\n", (long long)di->di_ctime);
238 pv(di, di_major, "%u");
239 pv(di, di_minor, "%u");
240
241 printk(KERN_INFO " di_goal_meta = %llu\n", (unsigned long long)di->di_goal_meta); 186 printk(KERN_INFO " di_goal_meta = %llu\n", (unsigned long long)di->di_goal_meta);
242 printk(KERN_INFO " di_goal_data = %llu\n", (unsigned long long)di->di_goal_data); 187 printk(KERN_INFO " di_goal_data = %llu\n", (unsigned long long)di->di_goal_data);
243 188
244 pv(di, di_flags, "0x%.8X"); 189 pv(di, di_flags, "0x%.8X");
245 pv(di, di_payload_format, "%u");
246 pv(di, di_height, "%u"); 190 pv(di, di_height, "%u");
247 191
248 pv(di, di_depth, "%u"); 192 pv(di, di_depth, "%u");
@@ -251,7 +195,7 @@ void gfs2_dinode_print(const struct gfs2_dinode *di)
251 printk(KERN_INFO " di_eattr = %llu\n", (unsigned long long)di->di_eattr); 195 printk(KERN_INFO " di_eattr = %llu\n", (unsigned long long)di->di_eattr);
252} 196}
253 197
254void gfs2_log_header_in(struct gfs2_log_header *lh, const void *buf) 198void gfs2_log_header_in(struct gfs2_log_header_host *lh, const void *buf)
255{ 199{
256 const struct gfs2_log_header *str = buf; 200 const struct gfs2_log_header *str = buf;
257 201
@@ -263,7 +207,7 @@ void gfs2_log_header_in(struct gfs2_log_header *lh, const void *buf)
263 lh->lh_hash = be32_to_cpu(str->lh_hash); 207 lh->lh_hash = be32_to_cpu(str->lh_hash);
264} 208}
265 209
266void gfs2_inum_range_in(struct gfs2_inum_range *ir, const void *buf) 210void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf)
267{ 211{
268 const struct gfs2_inum_range *str = buf; 212 const struct gfs2_inum_range *str = buf;
269 213
@@ -271,7 +215,7 @@ void gfs2_inum_range_in(struct gfs2_inum_range *ir, const void *buf)
271 ir->ir_length = be64_to_cpu(str->ir_length); 215 ir->ir_length = be64_to_cpu(str->ir_length);
272} 216}
273 217
274void gfs2_inum_range_out(const struct gfs2_inum_range *ir, void *buf) 218void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf)
275{ 219{
276 struct gfs2_inum_range *str = buf; 220 struct gfs2_inum_range *str = buf;
277 221
@@ -279,7 +223,7 @@ void gfs2_inum_range_out(const struct gfs2_inum_range *ir, void *buf)
279 str->ir_length = cpu_to_be64(ir->ir_length); 223 str->ir_length = cpu_to_be64(ir->ir_length);
280} 224}
281 225
282void gfs2_statfs_change_in(struct gfs2_statfs_change *sc, const void *buf) 226void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
283{ 227{
284 const struct gfs2_statfs_change *str = buf; 228 const struct gfs2_statfs_change *str = buf;
285 229
@@ -288,7 +232,7 @@ void gfs2_statfs_change_in(struct gfs2_statfs_change *sc, const void *buf)
288 sc->sc_dinodes = be64_to_cpu(str->sc_dinodes); 232 sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
289} 233}
290 234
291void gfs2_statfs_change_out(const struct gfs2_statfs_change *sc, void *buf) 235void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
292{ 236{
293 struct gfs2_statfs_change *str = buf; 237 struct gfs2_statfs_change *str = buf;
294 238
@@ -297,7 +241,7 @@ void gfs2_statfs_change_out(const struct gfs2_statfs_change *sc, void *buf)
297 str->sc_dinodes = cpu_to_be64(sc->sc_dinodes); 241 str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
298} 242}
299 243
300void gfs2_quota_change_in(struct gfs2_quota_change *qc, const void *buf) 244void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
301{ 245{
302 const struct gfs2_quota_change *str = buf; 246 const struct gfs2_quota_change *str = buf;
303 247
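
The pattern running through ondisk.c is one converter pair per on-disk structure: *_in() byte-swaps big-endian disk fields into a native-order *_host struct, and *_out() does the inverse; with this patch gfs2_dinode_out() additionally reads mode, uid, gid, nlink and the timestamps straight from the VFS inode rather than from a duplicated copy. A self-contained miniature of the in/out idiom, using an invented two-field record and htonl/ntohl in place of the kernel's endian helpers:

#include <stdint.h>
#include <arpa/inet.h>   /* htonl/ntohl as stand-ins for cpu_to_be32/be32_to_cpu */

struct demo_num_disk { uint32_t formal_ino; uint32_t addr; };   /* big-endian on disk */
struct demo_num_host { uint32_t formal_ino; uint32_t addr; };   /* CPU byte order */

/* Disk -> host: byte-swap each field into the native-order structure. */
static void demo_num_in(struct demo_num_host *no, const void *buf)
{
    const struct demo_num_disk *str = buf;
    no->formal_ino = ntohl(str->formal_ino);
    no->addr = ntohl(str->addr);
}

/* Host -> disk: the exact inverse, used when writing the block back. */
static void demo_num_out(const struct demo_num_host *no, void *buf)
{
    struct demo_num_disk *str = buf;
    str->formal_ino = htonl(no->formal_ino);
    str->addr = htonl(no->addr);
}
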
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index 015640b3f123..d8d69a72a10d 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -156,19 +156,6 @@ out_ignore:
156 return 0; 156 return 0;
157} 157}
158 158
159static int zero_readpage(struct page *page)
160{
161 void *kaddr;
162
163 kaddr = kmap_atomic(page, KM_USER0);
164 memset(kaddr, 0, PAGE_CACHE_SIZE);
165 kunmap_atomic(kaddr, KM_USER0);
166
167 SetPageUptodate(page);
168
169 return 0;
170}
171
172/** 159/**
173 * stuffed_readpage - Fill in a Linux page with stuffed file data 160 * stuffed_readpage - Fill in a Linux page with stuffed file data
174 * @ip: the inode 161 * @ip: the inode
@@ -183,9 +170,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
183 void *kaddr; 170 void *kaddr;
184 int error; 171 int error;
185 172
186 /* Only the first page of a stuffed file might contain data */ 173 BUG_ON(page->index);
187 if (unlikely(page->index))
188 return zero_readpage(page);
189 174
190 error = gfs2_meta_inode_buffer(ip, &dibh); 175 error = gfs2_meta_inode_buffer(ip, &dibh);
191 if (error) 176 if (error)
@@ -230,9 +215,9 @@ static int gfs2_readpage(struct file *file, struct page *page)
230 /* gfs2_sharewrite_nopage has grabbed the ip->i_gl already */ 215 /* gfs2_sharewrite_nopage has grabbed the ip->i_gl already */
231 goto skip_lock; 216 goto skip_lock;
232 } 217 }
233 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|GL_AOP, &gh); 218 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
234 do_unlock = 1; 219 do_unlock = 1;
235 error = gfs2_glock_nq_m_atime(1, &gh); 220 error = gfs2_glock_nq_atime(&gh);
236 if (unlikely(error)) 221 if (unlikely(error))
237 goto out_unlock; 222 goto out_unlock;
238 } 223 }
@@ -254,6 +239,8 @@ skip_lock:
254out: 239out:
255 return error; 240 return error;
256out_unlock: 241out_unlock:
242 if (error == GLR_TRYFAILED)
243 error = AOP_TRUNCATED_PAGE;
257 unlock_page(page); 244 unlock_page(page);
258 if (do_unlock) 245 if (do_unlock)
259 gfs2_holder_uninit(&gh); 246 gfs2_holder_uninit(&gh);
@@ -293,9 +280,9 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
293 goto skip_lock; 280 goto skip_lock;
294 } 281 }
295 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 282 gfs2_holder_init(ip->i_gl, LM_ST_SHARED,
296 LM_FLAG_TRY_1CB|GL_ATIME|GL_AOP, &gh); 283 LM_FLAG_TRY_1CB|GL_ATIME, &gh);
297 do_unlock = 1; 284 do_unlock = 1;
298 ret = gfs2_glock_nq_m_atime(1, &gh); 285 ret = gfs2_glock_nq_atime(&gh);
299 if (ret == GLR_TRYFAILED) 286 if (ret == GLR_TRYFAILED)
300 goto out_noerror; 287 goto out_noerror;
301 if (unlikely(ret)) 288 if (unlikely(ret))
@@ -366,10 +353,13 @@ static int gfs2_prepare_write(struct file *file, struct page *page,
366 unsigned int write_len = to - from; 353 unsigned int write_len = to - from;
367 354
368 355
369 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|GL_AOP, &ip->i_gh); 356 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|LM_FLAG_TRY_1CB, &ip->i_gh);
370 error = gfs2_glock_nq_m_atime(1, &ip->i_gh); 357 error = gfs2_glock_nq_atime(&ip->i_gh);
371 if (error) 358 if (unlikely(error)) {
359 if (error == GLR_TRYFAILED)
360 error = AOP_TRUNCATED_PAGE;
372 goto out_uninit; 361 goto out_uninit;
362 }
373 363
374 gfs2_write_calc_reserv(ip, write_len, &data_blocks, &ind_blocks); 364 gfs2_write_calc_reserv(ip, write_len, &data_blocks, &ind_blocks);
375 365
@@ -386,7 +376,7 @@ static int gfs2_prepare_write(struct file *file, struct page *page,
386 if (error) 376 if (error)
387 goto out_alloc_put; 377 goto out_alloc_put;
388 378
389 error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid); 379 error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
390 if (error) 380 if (error)
391 goto out_qunlock; 381 goto out_qunlock;
392 382
@@ -482,8 +472,10 @@ static int gfs2_commit_write(struct file *file, struct page *page,
482 472
483 SetPageUptodate(page); 473 SetPageUptodate(page);
484 474
485 if (inode->i_size < file_size) 475 if (inode->i_size < file_size) {
486 i_size_write(inode, file_size); 476 i_size_write(inode, file_size);
477 mark_inode_dirty(inode);
478 }
487 } else { 479 } else {
488 if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || 480 if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED ||
489 gfs2_is_jdata(ip)) 481 gfs2_is_jdata(ip))
@@ -498,11 +490,6 @@ static int gfs2_commit_write(struct file *file, struct page *page,
498 di->di_size = cpu_to_be64(inode->i_size); 490 di->di_size = cpu_to_be64(inode->i_size);
499 } 491 }
500 492
501 di->di_mode = cpu_to_be32(inode->i_mode);
502 di->di_atime = cpu_to_be64(inode->i_atime.tv_sec);
503 di->di_mtime = cpu_to_be64(inode->i_mtime.tv_sec);
504 di->di_ctime = cpu_to_be64(inode->i_ctime.tv_sec);
505
506 brelse(dibh); 493 brelse(dibh);
507 gfs2_trans_end(sdp); 494 gfs2_trans_end(sdp);
508 if (al->al_requested) { 495 if (al->al_requested) {
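
In gfs2_commit_write() above the inode is now marked dirty only when the write actually extended i_size, and the per-field copies of di_mode and the timestamps are gone because gfs2_dinode_out() writes them from the VFS inode. The size-extension step on its own, as a simplified sketch without the kernel's locking:

#include <stdbool.h>
#include <stdint.h>

struct demo_inode { uint64_t i_size; bool dirty; };

/* Grow i_size only when the write reached past the old end of file,
 * and remember that the inode itself now needs writing back. */
static void maybe_extend(struct demo_inode *inode, uint64_t write_end)
{
    if (inode->i_size < write_end) {
        inode->i_size = write_end;
        inode->dirty = true;    /* stands in for mark_inode_dirty() */
    }
}
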
@@ -624,7 +611,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
624 * on this path. All we need change is atime. 611 * on this path. All we need change is atime.
625 */ 612 */
626 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh); 613 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
627 rv = gfs2_glock_nq_m_atime(1, &gh); 614 rv = gfs2_glock_nq_atime(&gh);
628 if (rv) 615 if (rv)
629 goto out; 616 goto out;
630 617
@@ -737,6 +724,9 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
737 if (!atomic_read(&aspace->i_writecount)) 724 if (!atomic_read(&aspace->i_writecount))
738 return 0; 725 return 0;
739 726
727 if (!(gfp_mask & __GFP_WAIT))
728 return 0;
729
740 if (time_after_eq(jiffies, t)) { 730 if (time_after_eq(jiffies, t)) {
741 stuck_releasepage(bh); 731 stuck_releasepage(bh);
742 /* should we withdraw here? */ 732 /* should we withdraw here? */
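
gfs2_readpage() and gfs2_prepare_write() above now take the glock with LM_FLAG_TRY_1CB and translate a failed try-lock (GLR_TRYFAILED) into AOP_TRUNCATED_PAGE, which tells the caller to release the page and retry rather than block with the page lock held. The error-mapping step in isolation, with invented constant values standing in for the kernel ones:

/* Invented stand-ins for GLR_TRYFAILED and AOP_TRUNCATED_PAGE. */
#define TRYLOCK_FAILED  700
#define RETRY_PAGE_OP   701

/* Translate "the try-lock was busy" into the address-space operation's
 * retry code; any other error is passed straight through. */
static int map_trylock_error(int err)
{
    return err == TRYLOCK_FAILED ? RETRY_PAGE_OP : err;
}
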
diff --git a/fs/gfs2/ops_dentry.c b/fs/gfs2/ops_dentry.c
index 00041b1b8025..d355899585d8 100644
--- a/fs/gfs2/ops_dentry.c
+++ b/fs/gfs2/ops_dentry.c
@@ -43,7 +43,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
43 struct inode *inode = dentry->d_inode; 43 struct inode *inode = dentry->d_inode;
44 struct gfs2_holder d_gh; 44 struct gfs2_holder d_gh;
45 struct gfs2_inode *ip; 45 struct gfs2_inode *ip;
46 struct gfs2_inum inum; 46 struct gfs2_inum_host inum;
47 unsigned int type; 47 unsigned int type;
48 int error; 48 int error;
49 49
@@ -76,7 +76,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
76 if (!gfs2_inum_equal(&ip->i_num, &inum)) 76 if (!gfs2_inum_equal(&ip->i_num, &inum))
77 goto invalid_gunlock; 77 goto invalid_gunlock;
78 78
79 if (IF2DT(ip->i_di.di_mode) != type) { 79 if (IF2DT(ip->i_inode.i_mode) != type) {
80 gfs2_consist_inode(dip); 80 gfs2_consist_inode(dip);
81 goto fail_gunlock; 81 goto fail_gunlock;
82 } 82 }
diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c
index 86127d93bd35..b4e7b8775315 100644
--- a/fs/gfs2/ops_export.c
+++ b/fs/gfs2/ops_export.c
@@ -27,15 +27,16 @@
27#include "util.h" 27#include "util.h"
28 28
29static struct dentry *gfs2_decode_fh(struct super_block *sb, 29static struct dentry *gfs2_decode_fh(struct super_block *sb,
30 __u32 *fh, 30 __u32 *p,
31 int fh_len, 31 int fh_len,
32 int fh_type, 32 int fh_type,
33 int (*acceptable)(void *context, 33 int (*acceptable)(void *context,
34 struct dentry *dentry), 34 struct dentry *dentry),
35 void *context) 35 void *context)
36{ 36{
37 __be32 *fh = (__force __be32 *)p;
37 struct gfs2_fh_obj fh_obj; 38 struct gfs2_fh_obj fh_obj;
38 struct gfs2_inum *this, parent; 39 struct gfs2_inum_host *this, parent;
39 40
40 if (fh_type != fh_len) 41 if (fh_type != fh_len)
41 return NULL; 42 return NULL;
@@ -65,9 +66,10 @@ static struct dentry *gfs2_decode_fh(struct super_block *sb,
65 acceptable, context); 66 acceptable, context);
66} 67}
67 68
68static int gfs2_encode_fh(struct dentry *dentry, __u32 *fh, int *len, 69static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len,
69 int connectable) 70 int connectable)
70{ 71{
72 __be32 *fh = (__force __be32 *)p;
71 struct inode *inode = dentry->d_inode; 73 struct inode *inode = dentry->d_inode;
72 struct super_block *sb = inode->i_sb; 74 struct super_block *sb = inode->i_sb;
73 struct gfs2_inode *ip = GFS2_I(inode); 75 struct gfs2_inode *ip = GFS2_I(inode);
@@ -76,14 +78,10 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
76 (connectable && *len < GFS2_LARGE_FH_SIZE)) 78 (connectable && *len < GFS2_LARGE_FH_SIZE))
77 return 255; 79 return 255;
78 80
79 fh[0] = ip->i_num.no_formal_ino >> 32; 81 fh[0] = cpu_to_be32(ip->i_num.no_formal_ino >> 32);
80 fh[0] = cpu_to_be32(fh[0]); 82 fh[1] = cpu_to_be32(ip->i_num.no_formal_ino & 0xFFFFFFFF);
81 fh[1] = ip->i_num.no_formal_ino & 0xFFFFFFFF; 83 fh[2] = cpu_to_be32(ip->i_num.no_addr >> 32);
82 fh[1] = cpu_to_be32(fh[1]); 84 fh[3] = cpu_to_be32(ip->i_num.no_addr & 0xFFFFFFFF);
83 fh[2] = ip->i_num.no_addr >> 32;
84 fh[2] = cpu_to_be32(fh[2]);
85 fh[3] = ip->i_num.no_addr & 0xFFFFFFFF;
86 fh[3] = cpu_to_be32(fh[3]);
87 *len = GFS2_SMALL_FH_SIZE; 85 *len = GFS2_SMALL_FH_SIZE;
88 86
89 if (!connectable || inode == sb->s_root->d_inode) 87 if (!connectable || inode == sb->s_root->d_inode)
@@ -95,14 +93,10 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
95 igrab(inode); 93 igrab(inode);
96 spin_unlock(&dentry->d_lock); 94 spin_unlock(&dentry->d_lock);
97 95
98 fh[4] = ip->i_num.no_formal_ino >> 32; 96 fh[4] = cpu_to_be32(ip->i_num.no_formal_ino >> 32);
99 fh[4] = cpu_to_be32(fh[4]); 97 fh[5] = cpu_to_be32(ip->i_num.no_formal_ino & 0xFFFFFFFF);
100 fh[5] = ip->i_num.no_formal_ino & 0xFFFFFFFF; 98 fh[6] = cpu_to_be32(ip->i_num.no_addr >> 32);
101 fh[5] = cpu_to_be32(fh[5]); 99 fh[7] = cpu_to_be32(ip->i_num.no_addr & 0xFFFFFFFF);
102 fh[6] = ip->i_num.no_addr >> 32;
103 fh[6] = cpu_to_be32(fh[6]);
104 fh[7] = ip->i_num.no_addr & 0xFFFFFFFF;
105 fh[7] = cpu_to_be32(fh[7]);
106 100
107 fh[8] = cpu_to_be32(inode->i_mode); 101 fh[8] = cpu_to_be32(inode->i_mode);
108 fh[9] = 0; /* pad to double word */ 102 fh[9] = 0; /* pad to double word */
@@ -114,12 +108,12 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
114} 108}
115 109
116struct get_name_filldir { 110struct get_name_filldir {
117 struct gfs2_inum inum; 111 struct gfs2_inum_host inum;
118 char *name; 112 char *name;
119}; 113};
120 114
121static int get_name_filldir(void *opaque, const char *name, unsigned int length, 115static int get_name_filldir(void *opaque, const char *name, unsigned int length,
122 u64 offset, struct gfs2_inum *inum, 116 u64 offset, struct gfs2_inum_host *inum,
123 unsigned int type) 117 unsigned int type)
124{ 118{
125 struct get_name_filldir *gnfd = (struct get_name_filldir *)opaque; 119 struct get_name_filldir *gnfd = (struct get_name_filldir *)opaque;
@@ -202,7 +196,7 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb, void *inum_obj)
202{ 196{
203 struct gfs2_sbd *sdp = sb->s_fs_info; 197 struct gfs2_sbd *sdp = sb->s_fs_info;
204 struct gfs2_fh_obj *fh_obj = (struct gfs2_fh_obj *)inum_obj; 198 struct gfs2_fh_obj *fh_obj = (struct gfs2_fh_obj *)inum_obj;
205 struct gfs2_inum *inum = &fh_obj->this; 199 struct gfs2_inum_host *inum = &fh_obj->this;
206 struct gfs2_holder i_gh, ri_gh, rgd_gh; 200 struct gfs2_holder i_gh, ri_gh, rgd_gh;
207 struct gfs2_rgrpd *rgd; 201 struct gfs2_rgrpd *rgd;
208 struct inode *inode; 202 struct inode *inode;
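
gfs2_encode_fh() above now treats the handle as __be32 words and writes each 64-bit inode number as two big-endian halves in a single step, replacing the old store-then-convert two-pass code. The split shown standalone, with htonl standing in for cpu_to_be32:

#include <stdint.h>
#include <arpa/inet.h>

/* Pack a 64-bit inode number into two big-endian 32-bit words,
 * most significant half first, as the file handle layout expects. */
static void pack_be64(uint32_t out[2], uint64_t val)
{
    out[0] = htonl((uint32_t)(val >> 32));
    out[1] = htonl((uint32_t)(val & 0xffffffffu));
}
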
diff --git a/fs/gfs2/ops_export.h b/fs/gfs2/ops_export.h
index 09aca5046fb1..f925a955b3b8 100644
--- a/fs/gfs2/ops_export.h
+++ b/fs/gfs2/ops_export.h
@@ -15,7 +15,7 @@
15 15
16extern struct export_operations gfs2_export_ops; 16extern struct export_operations gfs2_export_ops;
17struct gfs2_fh_obj { 17struct gfs2_fh_obj {
18 struct gfs2_inum this; 18 struct gfs2_inum_host this;
19 __u32 imode; 19 __u32 imode;
20}; 20};
21 21
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index 3064f133bf3c..b3f1e0349ae0 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -22,6 +22,7 @@
22#include <linux/ext2_fs.h> 22#include <linux/ext2_fs.h>
23#include <linux/crc32.h> 23#include <linux/crc32.h>
24#include <linux/lm_interface.h> 24#include <linux/lm_interface.h>
25#include <linux/writeback.h>
25#include <asm/uaccess.h> 26#include <asm/uaccess.h>
26 27
27#include "gfs2.h" 28#include "gfs2.h"
@@ -71,7 +72,7 @@ static int gfs2_read_actor(read_descriptor_t *desc, struct page *page,
71 size = count; 72 size = count;
72 73
73 kaddr = kmap(page); 74 kaddr = kmap(page);
74 memcpy(desc->arg.buf, kaddr + offset, size); 75 memcpy(desc->arg.data, kaddr + offset, size);
75 kunmap(page); 76 kunmap(page);
76 77
77 desc->count = count - size; 78 desc->count = count - size;
@@ -86,7 +87,7 @@ int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
86 struct inode *inode = &ip->i_inode; 87 struct inode *inode = &ip->i_inode;
87 read_descriptor_t desc; 88 read_descriptor_t desc;
88 desc.written = 0; 89 desc.written = 0;
89 desc.arg.buf = buf; 90 desc.arg.data = buf;
90 desc.count = size; 91 desc.count = size;
91 desc.error = 0; 92 desc.error = 0;
92 do_generic_mapping_read(inode->i_mapping, ra_state, 93 do_generic_mapping_read(inode->i_mapping, ra_state,
@@ -139,7 +140,7 @@ static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
139 */ 140 */
140 141
141static int filldir_func(void *opaque, const char *name, unsigned int length, 142static int filldir_func(void *opaque, const char *name, unsigned int length,
142 u64 offset, struct gfs2_inum *inum, 143 u64 offset, struct gfs2_inum_host *inum,
143 unsigned int type) 144 unsigned int type)
144{ 145{
145 struct filldir_reg *fdr = (struct filldir_reg *)opaque; 146 struct filldir_reg *fdr = (struct filldir_reg *)opaque;
@@ -253,7 +254,7 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
253 u32 fsflags; 254 u32 fsflags;
254 255
255 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh); 256 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
256 error = gfs2_glock_nq_m_atime(1, &gh); 257 error = gfs2_glock_nq_atime(&gh);
257 if (error) 258 if (error)
258 return error; 259 return error;
259 260
@@ -266,6 +267,24 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
266 return error; 267 return error;
267} 268}
268 269
270void gfs2_set_inode_flags(struct inode *inode)
271{
272 struct gfs2_inode *ip = GFS2_I(inode);
273 struct gfs2_dinode_host *di = &ip->i_di;
274 unsigned int flags = inode->i_flags;
275
276 flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
277 if (di->di_flags & GFS2_DIF_IMMUTABLE)
278 flags |= S_IMMUTABLE;
279 if (di->di_flags & GFS2_DIF_APPENDONLY)
280 flags |= S_APPEND;
281 if (di->di_flags & GFS2_DIF_NOATIME)
282 flags |= S_NOATIME;
283 if (di->di_flags & GFS2_DIF_SYNC)
284 flags |= S_SYNC;
285 inode->i_flags = flags;
286}
287
269/* Flags that can be set by user space */ 288/* Flags that can be set by user space */
270#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA| \ 289#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA| \
271 GFS2_DIF_DIRECTIO| \ 290 GFS2_DIF_DIRECTIO| \
@@ -336,8 +355,9 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
336 goto out_trans_end; 355 goto out_trans_end;
337 gfs2_trans_add_bh(ip->i_gl, bh, 1); 356 gfs2_trans_add_bh(ip->i_gl, bh, 1);
338 ip->i_di.di_flags = new_flags; 357 ip->i_di.di_flags = new_flags;
339 gfs2_dinode_out(&ip->i_di, bh->b_data); 358 gfs2_dinode_out(ip, bh->b_data);
340 brelse(bh); 359 brelse(bh);
360 gfs2_set_inode_flags(inode);
341out_trans_end: 361out_trans_end:
342 gfs2_trans_end(sdp); 362 gfs2_trans_end(sdp);
343out: 363out:
@@ -425,7 +445,7 @@ static int gfs2_open(struct inode *inode, struct file *file)
425 gfs2_assert_warn(GFS2_SB(inode), !file->private_data); 445 gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
426 file->private_data = fp; 446 file->private_data = fp;
427 447
428 if (S_ISREG(ip->i_di.di_mode)) { 448 if (S_ISREG(ip->i_inode.i_mode)) {
429 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, 449 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
430 &i_gh); 450 &i_gh);
431 if (error) 451 if (error)
@@ -484,16 +504,40 @@ static int gfs2_close(struct inode *inode, struct file *file)
484 * @file: the file that points to the dentry (we ignore this) 504 * @file: the file that points to the dentry (we ignore this)
485 * @dentry: the dentry that points to the inode to sync 505 * @dentry: the dentry that points to the inode to sync
486 * 506 *
507 * The VFS will flush "normal" data for us. We only need to worry
508 * about metadata here. For journaled data, we just do a log flush
509 * as we can't avoid it. Otherwise we can just bale out if datasync
510 * is set. For stuffed inodes we must flush the log in order to
511 * ensure that all data is on disk.
512 *
513 * The call to write_inode_now() is there to write back metadata and
 514 * the inode itself. It also tries to write the data, but that's
515 * (hopefully) a no-op due to the VFS having already called filemap_fdatawrite()
516 * for us.
517 *
487 * Returns: errno 518 * Returns: errno
488 */ 519 */
489 520
490static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync) 521static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync)
491{ 522{
492 struct gfs2_inode *ip = GFS2_I(dentry->d_inode); 523 struct inode *inode = dentry->d_inode;
524 int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
525 int ret = 0;
493 526
494 gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl); 527 if (gfs2_is_jdata(GFS2_I(inode))) {
528 gfs2_log_flush(GFS2_SB(inode), GFS2_I(inode)->i_gl);
529 return 0;
530 }
495 531
496 return 0; 532 if (sync_state != 0) {
533 if (!datasync)
534 ret = write_inode_now(inode, 0);
535
536 if (gfs2_is_stuffed(GFS2_I(inode)))
537 gfs2_log_flush(GFS2_SB(inode), GFS2_I(inode)->i_gl);
538 }
539
540 return ret;
497} 541}
498 542
499/** 543/**
@@ -515,7 +559,7 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
515 559
516 if (!(fl->fl_flags & FL_POSIX)) 560 if (!(fl->fl_flags & FL_POSIX))
517 return -ENOLCK; 561 return -ENOLCK;
518 if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID) 562 if ((ip->i_inode.i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
519 return -ENOLCK; 563 return -ENOLCK;
520 564
521 if (sdp->sd_args.ar_localflocks) { 565 if (sdp->sd_args.ar_localflocks) {
@@ -617,7 +661,7 @@ static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
617 661
618 if (!(fl->fl_flags & FL_FLOCK)) 662 if (!(fl->fl_flags & FL_FLOCK))
619 return -ENOLCK; 663 return -ENOLCK;
620 if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID) 664 if ((ip->i_inode.i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
621 return -ENOLCK; 665 return -ENOLCK;
622 666
623 if (sdp->sd_args.ar_localflocks) 667 if (sdp->sd_args.ar_localflocks)
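
The new gfs2_set_inode_flags() is a straight bitmask translation: clear the convertible VFS flags, then set each one whose GFS2_DIF_* counterpart is present in the dinode flags. The same clear-then-map shape with invented flag values (not the real S_* or GFS2_DIF_* bits):

#include <stdint.h>

/* Invented bit values purely for the sketch. */
#define DISK_IMMUTABLE 0x1u
#define DISK_APPEND    0x2u
#define VFS_IMMUTABLE  0x10u
#define VFS_APPEND     0x20u

/* Rebuild the translatable VFS flag bits from the on-disk flag word,
 * leaving unrelated bits in vfs_flags alone. */
static unsigned int translate_flags(unsigned int vfs_flags, uint32_t disk_flags)
{
    vfs_flags &= ~(VFS_IMMUTABLE | VFS_APPEND);
    if (disk_flags & DISK_IMMUTABLE)
        vfs_flags |= VFS_IMMUTABLE;
    if (disk_flags & DISK_APPEND)
        vfs_flags |= VFS_APPEND;
    return vfs_flags;
}
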
diff --git a/fs/gfs2/ops_file.h b/fs/gfs2/ops_file.h
index ce319f89ec8e..7e5d8ec9c846 100644
--- a/fs/gfs2/ops_file.h
+++ b/fs/gfs2/ops_file.h
@@ -17,7 +17,7 @@ extern struct file gfs2_internal_file_sentinel;
17extern int gfs2_internal_read(struct gfs2_inode *ip, 17extern int gfs2_internal_read(struct gfs2_inode *ip,
18 struct file_ra_state *ra_state, 18 struct file_ra_state *ra_state,
19 char *buf, loff_t *pos, unsigned size); 19 char *buf, loff_t *pos, unsigned size);
20 20extern void gfs2_set_inode_flags(struct inode *inode);
21extern const struct file_operations gfs2_file_fops; 21extern const struct file_operations gfs2_file_fops;
22extern const struct file_operations gfs2_dir_fops; 22extern const struct file_operations gfs2_dir_fops;
23 23
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 882873a6bd69..d14e139d2674 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -237,7 +237,7 @@ fail:
237} 237}
238 238
239static struct inode *gfs2_lookup_root(struct super_block *sb, 239static struct inode *gfs2_lookup_root(struct super_block *sb,
240 struct gfs2_inum *inum) 240 struct gfs2_inum_host *inum)
241{ 241{
242 return gfs2_inode_lookup(sb, inum, DT_DIR); 242 return gfs2_inode_lookup(sb, inum, DT_DIR);
243} 243}
@@ -246,7 +246,7 @@ static int init_sb(struct gfs2_sbd *sdp, int silent, int undo)
246{ 246{
247 struct super_block *sb = sdp->sd_vfs; 247 struct super_block *sb = sdp->sd_vfs;
248 struct gfs2_holder sb_gh; 248 struct gfs2_holder sb_gh;
249 struct gfs2_inum *inum; 249 struct gfs2_inum_host *inum;
250 struct inode *inode; 250 struct inode *inode;
251 int error = 0; 251 int error = 0;
252 252
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index ef6e5ed70e94..636dda4c7d38 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -59,7 +59,7 @@ static int gfs2_create(struct inode *dir, struct dentry *dentry,
59 gfs2_holder_init(dip->i_gl, 0, 0, ghs); 59 gfs2_holder_init(dip->i_gl, 0, 0, ghs);
60 60
61 for (;;) { 61 for (;;) {
62 inode = gfs2_createi(ghs, &dentry->d_name, S_IFREG | mode); 62 inode = gfs2_createi(ghs, &dentry->d_name, S_IFREG | mode, 0);
63 if (!IS_ERR(inode)) { 63 if (!IS_ERR(inode)) {
64 gfs2_trans_end(sdp); 64 gfs2_trans_end(sdp);
65 if (dip->i_alloc.al_rgd) 65 if (dip->i_alloc.al_rgd)
@@ -144,7 +144,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
144 int alloc_required; 144 int alloc_required;
145 int error; 145 int error;
146 146
147 if (S_ISDIR(ip->i_di.di_mode)) 147 if (S_ISDIR(inode->i_mode))
148 return -EPERM; 148 return -EPERM;
149 149
150 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); 150 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
@@ -169,7 +169,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
169 } 169 }
170 170
171 error = -EINVAL; 171 error = -EINVAL;
172 if (!dip->i_di.di_nlink) 172 if (!dip->i_inode.i_nlink)
173 goto out_gunlock; 173 goto out_gunlock;
174 error = -EFBIG; 174 error = -EFBIG;
175 if (dip->i_di.di_entries == (u32)-1) 175 if (dip->i_di.di_entries == (u32)-1)
@@ -178,10 +178,10 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
178 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) 178 if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
179 goto out_gunlock; 179 goto out_gunlock;
180 error = -EINVAL; 180 error = -EINVAL;
181 if (!ip->i_di.di_nlink) 181 if (!ip->i_inode.i_nlink)
182 goto out_gunlock; 182 goto out_gunlock;
183 error = -EMLINK; 183 error = -EMLINK;
184 if (ip->i_di.di_nlink == (u32)-1) 184 if (ip->i_inode.i_nlink == (u32)-1)
185 goto out_gunlock; 185 goto out_gunlock;
186 186
187 alloc_required = error = gfs2_diradd_alloc_required(dir, &dentry->d_name); 187 alloc_required = error = gfs2_diradd_alloc_required(dir, &dentry->d_name);
@@ -196,8 +196,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
196 if (error) 196 if (error)
197 goto out_alloc; 197 goto out_alloc;
198 198
199 error = gfs2_quota_check(dip, dip->i_di.di_uid, 199 error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
200 dip->i_di.di_gid);
201 if (error) 200 if (error)
202 goto out_gunlock_q; 201 goto out_gunlock_q;
203 202
@@ -220,7 +219,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
220 } 219 }
221 220
222 error = gfs2_dir_add(dir, &dentry->d_name, &ip->i_num, 221 error = gfs2_dir_add(dir, &dentry->d_name, &ip->i_num,
223 IF2DT(ip->i_di.di_mode)); 222 IF2DT(inode->i_mode));
224 if (error) 223 if (error)
225 goto out_end_trans; 224 goto out_end_trans;
226 225
@@ -326,7 +325,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
326 325
327 gfs2_holder_init(dip->i_gl, 0, 0, ghs); 326 gfs2_holder_init(dip->i_gl, 0, 0, ghs);
328 327
329 inode = gfs2_createi(ghs, &dentry->d_name, S_IFLNK | S_IRWXUGO); 328 inode = gfs2_createi(ghs, &dentry->d_name, S_IFLNK | S_IRWXUGO, 0);
330 if (IS_ERR(inode)) { 329 if (IS_ERR(inode)) {
331 gfs2_holder_uninit(ghs); 330 gfs2_holder_uninit(ghs);
332 return PTR_ERR(inode); 331 return PTR_ERR(inode);
@@ -339,7 +338,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
339 error = gfs2_meta_inode_buffer(ip, &dibh); 338 error = gfs2_meta_inode_buffer(ip, &dibh);
340 339
341 if (!gfs2_assert_withdraw(sdp, !error)) { 340 if (!gfs2_assert_withdraw(sdp, !error)) {
342 gfs2_dinode_out(&ip->i_di, dibh->b_data); 341 gfs2_dinode_out(ip, dibh->b_data);
343 memcpy(dibh->b_data + sizeof(struct gfs2_dinode), symname, 342 memcpy(dibh->b_data + sizeof(struct gfs2_dinode), symname,
344 size); 343 size);
345 brelse(dibh); 344 brelse(dibh);
@@ -379,7 +378,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
379 378
380 gfs2_holder_init(dip->i_gl, 0, 0, ghs); 379 gfs2_holder_init(dip->i_gl, 0, 0, ghs);
381 380
382 inode = gfs2_createi(ghs, &dentry->d_name, S_IFDIR | mode); 381 inode = gfs2_createi(ghs, &dentry->d_name, S_IFDIR | mode, 0);
383 if (IS_ERR(inode)) { 382 if (IS_ERR(inode)) {
384 gfs2_holder_uninit(ghs); 383 gfs2_holder_uninit(ghs);
385 return PTR_ERR(inode); 384 return PTR_ERR(inode);
@@ -387,10 +386,9 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
387 386
388 ip = ghs[1].gh_gl->gl_object; 387 ip = ghs[1].gh_gl->gl_object;
389 388
390 ip->i_di.di_nlink = 2; 389 ip->i_inode.i_nlink = 2;
391 ip->i_di.di_size = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode); 390 ip->i_di.di_size = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
392 ip->i_di.di_flags |= GFS2_DIF_JDATA; 391 ip->i_di.di_flags |= GFS2_DIF_JDATA;
393 ip->i_di.di_payload_format = GFS2_FORMAT_DE;
394 ip->i_di.di_entries = 2; 392 ip->i_di.di_entries = 2;
395 393
396 error = gfs2_meta_inode_buffer(ip, &dibh); 394 error = gfs2_meta_inode_buffer(ip, &dibh);
@@ -414,7 +412,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
414 gfs2_inum_out(&dip->i_num, &dent->de_inum); 412 gfs2_inum_out(&dip->i_num, &dent->de_inum);
415 dent->de_type = cpu_to_be16(DT_DIR); 413 dent->de_type = cpu_to_be16(DT_DIR);
416 414
417 gfs2_dinode_out(&ip->i_di, di); 415 gfs2_dinode_out(ip, di);
418 416
419 brelse(dibh); 417 brelse(dibh);
420 } 418 }
@@ -467,7 +465,7 @@ static int gfs2_rmdir(struct inode *dir, struct dentry *dentry)
467 465
468 if (ip->i_di.di_entries < 2) { 466 if (ip->i_di.di_entries < 2) {
469 if (gfs2_consist_inode(ip)) 467 if (gfs2_consist_inode(ip))
470 gfs2_dinode_print(&ip->i_di); 468 gfs2_dinode_print(ip);
471 error = -EIO; 469 error = -EIO;
472 goto out_gunlock; 470 goto out_gunlock;
473 } 471 }
@@ -504,47 +502,19 @@ out:
504static int gfs2_mknod(struct inode *dir, struct dentry *dentry, int mode, 502static int gfs2_mknod(struct inode *dir, struct dentry *dentry, int mode,
505 dev_t dev) 503 dev_t dev)
506{ 504{
507 struct gfs2_inode *dip = GFS2_I(dir), *ip; 505 struct gfs2_inode *dip = GFS2_I(dir);
508 struct gfs2_sbd *sdp = GFS2_SB(dir); 506 struct gfs2_sbd *sdp = GFS2_SB(dir);
509 struct gfs2_holder ghs[2]; 507 struct gfs2_holder ghs[2];
510 struct inode *inode; 508 struct inode *inode;
511 struct buffer_head *dibh;
512 u32 major = 0, minor = 0;
513 int error;
514
515 switch (mode & S_IFMT) {
516 case S_IFBLK:
517 case S_IFCHR:
518 major = MAJOR(dev);
519 minor = MINOR(dev);
520 break;
521 case S_IFIFO:
522 case S_IFSOCK:
523 break;
524 default:
525 return -EOPNOTSUPP;
526 };
527 509
528 gfs2_holder_init(dip->i_gl, 0, 0, ghs); 510 gfs2_holder_init(dip->i_gl, 0, 0, ghs);
529 511
530 inode = gfs2_createi(ghs, &dentry->d_name, mode); 512 inode = gfs2_createi(ghs, &dentry->d_name, mode, dev);
531 if (IS_ERR(inode)) { 513 if (IS_ERR(inode)) {
532 gfs2_holder_uninit(ghs); 514 gfs2_holder_uninit(ghs);
533 return PTR_ERR(inode); 515 return PTR_ERR(inode);
534 } 516 }
535 517
536 ip = ghs[1].gh_gl->gl_object;
537
538 ip->i_di.di_major = major;
539 ip->i_di.di_minor = minor;
540
541 error = gfs2_meta_inode_buffer(ip, &dibh);
542
543 if (!gfs2_assert_withdraw(sdp, !error)) {
544 gfs2_dinode_out(&ip->i_di, dibh->b_data);
545 brelse(dibh);
546 }
547
548 gfs2_trans_end(sdp); 518 gfs2_trans_end(sdp);
549 if (dip->i_alloc.al_rgd) 519 if (dip->i_alloc.al_rgd)
550 gfs2_inplace_release(dip); 520 gfs2_inplace_release(dip);
@@ -592,11 +562,10 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
592 562
593 /* Make sure we aren't trying to move a directory into its subdir */ 563
594 564
595 if (S_ISDIR(ip->i_di.di_mode) && odip != ndip) { 565 if (S_ISDIR(ip->i_inode.i_mode) && odip != ndip) {
596 dir_rename = 1; 566 dir_rename = 1;
597 567
598 error = gfs2_glock_nq_init(sdp->sd_rename_gl, 568 error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE, 0,
599 LM_ST_EXCLUSIVE, 0,
600 &r_gh); 569 &r_gh);
601 if (error) 570 if (error)
602 goto out; 571 goto out;
@@ -637,10 +606,10 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
637 if (error) 606 if (error)
638 goto out_gunlock; 607 goto out_gunlock;
639 608
640 if (S_ISDIR(nip->i_di.di_mode)) { 609 if (S_ISDIR(nip->i_inode.i_mode)) {
641 if (nip->i_di.di_entries < 2) { 610 if (nip->i_di.di_entries < 2) {
642 if (gfs2_consist_inode(nip)) 611 if (gfs2_consist_inode(nip))
643 gfs2_dinode_print(&nip->i_di); 612 gfs2_dinode_print(nip);
644 error = -EIO; 613 error = -EIO;
645 goto out_gunlock; 614 goto out_gunlock;
646 } 615 }
@@ -666,7 +635,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
666 }; 635 };
667 636
668 if (odip != ndip) { 637 if (odip != ndip) {
669 if (!ndip->i_di.di_nlink) { 638 if (!ndip->i_inode.i_nlink) {
670 error = -EINVAL; 639 error = -EINVAL;
671 goto out_gunlock; 640 goto out_gunlock;
672 } 641 }
@@ -674,8 +643,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
674 error = -EFBIG; 643 error = -EFBIG;
675 goto out_gunlock; 644 goto out_gunlock;
676 } 645 }
677 if (S_ISDIR(ip->i_di.di_mode) && 646 if (S_ISDIR(ip->i_inode.i_mode) &&
678 ndip->i_di.di_nlink == (u32)-1) { 647 ndip->i_inode.i_nlink == (u32)-1) {
679 error = -EMLINK; 648 error = -EMLINK;
680 goto out_gunlock; 649 goto out_gunlock;
681 } 650 }
@@ -702,8 +671,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
702 if (error) 671 if (error)
703 goto out_alloc; 672 goto out_alloc;
704 673
705 error = gfs2_quota_check(ndip, ndip->i_di.di_uid, 674 error = gfs2_quota_check(ndip, ndip->i_inode.i_uid, ndip->i_inode.i_gid);
706 ndip->i_di.di_gid);
707 if (error) 675 if (error)
708 goto out_gunlock_q; 676 goto out_gunlock_q;
709 677
@@ -729,7 +697,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
729 /* Remove the target file, if it exists */ 697 /* Remove the target file, if it exists */
730 698
731 if (nip) { 699 if (nip) {
732 if (S_ISDIR(nip->i_di.di_mode)) 700 if (S_ISDIR(nip->i_inode.i_mode))
733 error = gfs2_rmdiri(ndip, &ndentry->d_name, nip); 701 error = gfs2_rmdiri(ndip, &ndentry->d_name, nip);
734 else { 702 else {
735 error = gfs2_dir_del(ndip, &ndentry->d_name); 703 error = gfs2_dir_del(ndip, &ndentry->d_name);
@@ -760,9 +728,9 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
760 error = gfs2_meta_inode_buffer(ip, &dibh); 728 error = gfs2_meta_inode_buffer(ip, &dibh);
761 if (error) 729 if (error)
762 goto out_end_trans; 730 goto out_end_trans;
763 ip->i_di.di_ctime = get_seconds(); 731 ip->i_inode.i_ctime.tv_sec = get_seconds();
764 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 732 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
765 gfs2_dinode_out(&ip->i_di, dibh->b_data); 733 gfs2_dinode_out(ip, dibh->b_data);
766 brelse(dibh); 734 brelse(dibh);
767 } 735 }
768 736
@@ -771,7 +739,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
771 goto out_end_trans; 739 goto out_end_trans;
772 740
773 error = gfs2_dir_add(ndir, &ndentry->d_name, &ip->i_num, 741 error = gfs2_dir_add(ndir, &ndentry->d_name, &ip->i_num,
774 IF2DT(ip->i_di.di_mode)); 742 IF2DT(ip->i_inode.i_mode));
775 if (error) 743 if (error)
776 goto out_end_trans; 744 goto out_end_trans;
777 745
@@ -867,6 +835,10 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
867 * @mask: 835 * @mask:
868 * @nd: passed from Linux VFS, ignored by us 836 * @nd: passed from Linux VFS, ignored by us
869 * 837 *
838 * This may be called from the VFS directly, or from within GFS2 with the
839 * inode locked, so we look to see if the glock is already locked and only
 840 * lock the glock if it has not already been done.
841 *
870 * Returns: errno 842 * Returns: errno
871 */ 843 */
872 844
@@ -875,15 +847,18 @@ static int gfs2_permission(struct inode *inode, int mask, struct nameidata *nd)
875 struct gfs2_inode *ip = GFS2_I(inode); 847 struct gfs2_inode *ip = GFS2_I(inode);
876 struct gfs2_holder i_gh; 848 struct gfs2_holder i_gh;
877 int error; 849 int error;
850 int unlock = 0;
878 851
879 if (ip->i_vn == ip->i_gl->gl_vn) 852 if (gfs2_glock_is_locked_by_me(ip->i_gl) == 0) {
880 return generic_permission(inode, mask, gfs2_check_acl); 853 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
854 if (error)
855 return error;
856 unlock = 1;
857 }
881 858
882 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); 859 error = generic_permission(inode, mask, gfs2_check_acl);
883 if (!error) { 860 if (unlock)
884 error = generic_permission(inode, mask, gfs2_check_acl_locked);
885 gfs2_glock_dq_uninit(&i_gh); 861 gfs2_glock_dq_uninit(&i_gh);
886 }
887 862
888 return error; 863 return error;
889} 864}
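
The locking pattern the new comment describes (take the shared glock only when the caller does not already hold it, and drop it only if we took it) can be modelled in userspace roughly as below; the pthread mutex and the locked_by_me() helper are stand-ins for the glock and gfs2_glock_is_locked_by_me(), not the real API:

/* Userspace model of "lock only if not already held by me"; build with -pthread. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t gl = PTHREAD_MUTEX_INITIALIZER;
static pthread_t gl_owner;
static int gl_held;

static int locked_by_me(void)
{
	return gl_held && pthread_equal(gl_owner, pthread_self());
}

static int do_op_under_glock(void)
{
	int unlock = 0;

	if (!locked_by_me()) {		/* caller did not lock for us */
		pthread_mutex_lock(&gl);
		gl_owner = pthread_self();
		gl_held = 1;
		unlock = 1;
	}

	puts("operation runs with the lock held");

	if (unlock) {			/* drop it only if we took it */
		gl_held = 0;
		pthread_mutex_unlock(&gl);
	}
	return 0;
}

int main(void)
{
	return do_op_under_glock();
}
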
@@ -914,8 +889,8 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
914 u32 ouid, ogid, nuid, ngid; 889 u32 ouid, ogid, nuid, ngid;
915 int error; 890 int error;
916 891
917 ouid = ip->i_di.di_uid; 892 ouid = inode->i_uid;
918 ogid = ip->i_di.di_gid; 893 ogid = inode->i_gid;
919 nuid = attr->ia_uid; 894 nuid = attr->ia_uid;
920 ngid = attr->ia_gid; 895 ngid = attr->ia_gid;
921 896
@@ -946,10 +921,9 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
946 921
947 error = inode_setattr(inode, attr); 922 error = inode_setattr(inode, attr);
948 gfs2_assert_warn(sdp, !error); 923 gfs2_assert_warn(sdp, !error);
949 gfs2_inode_attr_out(ip);
950 924
951 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 925 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
952 gfs2_dinode_out(&ip->i_di, dibh->b_data); 926 gfs2_dinode_out(ip, dibh->b_data);
953 brelse(dibh); 927 brelse(dibh);
954 928
955 if (ouid != NO_QUOTA_CHANGE || ogid != NO_QUOTA_CHANGE) { 929 if (ouid != NO_QUOTA_CHANGE || ogid != NO_QUOTA_CHANGE) {
@@ -1018,6 +992,12 @@ out:
1018 * @dentry: The dentry to stat 992 * @dentry: The dentry to stat
1019 * @stat: The inode's stats 993 * @stat: The inode's stats
1020 * 994 *
995 * This may be called from the VFS directly, or from within GFS2 with the
996 * inode locked, so we look to see if the glock is already locked and only
 997 * lock the glock if it has not already been done. Note that it is the NFS
998 * readdirplus operation which causes this to be called (from filldir)
999 * with the glock already held.
1000 *
1021 * Returns: errno 1001 * Returns: errno
1022 */ 1002 */
1023 1003
@@ -1028,14 +1008,20 @@ static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
1028 struct gfs2_inode *ip = GFS2_I(inode); 1008 struct gfs2_inode *ip = GFS2_I(inode);
1029 struct gfs2_holder gh; 1009 struct gfs2_holder gh;
1030 int error; 1010 int error;
1011 int unlock = 0;
1031 1012
1032 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); 1013 if (gfs2_glock_is_locked_by_me(ip->i_gl) == 0) {
1033 if (!error) { 1014 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
1034 generic_fillattr(inode, stat); 1015 if (error)
1035 gfs2_glock_dq_uninit(&gh); 1016 return error;
1017 unlock = 1;
1036 } 1018 }
1037 1019
1038 return error; 1020 generic_fillattr(inode, stat);
1021 if (unlock)
1022 gfs2_glock_dq_uninit(&gh);
1023
1024 return 0;
1039} 1025}
1040 1026
1041static int gfs2_setxattr(struct dentry *dentry, const char *name, 1027static int gfs2_setxattr(struct dentry *dentry, const char *name,
diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c
index b47d9598c047..7685b46f934b 100644
--- a/fs/gfs2/ops_super.c
+++ b/fs/gfs2/ops_super.c
@@ -157,7 +157,8 @@ static void gfs2_write_super(struct super_block *sb)
157static int gfs2_sync_fs(struct super_block *sb, int wait) 157static int gfs2_sync_fs(struct super_block *sb, int wait)
158{ 158{
159 sb->s_dirt = 0; 159 sb->s_dirt = 0;
160 gfs2_log_flush(sb->s_fs_info, NULL); 160 if (wait)
161 gfs2_log_flush(sb->s_fs_info, NULL);
161 return 0; 162 return 0;
162} 163}
163 164
@@ -215,7 +216,7 @@ static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
215{ 216{
216 struct super_block *sb = dentry->d_inode->i_sb; 217 struct super_block *sb = dentry->d_inode->i_sb;
217 struct gfs2_sbd *sdp = sb->s_fs_info; 218 struct gfs2_sbd *sdp = sb->s_fs_info;
218 struct gfs2_statfs_change sc; 219 struct gfs2_statfs_change_host sc;
219 int error; 220 int error;
220 221
221 if (gfs2_tune_get(sdp, gt_statfs_slow)) 222 if (gfs2_tune_get(sdp, gt_statfs_slow))
@@ -293,8 +294,6 @@ static void gfs2_clear_inode(struct inode *inode)
293 */ 294 */
294 if (inode->i_private) { 295 if (inode->i_private) {
295 struct gfs2_inode *ip = GFS2_I(inode); 296 struct gfs2_inode *ip = GFS2_I(inode);
296 gfs2_glock_inode_squish(inode);
297 gfs2_assert(inode->i_sb->s_fs_info, ip->i_gl->gl_state == LM_ST_UNLOCKED);
298 ip->i_gl->gl_object = NULL; 297 ip->i_gl->gl_object = NULL;
299 gfs2_glock_schedule_for_reclaim(ip->i_gl); 298 gfs2_glock_schedule_for_reclaim(ip->i_gl);
300 gfs2_glock_put(ip->i_gl); 299 gfs2_glock_put(ip->i_gl);
@@ -395,7 +394,7 @@ static void gfs2_delete_inode(struct inode *inode)
395 if (!inode->i_private) 394 if (!inode->i_private)
396 goto out; 395 goto out;
397 396
398 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &gh); 397 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB, &gh);
399 if (unlikely(error)) { 398 if (unlikely(error)) {
400 gfs2_glock_dq_uninit(&ip->i_iopen_gh); 399 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
401 goto out; 400 goto out;
@@ -407,7 +406,7 @@ static void gfs2_delete_inode(struct inode *inode)
407 if (error) 406 if (error)
408 goto out_uninit; 407 goto out_uninit;
409 408
410 if (S_ISDIR(ip->i_di.di_mode) && 409 if (S_ISDIR(inode->i_mode) &&
411 (ip->i_di.di_flags & GFS2_DIF_EXHASH)) { 410 (ip->i_di.di_flags & GFS2_DIF_EXHASH)) {
412 error = gfs2_dir_exhash_dealloc(ip); 411 error = gfs2_dir_exhash_dealloc(ip);
413 if (error) 412 if (error)
diff --git a/fs/gfs2/ops_vm.c b/fs/gfs2/ops_vm.c
index 5453d2947ab3..45a5f11fc39a 100644
--- a/fs/gfs2/ops_vm.c
+++ b/fs/gfs2/ops_vm.c
@@ -76,7 +76,7 @@ static int alloc_page_backing(struct gfs2_inode *ip, struct page *page)
76 if (error) 76 if (error)
77 goto out; 77 goto out;
78 78
79 error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid); 79 error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
80 if (error) 80 if (error)
81 goto out_gunlock_q; 81 goto out_gunlock_q;
82 82
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index a3deae7416c9..d0db881b55d2 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -452,19 +452,19 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
452 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) 452 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
453 return 0; 453 return 0;
454 454
455 error = qdsb_get(sdp, QUOTA_USER, ip->i_di.di_uid, CREATE, qd); 455 error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
456 if (error) 456 if (error)
457 goto out; 457 goto out;
458 al->al_qd_num++; 458 al->al_qd_num++;
459 qd++; 459 qd++;
460 460
461 error = qdsb_get(sdp, QUOTA_GROUP, ip->i_di.di_gid, CREATE, qd); 461 error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);
462 if (error) 462 if (error)
463 goto out; 463 goto out;
464 al->al_qd_num++; 464 al->al_qd_num++;
465 qd++; 465 qd++;
466 466
467 if (uid != NO_QUOTA_CHANGE && uid != ip->i_di.di_uid) { 467 if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
468 error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd); 468 error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
469 if (error) 469 if (error)
470 goto out; 470 goto out;
@@ -472,7 +472,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
472 qd++; 472 qd++;
473 } 473 }
474 474
475 if (gid != NO_QUOTA_CHANGE && gid != ip->i_di.di_gid) { 475 if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
476 error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd); 476 error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
477 if (error) 477 if (error)
478 goto out; 478 goto out;
@@ -539,8 +539,7 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
539 qc->qc_id = cpu_to_be32(qd->qd_id); 539 qc->qc_id = cpu_to_be32(qd->qd_id);
540 } 540 }
541 541
542 x = qc->qc_change; 542 x = be64_to_cpu(qc->qc_change) + change;
543 x = be64_to_cpu(x) + change;
544 qc->qc_change = cpu_to_be64(x); 543 qc->qc_change = cpu_to_be64(x);
545 544
546 spin_lock(&sdp->sd_quota_spin); 545 spin_lock(&sdp->sd_quota_spin);
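
The do_qc() change above is a plain read-modify-write of a big-endian on-disk counter; a hedged userspace sketch, with htobe64()/be64toh() standing in for the kernel's cpu_to_be64()/be64_to_cpu():

/* Decode a big-endian on-disk value, adjust it, re-encode it. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t qc_change = htobe64(40);	/* counter as stored on disk */
	int64_t change = 2;			/* delta to apply */

	uint64_t x = be64toh(qc_change) + change;
	qc_change = htobe64(x);

	printf("new on-disk value decodes to %llu\n",
	       (unsigned long long)be64toh(qc_change));
	return 0;
}
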
@@ -743,7 +742,7 @@ static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
743 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 742 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
744 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); 743 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
745 struct gfs2_holder i_gh; 744 struct gfs2_holder i_gh;
746 struct gfs2_quota q; 745 struct gfs2_quota_host q;
747 char buf[sizeof(struct gfs2_quota)]; 746 char buf[sizeof(struct gfs2_quota)];
748 struct file_ra_state ra_state; 747 struct file_ra_state ra_state;
749 int error; 748 int error;
@@ -1103,7 +1102,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
1103 1102
1104 for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots; 1103 for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1105 y++, slot++) { 1104 y++, slot++) {
1106 struct gfs2_quota_change qc; 1105 struct gfs2_quota_change_host qc;
1107 struct gfs2_quota_data *qd; 1106 struct gfs2_quota_data *qd;
1108 1107
1109 gfs2_quota_change_in(&qc, bh->b_data + 1108 gfs2_quota_change_in(&qc, bh->b_data +
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 62cd223819b7..d0c806b85c86 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -132,10 +132,11 @@ void gfs2_revoke_clean(struct gfs2_sbd *sdp)
132 */ 132 */
133 133
134static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk, 134static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk,
135 struct gfs2_log_header *head) 135 struct gfs2_log_header_host *head)
136{ 136{
137 struct buffer_head *bh; 137 struct buffer_head *bh;
138 struct gfs2_log_header lh; 138 struct gfs2_log_header_host lh;
139 const u32 nothing = 0;
139 u32 hash; 140 u32 hash;
140 int error; 141 int error;
141 142
@@ -143,11 +144,11 @@ static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk,
143 if (error) 144 if (error)
144 return error; 145 return error;
145 146
146 memcpy(&lh, bh->b_data, sizeof(struct gfs2_log_header)); 147 hash = crc32_le((u32)~0, bh->b_data, sizeof(struct gfs2_log_header) -
147 lh.lh_hash = 0; 148 sizeof(u32));
148 hash = gfs2_disk_hash((char *)&lh, sizeof(struct gfs2_log_header)); 149 hash = crc32_le(hash, (unsigned char const *)&nothing, sizeof(nothing));
150 hash ^= (u32)~0;
149 gfs2_log_header_in(&lh, bh->b_data); 151 gfs2_log_header_in(&lh, bh->b_data);
150
151 brelse(bh); 152 brelse(bh);
152 153
153 if (lh.lh_header.mh_magic != GFS2_MAGIC || 154 if (lh.lh_header.mh_magic != GFS2_MAGIC ||
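
The reworked check above computes the CRC as if the header's own hash field were zero, without first copying the header and clearing the field. A userspace sketch of the same idea, using zlib's crc32() (link with -lz) in place of the kernel's crc32_le(~0, ...) ^ ~0 combination; the toy struct layout is an assumption for illustration (in gfs2_log_header the hash is the trailing word, so the kernel can simply use sizeof(header) - sizeof(u32)):

/* CRC a header while treating its embedded hash field as zero. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <zlib.h>

struct toy_log_header {
	uint32_t magic;
	uint64_t sequence;
	uint32_t hash;		/* excluded from its own checksum */
};

static uint32_t header_crc(const struct toy_log_header *lh)
{
	const uint32_t zero = 0;
	uLong crc = crc32(0L, Z_NULL, 0);

	/* everything up to the hash field ... */
	crc = crc32(crc, (const Bytef *)lh,
		    offsetof(struct toy_log_header, hash));
	/* ... then a zero word standing in for the hash itself */
	crc = crc32(crc, (const Bytef *)&zero, sizeof(zero));
	return (uint32_t)crc;
}

int main(void)
{
	struct toy_log_header lh;

	memset(&lh, 0, sizeof(lh));
	lh.sequence = 42;
	lh.hash = header_crc(&lh);
	printf("lh_hash = %08x\n", lh.hash);
	return 0;
}
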
@@ -174,7 +175,7 @@ static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk,
174 */ 175 */
175 176
176static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk, 177static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk,
177 struct gfs2_log_header *head) 178 struct gfs2_log_header_host *head)
178{ 179{
179 unsigned int orig_blk = *blk; 180 unsigned int orig_blk = *blk;
180 int error; 181 int error;
@@ -205,10 +206,10 @@ static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk,
205 * Returns: errno 206 * Returns: errno
206 */ 207 */
207 208
208static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header *head) 209static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
209{ 210{
210 unsigned int blk = head->lh_blkno; 211 unsigned int blk = head->lh_blkno;
211 struct gfs2_log_header lh; 212 struct gfs2_log_header_host lh;
212 int error; 213 int error;
213 214
214 for (;;) { 215 for (;;) {
@@ -245,9 +246,9 @@ static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header *head)
245 * Returns: errno 246 * Returns: errno
246 */ 247 */
247 248
248int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header *head) 249int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
249{ 250{
250 struct gfs2_log_header lh_1, lh_m; 251 struct gfs2_log_header_host lh_1, lh_m;
251 u32 blk_1, blk_2, blk_m; 252 u32 blk_1, blk_2, blk_m;
252 int error; 253 int error;
253 254
@@ -320,7 +321,7 @@ static int foreach_descriptor(struct gfs2_jdesc *jd, unsigned int start,
320 length = be32_to_cpu(ld->ld_length); 321 length = be32_to_cpu(ld->ld_length);
321 322
322 if (be32_to_cpu(ld->ld_header.mh_type) == GFS2_METATYPE_LH) { 323 if (be32_to_cpu(ld->ld_header.mh_type) == GFS2_METATYPE_LH) {
323 struct gfs2_log_header lh; 324 struct gfs2_log_header_host lh;
324 error = get_log_header(jd, start, &lh); 325 error = get_log_header(jd, start, &lh);
325 if (!error) { 326 if (!error) {
326 gfs2_replay_incr_blk(sdp, &start); 327 gfs2_replay_incr_blk(sdp, &start);
@@ -363,7 +364,7 @@ static int foreach_descriptor(struct gfs2_jdesc *jd, unsigned int start,
363 * Returns: errno 364 * Returns: errno
364 */ 365 */
365 366
366static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header *head) 367static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
367{ 368{
368 struct gfs2_inode *ip = GFS2_I(jd->jd_inode); 369 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
369 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); 370 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
@@ -425,7 +426,7 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd)
425{ 426{
426 struct gfs2_inode *ip = GFS2_I(jd->jd_inode); 427 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
427 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); 428 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
428 struct gfs2_log_header head; 429 struct gfs2_log_header_host head;
429 struct gfs2_holder j_gh, ji_gh, t_gh; 430 struct gfs2_holder j_gh, ji_gh, t_gh;
430 unsigned long t; 431 unsigned long t;
431 int ro = 0; 432 int ro = 0;
diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h
index 961feedf4d8b..f7235e61c723 100644
--- a/fs/gfs2/recovery.h
+++ b/fs/gfs2/recovery.h
@@ -26,7 +26,7 @@ int gfs2_revoke_check(struct gfs2_sbd *sdp, u64 blkno, unsigned int where);
26void gfs2_revoke_clean(struct gfs2_sbd *sdp); 26void gfs2_revoke_clean(struct gfs2_sbd *sdp);
27 27
28int gfs2_find_jhead(struct gfs2_jdesc *jd, 28int gfs2_find_jhead(struct gfs2_jdesc *jd,
29 struct gfs2_log_header *head); 29 struct gfs2_log_header_host *head);
30int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd); 30int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd);
31void gfs2_check_journals(struct gfs2_sbd *sdp); 31void gfs2_check_journals(struct gfs2_sbd *sdp);
32 32
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index b261385c0065..ff0846528d54 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -253,7 +253,7 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
253 253
254} 254}
255 255
256static inline int rgrp_contains_block(struct gfs2_rindex *ri, u64 block) 256static inline int rgrp_contains_block(struct gfs2_rindex_host *ri, u64 block)
257{ 257{
258 u64 first = ri->ri_data0; 258 u64 first = ri->ri_data0;
259 u64 last = first + ri->ri_data; 259 u64 last = first + ri->ri_data;
@@ -1217,7 +1217,7 @@ u64 gfs2_alloc_data(struct gfs2_inode *ip)
1217 al->al_alloced++; 1217 al->al_alloced++;
1218 1218
1219 gfs2_statfs_change(sdp, 0, -1, 0); 1219 gfs2_statfs_change(sdp, 0, -1, 0);
1220 gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid); 1220 gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid);
1221 1221
1222 spin_lock(&sdp->sd_rindex_spin); 1222 spin_lock(&sdp->sd_rindex_spin);
1223 rgd->rd_free_clone--; 1223 rgd->rd_free_clone--;
@@ -1261,7 +1261,7 @@ u64 gfs2_alloc_meta(struct gfs2_inode *ip)
1261 al->al_alloced++; 1261 al->al_alloced++;
1262 1262
1263 gfs2_statfs_change(sdp, 0, -1, 0); 1263 gfs2_statfs_change(sdp, 0, -1, 0);
1264 gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid); 1264 gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid);
1265 gfs2_trans_add_unrevoke(sdp, block); 1265 gfs2_trans_add_unrevoke(sdp, block);
1266 1266
1267 spin_lock(&sdp->sd_rindex_spin); 1267 spin_lock(&sdp->sd_rindex_spin);
@@ -1337,8 +1337,7 @@ void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
1337 gfs2_trans_add_rg(rgd); 1337 gfs2_trans_add_rg(rgd);
1338 1338
1339 gfs2_statfs_change(sdp, 0, +blen, 0); 1339 gfs2_statfs_change(sdp, 0, +blen, 0);
1340 gfs2_quota_change(ip, -(s64)blen, 1340 gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
1341 ip->i_di.di_uid, ip->i_di.di_gid);
1342} 1341}
1343 1342
1344/** 1343/**
@@ -1366,7 +1365,7 @@ void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
1366 gfs2_trans_add_rg(rgd); 1365 gfs2_trans_add_rg(rgd);
1367 1366
1368 gfs2_statfs_change(sdp, 0, +blen, 0); 1367 gfs2_statfs_change(sdp, 0, +blen, 0);
1369 gfs2_quota_change(ip, -(s64)blen, ip->i_di.di_uid, ip->i_di.di_gid); 1368 gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
1370 gfs2_meta_wipe(ip, bstart, blen); 1369 gfs2_meta_wipe(ip, bstart, blen);
1371} 1370}
1372 1371
@@ -1411,7 +1410,7 @@ static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
1411void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip) 1410void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
1412{ 1411{
1413 gfs2_free_uninit_di(rgd, ip->i_num.no_addr); 1412 gfs2_free_uninit_di(rgd, ip->i_num.no_addr);
1414 gfs2_quota_change(ip, -1, ip->i_di.di_uid, ip->i_di.di_gid); 1413 gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
1415 gfs2_meta_wipe(ip, ip->i_num.no_addr, 1); 1414 gfs2_meta_wipe(ip, ip->i_num.no_addr, 1);
1416} 1415}
1417 1416
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 6a78b1b32e25..43a24f2e5905 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -97,7 +97,7 @@ void gfs2_tune_init(struct gfs2_tune *gt)
97 * changed. 97 * changed.
98 */ 98 */
99 99
100int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb *sb, int silent) 100int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent)
101{ 101{
102 unsigned int x; 102 unsigned int x;
103 103
@@ -180,6 +180,24 @@ static int end_bio_io_page(struct bio *bio, unsigned int bytes_done, int error)
180 return 0; 180 return 0;
181} 181}
182 182
183/**
184 * gfs2_read_super - Read the gfs2 super block from disk
185 * @sb: The VFS super block
186 * @sector: The location of the super block
187 *
188 * This uses the bio functions to read the super block from disk
189 * because we want to be 100% sure that we never read cached data.
190 * A super block is read twice only during each GFS2 mount and is
191 * never written to by the filesystem. The first time its read no
192 * locks are held, and the only details which are looked at are those
193 * relating to the locking protocol. Once locking is up and working,
194 * the sb is read again under the lock to establish the location of
195 * the master directory (contains pointers to journals etc) and the
196 * root directory.
197 *
198 * Returns: A page containing the sb or NULL
199 */
200
183struct page *gfs2_read_super(struct super_block *sb, sector_t sector) 201struct page *gfs2_read_super(struct super_block *sb, sector_t sector)
184{ 202{
185 struct page *page; 203 struct page *page;
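
The comment block added above explains that the superblock must always come from the disk, never from cached pages. As a loose userspace analogue (an assumption, not how the kernel does it), an O_DIRECT read bypasses the page cache in much the same spirit; the device path and offset below are hypothetical:

/* Read one block from a device, bypassing the page cache. */
#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	const char *dev = "/dev/sdb1";		/* hypothetical device */
	const off_t sb_offset = 64 * 1024;	/* e.g. a superblock at 64 KiB */
	const size_t len = 4096;
	void *buf;
	int fd;

	if (posix_memalign(&buf, 4096, len))	/* O_DIRECT needs alignment */
		return 1;

	fd = open(dev, O_RDONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (pread(fd, buf, len, sb_offset) != (ssize_t)len) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("read %zu uncached bytes at offset %lld\n",
	       len, (long long)sb_offset);
	close(fd);
	free(buf);
	return 0;
}
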
@@ -199,7 +217,7 @@ struct page *gfs2_read_super(struct super_block *sb, sector_t sector)
199 return NULL; 217 return NULL;
200 } 218 }
201 219
202 bio->bi_sector = sector; 220 bio->bi_sector = sector * (sb->s_blocksize >> 9);
203 bio->bi_bdev = sb->s_bdev; 221 bio->bi_bdev = sb->s_bdev;
204 bio_add_page(bio, page, PAGE_SIZE, 0); 222 bio_add_page(bio, page, PAGE_SIZE, 0);
205 223
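
The bi_sector fix in this hunk is a unit conversion: struct bio addresses the device in 512-byte sectors, while the caller passes a filesystem block number, so the block number has to be scaled by (block size / 512). A trivial worked example with assumed values:

/* Filesystem block number to 512-byte bio sector. */
#include <stdio.h>

int main(void)
{
	unsigned long long sector = 128;	/* filesystem block number (assumed) */
	unsigned int s_blocksize = 4096;	/* filesystem block size */

	unsigned long long bi_sector = sector * (s_blocksize >> 9);

	/* 128 * (4096 / 512) = 1024 sectors of 512 bytes = byte offset 524288 */
	printf("bi_sector = %llu\n", bi_sector);
	return 0;
}
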
@@ -508,7 +526,7 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
508 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); 526 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
509 struct gfs2_glock *j_gl = ip->i_gl; 527 struct gfs2_glock *j_gl = ip->i_gl;
510 struct gfs2_holder t_gh; 528 struct gfs2_holder t_gh;
511 struct gfs2_log_header head; 529 struct gfs2_log_header_host head;
512 int error; 530 int error;
513 531
514 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 532 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
@@ -517,7 +535,7 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
517 return error; 535 return error;
518 536
519 gfs2_meta_cache_flush(ip); 537 gfs2_meta_cache_flush(ip);
520 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA); 538 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
521 539
522 error = gfs2_find_jhead(sdp->sd_jdesc, &head); 540 error = gfs2_find_jhead(sdp->sd_jdesc, &head);
523 if (error) 541 if (error)
@@ -587,9 +605,9 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
587int gfs2_statfs_init(struct gfs2_sbd *sdp) 605int gfs2_statfs_init(struct gfs2_sbd *sdp)
588{ 606{
589 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 607 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
590 struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master; 608 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
591 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); 609 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
592 struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local; 610 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
593 struct buffer_head *m_bh, *l_bh; 611 struct buffer_head *m_bh, *l_bh;
594 struct gfs2_holder gh; 612 struct gfs2_holder gh;
595 int error; 613 int error;
@@ -634,7 +652,7 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
634 s64 dinodes) 652 s64 dinodes)
635{ 653{
636 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); 654 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
637 struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local; 655 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
638 struct buffer_head *l_bh; 656 struct buffer_head *l_bh;
639 int error; 657 int error;
640 658
@@ -660,8 +678,8 @@ int gfs2_statfs_sync(struct gfs2_sbd *sdp)
660{ 678{
661 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 679 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
662 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); 680 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
663 struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master; 681 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
664 struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local; 682 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
665 struct gfs2_holder gh; 683 struct gfs2_holder gh;
666 struct buffer_head *m_bh, *l_bh; 684 struct buffer_head *m_bh, *l_bh;
667 int error; 685 int error;
@@ -727,10 +745,10 @@ out:
727 * Returns: errno 745 * Returns: errno
728 */ 746 */
729 747
730int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc) 748int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
731{ 749{
732 struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master; 750 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
733 struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local; 751 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
734 752
735 spin_lock(&sdp->sd_statfs_spin); 753 spin_lock(&sdp->sd_statfs_spin);
736 754
@@ -760,7 +778,7 @@ int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc)
760 */ 778 */
761 779
762static int statfs_slow_fill(struct gfs2_rgrpd *rgd, 780static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
763 struct gfs2_statfs_change *sc) 781 struct gfs2_statfs_change_host *sc)
764{ 782{
765 gfs2_rgrp_verify(rgd); 783 gfs2_rgrp_verify(rgd);
766 sc->sc_total += rgd->rd_ri.ri_data; 784 sc->sc_total += rgd->rd_ri.ri_data;
@@ -782,7 +800,7 @@ static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
782 * Returns: errno 800 * Returns: errno
783 */ 801 */
784 802
785int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc) 803int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
786{ 804{
787 struct gfs2_holder ri_gh; 805 struct gfs2_holder ri_gh;
788 struct gfs2_rgrpd *rgd_next; 806 struct gfs2_rgrpd *rgd_next;
@@ -792,7 +810,7 @@ int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc)
792 int done; 810 int done;
793 int error = 0, err; 811 int error = 0, err;
794 812
795 memset(sc, 0, sizeof(struct gfs2_statfs_change)); 813 memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
796 gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL); 814 gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
797 if (!gha) 815 if (!gha)
798 return -ENOMEM; 816 return -ENOMEM;
@@ -873,7 +891,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
873 struct gfs2_jdesc *jd; 891 struct gfs2_jdesc *jd;
874 struct lfcc *lfcc; 892 struct lfcc *lfcc;
875 LIST_HEAD(list); 893 LIST_HEAD(list);
876 struct gfs2_log_header lh; 894 struct gfs2_log_header_host lh;
877 int error; 895 int error;
878 896
879 error = gfs2_jindex_hold(sdp, &ji_gh); 897 error = gfs2_jindex_hold(sdp, &ji_gh);
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 5bb443ae0f59..e590b2df11dc 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -14,7 +14,7 @@
14 14
15void gfs2_tune_init(struct gfs2_tune *gt); 15void gfs2_tune_init(struct gfs2_tune *gt);
16 16
17int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb *sb, int silent); 17int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent);
18int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent); 18int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent);
19struct page *gfs2_read_super(struct super_block *sb, sector_t sector); 19struct page *gfs2_read_super(struct super_block *sb, sector_t sector);
20 20
@@ -45,8 +45,8 @@ int gfs2_statfs_init(struct gfs2_sbd *sdp);
45void gfs2_statfs_change(struct gfs2_sbd *sdp, 45void gfs2_statfs_change(struct gfs2_sbd *sdp,
46 s64 total, s64 free, s64 dinodes); 46 s64 total, s64 free, s64 dinodes);
47int gfs2_statfs_sync(struct gfs2_sbd *sdp); 47int gfs2_statfs_sync(struct gfs2_sbd *sdp);
48int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc); 48int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc);
49int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc); 49int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc);
50 50
51int gfs2_freeze_fs(struct gfs2_sbd *sdp); 51int gfs2_freeze_fs(struct gfs2_sbd *sdp);
52void gfs2_unfreeze_fs(struct gfs2_sbd *sdp); 52void gfs2_unfreeze_fs(struct gfs2_sbd *sdp);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 0e0ec988f731..983eaf1e06be 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -426,9 +426,6 @@ static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\
426} \ 426} \
427TUNE_ATTR_2(name, name##_store) 427TUNE_ATTR_2(name, name##_store)
428 428
429TUNE_ATTR(ilimit, 0);
430TUNE_ATTR(ilimit_tries, 0);
431TUNE_ATTR(ilimit_min, 0);
432TUNE_ATTR(demote_secs, 0); 429TUNE_ATTR(demote_secs, 0);
433TUNE_ATTR(incore_log_blocks, 0); 430TUNE_ATTR(incore_log_blocks, 0);
434TUNE_ATTR(log_flush_secs, 0); 431TUNE_ATTR(log_flush_secs, 0);
@@ -447,7 +444,6 @@ TUNE_ATTR(quota_simul_sync, 1);
447TUNE_ATTR(quota_cache_secs, 1); 444TUNE_ATTR(quota_cache_secs, 1);
448TUNE_ATTR(max_atomic_write, 1); 445TUNE_ATTR(max_atomic_write, 1);
449TUNE_ATTR(stall_secs, 1); 446TUNE_ATTR(stall_secs, 1);
450TUNE_ATTR(entries_per_readdir, 1);
451TUNE_ATTR(greedy_default, 1); 447TUNE_ATTR(greedy_default, 1);
452TUNE_ATTR(greedy_quantum, 1); 448TUNE_ATTR(greedy_quantum, 1);
453TUNE_ATTR(greedy_max, 1); 449TUNE_ATTR(greedy_max, 1);
@@ -459,9 +455,6 @@ TUNE_ATTR_DAEMON(quotad_secs, quotad_process);
459TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store); 455TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
460 456
461static struct attribute *tune_attrs[] = { 457static struct attribute *tune_attrs[] = {
462 &tune_attr_ilimit.attr,
463 &tune_attr_ilimit_tries.attr,
464 &tune_attr_ilimit_min.attr,
465 &tune_attr_demote_secs.attr, 458 &tune_attr_demote_secs.attr,
466 &tune_attr_incore_log_blocks.attr, 459 &tune_attr_incore_log_blocks.attr,
467 &tune_attr_log_flush_secs.attr, 460 &tune_attr_log_flush_secs.attr,
@@ -478,7 +471,6 @@ static struct attribute *tune_attrs[] = {
478 &tune_attr_quota_cache_secs.attr, 471 &tune_attr_quota_cache_secs.attr,
479 &tune_attr_max_atomic_write.attr, 472 &tune_attr_max_atomic_write.attr,
480 &tune_attr_stall_secs.attr, 473 &tune_attr_stall_secs.attr,
481 &tune_attr_entries_per_readdir.attr,
482 &tune_attr_greedy_default.attr, 474 &tune_attr_greedy_default.attr,
483 &tune_attr_greedy_quantum.attr, 475 &tune_attr_greedy_quantum.attr,
484 &tune_attr_greedy_max.attr, 476 &tune_attr_greedy_max.attr,
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index 196c604faadc..e5707a9f78c2 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -23,9 +23,9 @@
23#include "lm.h" 23#include "lm.h"
24#include "util.h" 24#include "util.h"
25 25
26kmem_cache_t *gfs2_glock_cachep __read_mostly; 26struct kmem_cache *gfs2_glock_cachep __read_mostly;
27kmem_cache_t *gfs2_inode_cachep __read_mostly; 27struct kmem_cache *gfs2_inode_cachep __read_mostly;
28kmem_cache_t *gfs2_bufdata_cachep __read_mostly; 28struct kmem_cache *gfs2_bufdata_cachep __read_mostly;
29 29
30void gfs2_assert_i(struct gfs2_sbd *sdp) 30void gfs2_assert_i(struct gfs2_sbd *sdp)
31{ 31{
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 76a50899fe9e..28938a46cf47 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -83,8 +83,7 @@ static inline int gfs2_meta_check_i(struct gfs2_sbd *sdp,
83 char *file, unsigned int line) 83 char *file, unsigned int line)
84{ 84{
85 struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data; 85 struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
86 u32 magic = mh->mh_magic; 86 u32 magic = be32_to_cpu(mh->mh_magic);
87 magic = be32_to_cpu(magic);
88 if (unlikely(magic != GFS2_MAGIC)) 87 if (unlikely(magic != GFS2_MAGIC))
89 return gfs2_meta_check_ii(sdp, bh, "magic number", function, 88 return gfs2_meta_check_ii(sdp, bh, "magic number", function,
90 file, line); 89 file, line);
@@ -107,9 +106,8 @@ static inline int gfs2_metatype_check_i(struct gfs2_sbd *sdp,
107 char *file, unsigned int line) 106 char *file, unsigned int line)
108{ 107{
109 struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data; 108 struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
110 u32 magic = mh->mh_magic; 109 u32 magic = be32_to_cpu(mh->mh_magic);
111 u16 t = be32_to_cpu(mh->mh_type); 110 u16 t = be32_to_cpu(mh->mh_type);
112 magic = be32_to_cpu(magic);
113 if (unlikely(magic != GFS2_MAGIC)) 111 if (unlikely(magic != GFS2_MAGIC))
114 return gfs2_meta_check_ii(sdp, bh, "magic number", function, 112 return gfs2_meta_check_ii(sdp, bh, "magic number", function,
115 file, line); 113 file, line);
@@ -146,9 +144,9 @@ int gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
146gfs2_io_error_bh_i((sdp), (bh), __FUNCTION__, __FILE__, __LINE__); 144gfs2_io_error_bh_i((sdp), (bh), __FUNCTION__, __FILE__, __LINE__);
147 145
148 146
149extern kmem_cache_t *gfs2_glock_cachep; 147extern struct kmem_cache *gfs2_glock_cachep;
150extern kmem_cache_t *gfs2_inode_cachep; 148extern struct kmem_cache *gfs2_inode_cachep;
151extern kmem_cache_t *gfs2_bufdata_cachep; 149extern struct kmem_cache *gfs2_bufdata_cachep;
152 150
153static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt, 151static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
154 unsigned int *p) 152 unsigned int *p)
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 85b17b3fa4a0..a36987966004 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -24,7 +24,7 @@
24#include "hfs_fs.h" 24#include "hfs_fs.h"
25#include "btree.h" 25#include "btree.h"
26 26
27static kmem_cache_t *hfs_inode_cachep; 27static struct kmem_cache *hfs_inode_cachep;
28 28
29MODULE_LICENSE("GPL"); 29MODULE_LICENSE("GPL");
30 30
@@ -145,7 +145,7 @@ static struct inode *hfs_alloc_inode(struct super_block *sb)
145{ 145{
146 struct hfs_inode_info *i; 146 struct hfs_inode_info *i;
147 147
148 i = kmem_cache_alloc(hfs_inode_cachep, SLAB_KERNEL); 148 i = kmem_cache_alloc(hfs_inode_cachep, GFP_KERNEL);
149 return i ? &i->vfs_inode : NULL; 149 return i ? &i->vfs_inode : NULL;
150} 150}
151 151
@@ -430,7 +430,7 @@ static struct file_system_type hfs_fs_type = {
430 .fs_flags = FS_REQUIRES_DEV, 430 .fs_flags = FS_REQUIRES_DEV,
431}; 431};
432 432
433static void hfs_init_once(void *p, kmem_cache_t *cachep, unsigned long flags) 433static void hfs_init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
434{ 434{
435 struct hfs_inode_info *i = p; 435 struct hfs_inode_info *i = p;
436 436
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 194eede52fa4..0f513c6bf843 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -434,13 +434,13 @@ MODULE_AUTHOR("Brad Boyer");
434MODULE_DESCRIPTION("Extended Macintosh Filesystem"); 434MODULE_DESCRIPTION("Extended Macintosh Filesystem");
435MODULE_LICENSE("GPL"); 435MODULE_LICENSE("GPL");
436 436
437static kmem_cache_t *hfsplus_inode_cachep; 437static struct kmem_cache *hfsplus_inode_cachep;
438 438
439static struct inode *hfsplus_alloc_inode(struct super_block *sb) 439static struct inode *hfsplus_alloc_inode(struct super_block *sb)
440{ 440{
441 struct hfsplus_inode_info *i; 441 struct hfsplus_inode_info *i;
442 442
443 i = kmem_cache_alloc(hfsplus_inode_cachep, SLAB_KERNEL); 443 i = kmem_cache_alloc(hfsplus_inode_cachep, GFP_KERNEL);
444 return i ? &i->vfs_inode : NULL; 444 return i ? &i->vfs_inode : NULL;
445} 445}
446 446
@@ -467,7 +467,7 @@ static struct file_system_type hfsplus_fs_type = {
467 .fs_flags = FS_REQUIRES_DEV, 467 .fs_flags = FS_REQUIRES_DEV,
468}; 468};
469 469
470static void hfsplus_init_once(void *p, kmem_cache_t *cachep, unsigned long flags) 470static void hfsplus_init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
471{ 471{
472 struct hfsplus_inode_info *i = p; 472 struct hfsplus_inode_info *i = p;
473 473
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index ecc9180645ae..594f9c428fc2 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -84,7 +84,8 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
84 } 84 }
85 if (!fno->dirflag) { 85 if (!fno->dirflag) {
86 e = 1; 86 e = 1;
87 hpfs_error(inode->i_sb, "not a directory, fnode %08x",inode->i_ino); 87 hpfs_error(inode->i_sb, "not a directory, fnode %08lx",
88 (unsigned long)inode->i_ino);
88 } 89 }
89 if (hpfs_inode->i_dno != fno->u.external[0].disk_secno) { 90 if (hpfs_inode->i_dno != fno->u.external[0].disk_secno) {
90 e = 1; 91 e = 1;
@@ -144,8 +145,11 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
144 } 145 }
145 if (de->first || de->last) { 146 if (de->first || de->last) {
146 if (hpfs_sb(inode->i_sb)->sb_chk) { 147 if (hpfs_sb(inode->i_sb)->sb_chk) {
147 if (de->first && !de->last && (de->namelen != 2 || de ->name[0] != 1 || de->name[1] != 1)) hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08x", old_pos); 148 if (de->first && !de->last && (de->namelen != 2
148 if (de->last && (de->namelen != 1 || de ->name[0] != 255)) hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08x", old_pos); 149 || de ->name[0] != 1 || de->name[1] != 1))
150 hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08lx", old_pos);
151 if (de->last && (de->namelen != 1 || de ->name[0] != 255))
152 hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08lx", old_pos);
149 } 153 }
150 hpfs_brelse4(&qbh); 154 hpfs_brelse4(&qbh);
151 goto again; 155 goto again;
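
The fs/hpfs format-string fixes in this file all follow one pattern: ino_t and file positions are not guaranteed to match an int-sized specifier, so the value is cast to a type that matches the format. A small standalone illustration (the values are made up):

/* Print variable-width types through a fixed-width format safely. */
#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	ino_t ino = 0x1234;			/* width of ino_t varies by arch/config */
	long long pos = 0x89abcdef01LL;		/* stands in for a directory position */

	printf("fnode %08lx\n", (unsigned long)ino);
	printf("pos = %08llx\n", (unsigned long long)pos);
	return 0;
}
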
diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c
index 229ff2fb1809..fe83c2b7d2d8 100644
--- a/fs/hpfs/dnode.c
+++ b/fs/hpfs/dnode.c
@@ -533,10 +533,13 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
533 struct buffer_head *bh; 533 struct buffer_head *bh;
534 struct dnode *d1; 534 struct dnode *d1;
535 struct quad_buffer_head qbh1; 535 struct quad_buffer_head qbh1;
536 if (hpfs_sb(i->i_sb)->sb_chk) if (up != i->i_ino) { 536 if (hpfs_sb(i->i_sb)->sb_chk)
537 hpfs_error(i->i_sb, "bad pointer to fnode, dnode %08x, pointing to %08x, should be %08x", dno, up, i->i_ino); 537 if (up != i->i_ino) {
538 hpfs_error(i->i_sb,
539 "bad pointer to fnode, dnode %08x, pointing to %08x, should be %08lx",
540 dno, up, (unsigned long)i->i_ino);
538 return; 541 return;
539 } 542 }
540 if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { 543 if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) {
541 d1->up = up; 544 d1->up = up;
542 d1->root_dnode = 1; 545 d1->root_dnode = 1;
@@ -851,7 +854,9 @@ struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp,
851 /* Going to the next dirent */ 854 /* Going to the next dirent */
852 if ((d = de_next_de(de)) < dnode_end_de(dnode)) { 855 if ((d = de_next_de(de)) < dnode_end_de(dnode)) {
853 if (!(++*posp & 077)) { 856 if (!(++*posp & 077)) {
854 hpfs_error(inode->i_sb, "map_pos_dirent: pos crossed dnode boundary; pos = %08x", *posp); 857 hpfs_error(inode->i_sb,
858 "map_pos_dirent: pos crossed dnode boundary; pos = %08llx",
859 (unsigned long long)*posp);
855 goto bail; 860 goto bail;
856 } 861 }
857 /* We're going down the tree */ 862 /* We're going down the tree */
diff --git a/fs/hpfs/ea.c b/fs/hpfs/ea.c
index 66339dc030e4..547a8384571f 100644
--- a/fs/hpfs/ea.c
+++ b/fs/hpfs/ea.c
@@ -243,8 +243,9 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, char *key, char *data
243 fnode->ea_offs = 0xc4; 243 fnode->ea_offs = 0xc4;
244 } 244 }
245 if (fnode->ea_offs < 0xc4 || fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200) { 245 if (fnode->ea_offs < 0xc4 || fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200) {
246 hpfs_error(s, "fnode %08x: ea_offs == %03x, ea_size_s == %03x", 246 hpfs_error(s, "fnode %08lx: ea_offs == %03x, ea_size_s == %03x",
247 inode->i_ino, fnode->ea_offs, fnode->ea_size_s); 247 (unsigned long)inode->i_ino,
248 fnode->ea_offs, fnode->ea_size_s);
248 return; 249 return;
249 } 250 }
250 if ((fnode->ea_size_s || !fnode->ea_size_l) && 251 if ((fnode->ea_size_s || !fnode->ea_size_l) &&
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 32ab51e42b96..1c07aa82d327 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -317,7 +317,8 @@ static inline struct hpfs_sb_info *hpfs_sb(struct super_block *sb)
317 317
318/* super.c */ 318/* super.c */
319 319
320void hpfs_error(struct super_block *, char *, ...); 320void hpfs_error(struct super_block *, const char *, ...)
321 __attribute__((format (printf, 2, 3)));
321int hpfs_stop_cycles(struct super_block *, int, int *, int *, char *); 322int hpfs_stop_cycles(struct super_block *, int, int *, int *, char *);
322unsigned hpfs_count_one_bitmap(struct super_block *, secno); 323unsigned hpfs_count_one_bitmap(struct super_block *, secno);
323 324
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 7faef8544f32..85d3e1d9ac00 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -251,7 +251,10 @@ void hpfs_write_inode_nolock(struct inode *i)
251 de->file_size = 0; 251 de->file_size = 0;
252 hpfs_mark_4buffers_dirty(&qbh); 252 hpfs_mark_4buffers_dirty(&qbh);
253 hpfs_brelse4(&qbh); 253 hpfs_brelse4(&qbh);
254 } else hpfs_error(i->i_sb, "directory %08x doesn't have '.' entry", i->i_ino); 254 } else
255 hpfs_error(i->i_sb,
256 "directory %08lx doesn't have '.' entry",
257 (unsigned long)i->i_ino);
255 } 258 }
256 mark_buffer_dirty(bh); 259 mark_buffer_dirty(bh);
257 brelse(bh); 260 brelse(bh);
diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
index 0fecdac22e4e..c4724589b2eb 100644
--- a/fs/hpfs/map.c
+++ b/fs/hpfs/map.c
@@ -126,32 +126,40 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea
126 struct extended_attribute *ea; 126 struct extended_attribute *ea;
127 struct extended_attribute *ea_end; 127 struct extended_attribute *ea_end;
128 if (fnode->magic != FNODE_MAGIC) { 128 if (fnode->magic != FNODE_MAGIC) {
129 hpfs_error(s, "bad magic on fnode %08x", ino); 129 hpfs_error(s, "bad magic on fnode %08lx",
130 (unsigned long)ino);
130 goto bail; 131 goto bail;
131 } 132 }
132 if (!fnode->dirflag) { 133 if (!fnode->dirflag) {
133 if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes != 134 if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes !=
134 (fnode->btree.internal ? 12 : 8)) { 135 (fnode->btree.internal ? 12 : 8)) {
135 hpfs_error(s, "bad number of nodes in fnode %08x", ino); 136 hpfs_error(s,
137 "bad number of nodes in fnode %08lx",
138 (unsigned long)ino);
136 goto bail; 139 goto bail;
137 } 140 }
138 if (fnode->btree.first_free != 141 if (fnode->btree.first_free !=
139 8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 8 : 12)) { 142 8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 8 : 12)) {
140 hpfs_error(s, "bad first_free pointer in fnode %08x", ino); 143 hpfs_error(s,
144 "bad first_free pointer in fnode %08lx",
145 (unsigned long)ino);
141 goto bail; 146 goto bail;
142 } 147 }
143 } 148 }
144 if (fnode->ea_size_s && ((signed int)fnode->ea_offs < 0xc4 || 149 if (fnode->ea_size_s && ((signed int)fnode->ea_offs < 0xc4 ||
145 (signed int)fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200)) { 150 (signed int)fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200)) {
146 hpfs_error(s, "bad EA info in fnode %08x: ea_offs == %04x ea_size_s == %04x", 151 hpfs_error(s,
147 ino, fnode->ea_offs, fnode->ea_size_s); 152 "bad EA info in fnode %08lx: ea_offs == %04x ea_size_s == %04x",
153 (unsigned long)ino,
154 fnode->ea_offs, fnode->ea_size_s);
148 goto bail; 155 goto bail;
149 } 156 }
150 ea = fnode_ea(fnode); 157 ea = fnode_ea(fnode);
151 ea_end = fnode_end_ea(fnode); 158 ea_end = fnode_end_ea(fnode);
152 while (ea != ea_end) { 159 while (ea != ea_end) {
153 if (ea > ea_end) { 160 if (ea > ea_end) {
154 hpfs_error(s, "bad EA in fnode %08x", ino); 161 hpfs_error(s, "bad EA in fnode %08lx",
162 (unsigned long)ino);
155 goto bail; 163 goto bail;
156 } 164 }
157 ea = next_ea(ea); 165 ea = next_ea(ea);
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 450b5e0b4785..d4abc1a1d566 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -46,21 +46,17 @@ static void unmark_dirty(struct super_block *s)
46} 46}
47 47
48/* Filesystem error... */ 48/* Filesystem error... */
49static char err_buf[1024];
49 50
50#define ERR_BUF_SIZE 1024 51void hpfs_error(struct super_block *s, const char *fmt, ...)
51
52void hpfs_error(struct super_block *s, char *m,...)
53{ 52{
54 char *buf; 53 va_list args;
55 va_list l; 54
56 va_start(l, m); 55 va_start(args, fmt);
57 if (!(buf = kmalloc(ERR_BUF_SIZE, GFP_KERNEL))) 56 vsnprintf(err_buf, sizeof(err_buf), fmt, args);
58 printk("HPFS: No memory for error message '%s'\n",m); 57 va_end(args);
59 else if (vsprintf(buf, m, l) >= ERR_BUF_SIZE) 58
60 printk("HPFS: Grrrr... Kernel memory corrupted ... going on, but it'll crash very soon :-(\n"); 59 printk("HPFS: filesystem error: %s", err_buf);
61 printk("HPFS: filesystem error: ");
62 if (buf) printk("%s", buf);
63 else printk("%s\n",m);
64 if (!hpfs_sb(s)->sb_was_error) { 60 if (!hpfs_sb(s)->sb_was_error) {
65 if (hpfs_sb(s)->sb_err == 2) { 61 if (hpfs_sb(s)->sb_err == 2) {
66 printk("; crashing the system because you wanted it\n"); 62 printk("; crashing the system because you wanted it\n");
@@ -76,7 +72,6 @@ void hpfs_error(struct super_block *s, char *m,...)
76 } else if (s->s_flags & MS_RDONLY) printk("; going on - but anything won't be destroyed because it's read-only\n"); 72 } else if (s->s_flags & MS_RDONLY) printk("; going on - but anything won't be destroyed because it's read-only\n");
77 else printk("; corrupted filesystem mounted read/write - your computer will explode within 20 seconds ... but you wanted it so!\n"); 73 else printk("; corrupted filesystem mounted read/write - your computer will explode within 20 seconds ... but you wanted it so!\n");
78 } else printk("\n"); 74 } else printk("\n");
79 kfree(buf);
80 hpfs_sb(s)->sb_was_error = 1; 75 hpfs_sb(s)->sb_was_error = 1;
81} 76}
82 77
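
The rewritten hpfs_error() formats into a fixed static buffer with vsnprintf() instead of kmalloc()+vsprintf(), so the error path can neither fail on allocation nor overrun the buffer. A userspace sketch of the same shape (report_error() and the buffer size are illustrative, not the kernel function):

/* Varargs error reporter using a bounded static buffer. */
#include <stdarg.h>
#include <stdio.h>

static char err_buf[1024];

static void report_error(const char *fmt, ...)
	__attribute__((format(printf, 1, 2)));

static void report_error(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(err_buf, sizeof(err_buf), fmt, args);	/* cannot overrun */
	va_end(args);

	fprintf(stderr, "filesystem error: %s\n", err_buf);
}

int main(void)
{
	report_error("bad magic on fnode %08lx", 0x1234UL);
	return 0;
}
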
@@ -160,12 +155,12 @@ static int hpfs_statfs(struct dentry *dentry, struct kstatfs *buf)
160 return 0; 155 return 0;
161} 156}
162 157
163static kmem_cache_t * hpfs_inode_cachep; 158static struct kmem_cache * hpfs_inode_cachep;
164 159
165static struct inode *hpfs_alloc_inode(struct super_block *sb) 160static struct inode *hpfs_alloc_inode(struct super_block *sb)
166{ 161{
167 struct hpfs_inode_info *ei; 162 struct hpfs_inode_info *ei;
168 ei = (struct hpfs_inode_info *)kmem_cache_alloc(hpfs_inode_cachep, SLAB_NOFS); 163 ei = (struct hpfs_inode_info *)kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS);
169 if (!ei) 164 if (!ei)
170 return NULL; 165 return NULL;
171 ei->vfs_inode.i_version = 1; 166 ei->vfs_inode.i_version = 1;
@@ -177,7 +172,7 @@ static void hpfs_destroy_inode(struct inode *inode)
177 kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode)); 172 kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode));
178} 173}
179 174
180static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 175static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
181{ 176{
182 struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo; 177 struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo;
183 178
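The hpfs_error() hunk above drops the kmalloc()/vsprintf() error path in favour of formatting into a fixed static buffer with vsnprintf(), so an oversized message is truncated rather than overflowing and nothing can fail to allocate on the error path. A minimal userspace sketch of that bounded-formatting pattern; the names (report_error, err_buf) are illustrative and not part of the kernel change:

/*
 * Standalone userspace sketch of the bounded formatting pattern the
 * hpfs_error() change switches to: format into a fixed buffer with
 * vsnprintf() so long messages are truncated instead of overflowing,
 * and no allocation is needed while reporting an error.
 */
#include <stdarg.h>
#include <stdio.h>

static char err_buf[1024];

static void report_error(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(err_buf, sizeof(err_buf), fmt, args);	/* never writes past err_buf */
	va_end(args);

	printf("filesystem error: %s\n", err_buf);
}

int main(void)
{
	report_error("bad EA in fnode %08lx", 0x1234UL);
	return 0;
}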
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 7f4756963d05..0706f5aac6a2 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -513,7 +513,7 @@ static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
513} 513}
514 514
515 515
516static kmem_cache_t *hugetlbfs_inode_cachep; 516static struct kmem_cache *hugetlbfs_inode_cachep;
517 517
518static struct inode *hugetlbfs_alloc_inode(struct super_block *sb) 518static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
519{ 519{
@@ -522,7 +522,7 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
522 522
523 if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo))) 523 if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
524 return NULL; 524 return NULL;
525 p = kmem_cache_alloc(hugetlbfs_inode_cachep, SLAB_KERNEL); 525 p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
526 if (unlikely(!p)) { 526 if (unlikely(!p)) {
527 hugetlbfs_inc_free_inodes(sbinfo); 527 hugetlbfs_inc_free_inodes(sbinfo);
528 return NULL; 528 return NULL;
@@ -545,7 +545,7 @@ static const struct address_space_operations hugetlbfs_aops = {
545}; 545};
546 546
547 547
548static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) 548static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
549{ 549{
550 struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo; 550 struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
551 551
diff --git a/fs/inode.c b/fs/inode.c
index 26cdb115ce67..9ecccab7326d 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -97,7 +97,7 @@ static DEFINE_MUTEX(iprune_mutex);
97 */ 97 */
98struct inodes_stat_t inodes_stat; 98struct inodes_stat_t inodes_stat;
99 99
100static kmem_cache_t * inode_cachep __read_mostly; 100static struct kmem_cache * inode_cachep __read_mostly;
101 101
102static struct inode *alloc_inode(struct super_block *sb) 102static struct inode *alloc_inode(struct super_block *sb)
103{ 103{
@@ -109,7 +109,7 @@ static struct inode *alloc_inode(struct super_block *sb)
109 if (sb->s_op->alloc_inode) 109 if (sb->s_op->alloc_inode)
110 inode = sb->s_op->alloc_inode(sb); 110 inode = sb->s_op->alloc_inode(sb);
111 else 111 else
112 inode = (struct inode *) kmem_cache_alloc(inode_cachep, SLAB_KERNEL); 112 inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL);
113 113
114 if (inode) { 114 if (inode) {
115 struct address_space * const mapping = &inode->i_data; 115 struct address_space * const mapping = &inode->i_data;
@@ -209,7 +209,7 @@ void inode_init_once(struct inode *inode)
209 209
210EXPORT_SYMBOL(inode_init_once); 210EXPORT_SYMBOL(inode_init_once);
211 211
212static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 212static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
213{ 213{
214 struct inode * inode = (struct inode *) foo; 214 struct inode * inode = (struct inode *) foo;
215 215
@@ -1242,9 +1242,6 @@ EXPORT_SYMBOL(inode_needs_sync);
1242 */ 1242 */
1243#ifdef CONFIG_QUOTA 1243#ifdef CONFIG_QUOTA
1244 1244
1245/* Function back in dquot.c */
1246int remove_inode_dquot_ref(struct inode *, int, struct list_head *);
1247
1248void remove_dquot_ref(struct super_block *sb, int type, 1245void remove_dquot_ref(struct super_block *sb, int type,
1249 struct list_head *tofree_head) 1246 struct list_head *tofree_head)
1250{ 1247{
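The fs/inode.c hunks above follow the pattern repeated throughout this diff: the kmem_cache_t typedef is spelled out as struct kmem_cache, allocations pass GFP_* flags (GFP_KERNEL, GFP_NOFS) instead of the old SLAB_* aliases, and slab constructors take a struct kmem_cache * argument. A minimal sketch of the converted shape, restricted to the calls that actually appear in these hunks; the example_* names and the struct are illustrative, not kernel code:

/*
 * Sketch of the slab API spelling this diff converges on.  Only
 * kmem_cache_alloc()/kmem_cache_free() and the constructor prototype
 * shown in the hunks are used; the cache is assumed to have been
 * created elsewhere during filesystem init.
 */
#include <linux/slab.h>
#include <linux/gfp.h>

struct example_inode_info {
	int dummy;
};

static struct kmem_cache *example_inode_cachep;	/* was: kmem_cache_t * */

static struct example_inode_info *example_alloc(void)
{
	/* GFP_KERNEL replaces the old SLAB_KERNEL spelling */
	return kmem_cache_alloc(example_inode_cachep, GFP_KERNEL);
}

static void example_free(struct example_inode_info *ei)
{
	kmem_cache_free(example_inode_cachep, ei);
}

/* constructor prototype also carries struct kmem_cache * now */
static void example_init_once(void *foo, struct kmem_cache *cachep,
			      unsigned long flags)
{
	struct example_inode_info *ei = foo;

	ei->dummy = 0;
}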
diff --git a/fs/inotify_user.c b/fs/inotify_user.c
index 017cb0f134d6..e1956e6f116c 100644
--- a/fs/inotify_user.c
+++ b/fs/inotify_user.c
@@ -34,8 +34,8 @@
34 34
35#include <asm/ioctls.h> 35#include <asm/ioctls.h>
36 36
37static kmem_cache_t *watch_cachep __read_mostly; 37static struct kmem_cache *watch_cachep __read_mostly;
38static kmem_cache_t *event_cachep __read_mostly; 38static struct kmem_cache *event_cachep __read_mostly;
39 39
40static struct vfsmount *inotify_mnt __read_mostly; 40static struct vfsmount *inotify_mnt __read_mostly;
41 41
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index c34b862cdbf2..ea55b6c469ec 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -57,12 +57,12 @@ static void isofs_put_super(struct super_block *sb)
57static void isofs_read_inode(struct inode *); 57static void isofs_read_inode(struct inode *);
58static int isofs_statfs (struct dentry *, struct kstatfs *); 58static int isofs_statfs (struct dentry *, struct kstatfs *);
59 59
60static kmem_cache_t *isofs_inode_cachep; 60static struct kmem_cache *isofs_inode_cachep;
61 61
62static struct inode *isofs_alloc_inode(struct super_block *sb) 62static struct inode *isofs_alloc_inode(struct super_block *sb)
63{ 63{
64 struct iso_inode_info *ei; 64 struct iso_inode_info *ei;
65 ei = kmem_cache_alloc(isofs_inode_cachep, SLAB_KERNEL); 65 ei = kmem_cache_alloc(isofs_inode_cachep, GFP_KERNEL);
66 if (!ei) 66 if (!ei)
67 return NULL; 67 return NULL;
68 return &ei->vfs_inode; 68 return &ei->vfs_inode;
@@ -73,7 +73,7 @@ static void isofs_destroy_inode(struct inode *inode)
73 kmem_cache_free(isofs_inode_cachep, ISOFS_I(inode)); 73 kmem_cache_free(isofs_inode_cachep, ISOFS_I(inode));
74} 74}
75 75
76static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags) 76static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
77{ 77{
78 struct iso_inode_info *ei = foo; 78 struct iso_inode_info *ei = foo;
79 79
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index b85c686b60db..10fff9443938 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -31,7 +31,7 @@
31#include <linux/smp_lock.h> 31#include <linux/smp_lock.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/mm.h> 33#include <linux/mm.h>
34#include <linux/suspend.h> 34#include <linux/freezer.h>
35#include <linux/pagemap.h> 35#include <linux/pagemap.h>
36#include <linux/kthread.h> 36#include <linux/kthread.h>
37#include <linux/poison.h> 37#include <linux/poison.h>
@@ -1630,7 +1630,7 @@ void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
1630#define JBD_MAX_SLABS 5 1630#define JBD_MAX_SLABS 5
1631#define JBD_SLAB_INDEX(size) (size >> 11) 1631#define JBD_SLAB_INDEX(size) (size >> 11)
1632 1632
1633static kmem_cache_t *jbd_slab[JBD_MAX_SLABS]; 1633static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
1634static const char *jbd_slab_names[JBD_MAX_SLABS] = { 1634static const char *jbd_slab_names[JBD_MAX_SLABS] = {
1635 "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k" 1635 "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k"
1636}; 1636};
@@ -1693,7 +1693,7 @@ void jbd_slab_free(void *ptr, size_t size)
1693/* 1693/*
1694 * Journal_head storage management 1694 * Journal_head storage management
1695 */ 1695 */
1696static kmem_cache_t *journal_head_cache; 1696static struct kmem_cache *journal_head_cache;
1697#ifdef CONFIG_JBD_DEBUG 1697#ifdef CONFIG_JBD_DEBUG
1698static atomic_t nr_journal_heads = ATOMIC_INIT(0); 1698static atomic_t nr_journal_heads = ATOMIC_INIT(0);
1699#endif 1699#endif
@@ -1996,7 +1996,7 @@ static void __exit remove_jbd_proc_entry(void)
1996 1996
1997#endif 1997#endif
1998 1998
1999kmem_cache_t *jbd_handle_cache; 1999struct kmem_cache *jbd_handle_cache;
2000 2000
2001static int __init journal_init_handle_cache(void) 2001static int __init journal_init_handle_cache(void)
2002{ 2002{
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index c532429d8d9b..d204ab394f36 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -70,8 +70,8 @@
70#include <linux/init.h> 70#include <linux/init.h>
71#endif 71#endif
72 72
73static kmem_cache_t *revoke_record_cache; 73static struct kmem_cache *revoke_record_cache;
74static kmem_cache_t *revoke_table_cache; 74static struct kmem_cache *revoke_table_cache;
75 75
76/* Each revoke record represents one single revoked block. During 76/* Each revoke record represents one single revoked block. During
77 journal replay, this involves recording the transaction ID of the 77 journal replay, this involves recording the transaction ID of the
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 4f82bcd63e48..d38e0d575e48 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -27,6 +27,8 @@
27#include <linux/mm.h> 27#include <linux/mm.h>
28#include <linux/highmem.h> 28#include <linux/highmem.h>
29 29
30static void __journal_temp_unlink_buffer(struct journal_head *jh);
31
30/* 32/*
31 * get_transaction: obtain a new transaction_t object. 33 * get_transaction: obtain a new transaction_t object.
32 * 34 *
@@ -1499,7 +1501,7 @@ __blist_del_buffer(struct journal_head **list, struct journal_head *jh)
1499 * 1501 *
1500 * Called under j_list_lock. The journal may not be locked. 1502 * Called under j_list_lock. The journal may not be locked.
1501 */ 1503 */
1502void __journal_temp_unlink_buffer(struct journal_head *jh) 1504static void __journal_temp_unlink_buffer(struct journal_head *jh)
1503{ 1505{
1504 struct journal_head **list = NULL; 1506 struct journal_head **list = NULL;
1505 transaction_t *transaction; 1507 transaction_t *transaction;
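The two transaction.c hunks above make __journal_temp_unlink_buffer() static and add a forward declaration near the top of the file so callers defined earlier still see a prototype. A small standalone sketch of that refactor, with hypothetical names:

/*
 * Sketch: a helper used only within one file is made static, with a
 * forward declaration added up top so earlier callers still compile.
 */
#include <stdio.h>

static void temp_unlink_buffer(int *jh);	/* forward declaration added near the top */

static void caller(int *jh)
{
	temp_unlink_buffer(jh);		/* call site precedes the definition */
}

static void temp_unlink_buffer(int *jh)	/* was extern, now static */
{
	*jh = 0;
}

int main(void)
{
	int jh = 1;

	caller(&jh);
	printf("%d\n", jh);
	return 0;
}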
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 70b2ae1ef281..6bd8005e3d34 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -248,8 +248,12 @@ write_out_data:
248 bufs = 0; 248 bufs = 0;
249 goto write_out_data; 249 goto write_out_data;
250 } 250 }
251 } 251 } else if (!locked && buffer_locked(bh)) {
252 else { 252 __jbd2_journal_file_buffer(jh, commit_transaction,
253 BJ_Locked);
254 jbd_unlock_bh_state(bh);
255 put_bh(bh);
256 } else {
253 BUFFER_TRACE(bh, "writeout complete: unfile"); 257 BUFFER_TRACE(bh, "writeout complete: unfile");
254 __jbd2_journal_unfile_buffer(jh); 258 __jbd2_journal_unfile_buffer(jh);
255 jbd_unlock_bh_state(bh); 259 jbd_unlock_bh_state(bh);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index c60f378b0f76..44fc32bfd7f1 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -31,7 +31,7 @@
31#include <linux/smp_lock.h> 31#include <linux/smp_lock.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/mm.h> 33#include <linux/mm.h>
34#include <linux/suspend.h> 34#include <linux/freezer.h>
35#include <linux/pagemap.h> 35#include <linux/pagemap.h>
36#include <linux/kthread.h> 36#include <linux/kthread.h>
37#include <linux/poison.h> 37#include <linux/poison.h>
@@ -1641,7 +1641,7 @@ void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
1641#define JBD_MAX_SLABS 5 1641#define JBD_MAX_SLABS 5
1642#define JBD_SLAB_INDEX(size) (size >> 11) 1642#define JBD_SLAB_INDEX(size) (size >> 11)
1643 1643
1644static kmem_cache_t *jbd_slab[JBD_MAX_SLABS]; 1644static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
1645static const char *jbd_slab_names[JBD_MAX_SLABS] = { 1645static const char *jbd_slab_names[JBD_MAX_SLABS] = {
1646 "jbd2_1k", "jbd2_2k", "jbd2_4k", NULL, "jbd2_8k" 1646 "jbd2_1k", "jbd2_2k", "jbd2_4k", NULL, "jbd2_8k"
1647}; 1647};
@@ -1704,7 +1704,7 @@ void jbd2_slab_free(void *ptr, size_t size)
1704/* 1704/*
1705 * Journal_head storage management 1705 * Journal_head storage management
1706 */ 1706 */
1707static kmem_cache_t *jbd2_journal_head_cache; 1707static struct kmem_cache *jbd2_journal_head_cache;
1708#ifdef CONFIG_JBD_DEBUG 1708#ifdef CONFIG_JBD_DEBUG
1709static atomic_t nr_journal_heads = ATOMIC_INIT(0); 1709static atomic_t nr_journal_heads = ATOMIC_INIT(0);
1710#endif 1710#endif
@@ -2007,7 +2007,7 @@ static void __exit jbd2_remove_jbd_proc_entry(void)
2007 2007
2008#endif 2008#endif
2009 2009
2010kmem_cache_t *jbd2_handle_cache; 2010struct kmem_cache *jbd2_handle_cache;
2011 2011
2012static int __init journal_init_handle_cache(void) 2012static int __init journal_init_handle_cache(void)
2013{ 2013{
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 380d19917f37..f506646ad0ff 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -70,8 +70,8 @@
70#include <linux/init.h> 70#include <linux/init.h>
71#endif 71#endif
72 72
73static kmem_cache_t *jbd2_revoke_record_cache; 73static struct kmem_cache *jbd2_revoke_record_cache;
74static kmem_cache_t *jbd2_revoke_table_cache; 74static struct kmem_cache *jbd2_revoke_table_cache;
75 75
76/* Each revoke record represents one single revoked block. During 76/* Each revoke record represents one single revoked block. During
77 journal replay, this involves recording the transaction ID of the 77 journal replay, this involves recording the transaction ID of the
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index c051a94c8a97..3a8700153cb0 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -27,6 +27,8 @@
27#include <linux/mm.h> 27#include <linux/mm.h>
28#include <linux/highmem.h> 28#include <linux/highmem.h>
29 29
30static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
31
30/* 32/*
31 * jbd2_get_transaction: obtain a new transaction_t object. 33 * jbd2_get_transaction: obtain a new transaction_t object.
32 * 34 *
diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c
index 3f7899ea4cba..9f15bce92022 100644
--- a/fs/jffs/inode-v23.c
+++ b/fs/jffs/inode-v23.c
@@ -61,8 +61,8 @@ static const struct file_operations jffs_dir_operations;
61static struct inode_operations jffs_dir_inode_operations; 61static struct inode_operations jffs_dir_inode_operations;
62static const struct address_space_operations jffs_address_operations; 62static const struct address_space_operations jffs_address_operations;
63 63
64kmem_cache_t *node_cache = NULL; 64struct kmem_cache *node_cache = NULL;
65kmem_cache_t *fm_cache = NULL; 65struct kmem_cache *fm_cache = NULL;
66 66
67/* Called by the VFS at mount time to initialize the whole file system. */ 67/* Called by the VFS at mount time to initialize the whole file system. */
68static int jffs_fill_super(struct super_block *sb, void *data, int silent) 68static int jffs_fill_super(struct super_block *sb, void *data, int silent)
diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c
index 4a543e114970..d0e783f199ea 100644
--- a/fs/jffs/intrep.c
+++ b/fs/jffs/intrep.c
@@ -66,6 +66,7 @@
66#include <linux/smp_lock.h> 66#include <linux/smp_lock.h>
67#include <linux/time.h> 67#include <linux/time.h>
68#include <linux/ctype.h> 68#include <linux/ctype.h>
69#include <linux/freezer.h>
69 70
70#include "intrep.h" 71#include "intrep.h"
71#include "jffs_fm.h" 72#include "jffs_fm.h"
@@ -591,7 +592,7 @@ jffs_add_virtual_root(struct jffs_control *c)
591 D2(printk("jffs_add_virtual_root(): " 592 D2(printk("jffs_add_virtual_root(): "
592 "Creating a virtual root directory.\n")); 593 "Creating a virtual root directory.\n"));
593 594
594 if (!(root = kmalloc(sizeof(struct jffs_file), GFP_KERNEL))) { 595 if (!(root = kzalloc(sizeof(struct jffs_file), GFP_KERNEL))) {
595 return -ENOMEM; 596 return -ENOMEM;
596 } 597 }
597 no_jffs_file++; 598 no_jffs_file++;
@@ -603,7 +604,6 @@ jffs_add_virtual_root(struct jffs_control *c)
603 DJM(no_jffs_node++); 604 DJM(no_jffs_node++);
604 memset(node, 0, sizeof(struct jffs_node)); 605 memset(node, 0, sizeof(struct jffs_node));
605 node->ino = JFFS_MIN_INO; 606 node->ino = JFFS_MIN_INO;
606 memset(root, 0, sizeof(struct jffs_file));
607 root->ino = JFFS_MIN_INO; 607 root->ino = JFFS_MIN_INO;
608 root->mode = S_IFDIR | S_IRWXU | S_IRGRP 608 root->mode = S_IFDIR | S_IRWXU | S_IRGRP
609 | S_IXGRP | S_IROTH | S_IXOTH; 609 | S_IXGRP | S_IROTH | S_IXOTH;
diff --git a/fs/jffs/jffs_fm.c b/fs/jffs/jffs_fm.c
index 29b68d939bd9..077258b2103e 100644
--- a/fs/jffs/jffs_fm.c
+++ b/fs/jffs/jffs_fm.c
@@ -29,8 +29,8 @@ static int jffs_mark_obsolete(struct jffs_fmcontrol *fmc, __u32 fm_offset);
29static struct jffs_fm *jffs_alloc_fm(void); 29static struct jffs_fm *jffs_alloc_fm(void);
30static void jffs_free_fm(struct jffs_fm *n); 30static void jffs_free_fm(struct jffs_fm *n);
31 31
32extern kmem_cache_t *fm_cache; 32extern struct kmem_cache *fm_cache;
33extern kmem_cache_t *node_cache; 33extern struct kmem_cache *node_cache;
34 34
35#if CONFIG_JFFS_FS_VERBOSE > 0 35#if CONFIG_JFFS_FS_VERBOSE > 0
36void 36void
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index ff2a872e80e7..6eb3daebd563 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -16,6 +16,7 @@
16#include <linux/mtd/mtd.h> 16#include <linux/mtd/mtd.h>
17#include <linux/completion.h> 17#include <linux/completion.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/freezer.h>
19#include "nodelist.h" 20#include "nodelist.h"
20 21
21 22
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c
index 33f291005012..83f9881ec4cc 100644
--- a/fs/jffs2/malloc.c
+++ b/fs/jffs2/malloc.c
@@ -19,16 +19,16 @@
19 19
20/* These are initialised to NULL in the kernel startup code. 20/* These are initialised to NULL in the kernel startup code.
21 If you're porting to other operating systems, beware */ 21 If you're porting to other operating systems, beware */
22static kmem_cache_t *full_dnode_slab; 22static struct kmem_cache *full_dnode_slab;
23static kmem_cache_t *raw_dirent_slab; 23static struct kmem_cache *raw_dirent_slab;
24static kmem_cache_t *raw_inode_slab; 24static struct kmem_cache *raw_inode_slab;
25static kmem_cache_t *tmp_dnode_info_slab; 25static struct kmem_cache *tmp_dnode_info_slab;
26static kmem_cache_t *raw_node_ref_slab; 26static struct kmem_cache *raw_node_ref_slab;
27static kmem_cache_t *node_frag_slab; 27static struct kmem_cache *node_frag_slab;
28static kmem_cache_t *inode_cache_slab; 28static struct kmem_cache *inode_cache_slab;
29#ifdef CONFIG_JFFS2_FS_XATTR 29#ifdef CONFIG_JFFS2_FS_XATTR
30static kmem_cache_t *xattr_datum_cache; 30static struct kmem_cache *xattr_datum_cache;
31static kmem_cache_t *xattr_ref_cache; 31static struct kmem_cache *xattr_ref_cache;
32#endif 32#endif
33 33
34int __init jffs2_create_slab_caches(void) 34int __init jffs2_create_slab_caches(void)
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index bc4b8106a490..7deb78254021 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -28,12 +28,12 @@
28 28
29static void jffs2_put_super(struct super_block *); 29static void jffs2_put_super(struct super_block *);
30 30
31static kmem_cache_t *jffs2_inode_cachep; 31static struct kmem_cache *jffs2_inode_cachep;
32 32
33static struct inode *jffs2_alloc_inode(struct super_block *sb) 33static struct inode *jffs2_alloc_inode(struct super_block *sb)
34{ 34{
35 struct jffs2_inode_info *ei; 35 struct jffs2_inode_info *ei;
36 ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, SLAB_KERNEL); 36 ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, GFP_KERNEL);
37 if (!ei) 37 if (!ei)
38 return NULL; 38 return NULL;
39 return &ei->vfs_inode; 39 return &ei->vfs_inode;
@@ -44,7 +44,7 @@ static void jffs2_destroy_inode(struct inode *inode)
44 kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode)); 44 kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
45} 45}
46 46
47static void jffs2_i_init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 47static void jffs2_i_init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
48{ 48{
49 struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo; 49 struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo;
50 50
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index b89c9aba0466..5065baa530b6 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -67,7 +67,7 @@
67#include <linux/kthread.h> 67#include <linux/kthread.h>
68#include <linux/buffer_head.h> /* for sync_blockdev() */ 68#include <linux/buffer_head.h> /* for sync_blockdev() */
69#include <linux/bio.h> 69#include <linux/bio.h>
70#include <linux/suspend.h> 70#include <linux/freezer.h>
71#include <linux/delay.h> 71#include <linux/delay.h>
72#include <linux/mutex.h> 72#include <linux/mutex.h>
73#include "jfs_incore.h" 73#include "jfs_incore.h"
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 0cccd1c39d75..b1a1c7296014 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -74,7 +74,7 @@ static inline void lock_metapage(struct metapage *mp)
74} 74}
75 75
76#define METAPOOL_MIN_PAGES 32 76#define METAPOOL_MIN_PAGES 32
77static kmem_cache_t *metapage_cache; 77static struct kmem_cache *metapage_cache;
78static mempool_t *metapage_mempool; 78static mempool_t *metapage_mempool;
79 79
80#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE) 80#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)
@@ -180,7 +180,7 @@ static inline void remove_metapage(struct page *page, struct metapage *mp)
180 180
181#endif 181#endif
182 182
183static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) 183static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
184{ 184{
185 struct metapage *mp = (struct metapage *)foo; 185 struct metapage *mp = (struct metapage *)foo;
186 186
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index 81f6f04af192..d558e51b0df8 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -46,7 +46,7 @@
46#include <linux/vmalloc.h> 46#include <linux/vmalloc.h>
47#include <linux/smp_lock.h> 47#include <linux/smp_lock.h>
48#include <linux/completion.h> 48#include <linux/completion.h>
49#include <linux/suspend.h> 49#include <linux/freezer.h>
50#include <linux/module.h> 50#include <linux/module.h>
51#include <linux/moduleparam.h> 51#include <linux/moduleparam.h>
52#include <linux/kthread.h> 52#include <linux/kthread.h>
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 9c1c6e0e633d..846ac8f34513 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -44,7 +44,7 @@ MODULE_DESCRIPTION("The Journaled Filesystem (JFS)");
44MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM"); 44MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
45MODULE_LICENSE("GPL"); 45MODULE_LICENSE("GPL");
46 46
47static kmem_cache_t * jfs_inode_cachep; 47static struct kmem_cache * jfs_inode_cachep;
48 48
49static struct super_operations jfs_super_operations; 49static struct super_operations jfs_super_operations;
50static struct export_operations jfs_export_operations; 50static struct export_operations jfs_export_operations;
@@ -93,7 +93,7 @@ void jfs_error(struct super_block *sb, const char * function, ...)
93 va_list args; 93 va_list args;
94 94
95 va_start(args, function); 95 va_start(args, function);
96 vsprintf(error_buf, function, args); 96 vsnprintf(error_buf, sizeof(error_buf), function, args);
97 va_end(args); 97 va_end(args);
98 98
99 printk(KERN_ERR "ERROR: (device %s): %s\n", sb->s_id, error_buf); 99 printk(KERN_ERR "ERROR: (device %s): %s\n", sb->s_id, error_buf);
@@ -748,7 +748,7 @@ static struct file_system_type jfs_fs_type = {
748 .fs_flags = FS_REQUIRES_DEV, 748 .fs_flags = FS_REQUIRES_DEV,
749}; 749};
750 750
751static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags) 751static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
752{ 752{
753 struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo; 753 struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;
754 754
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 41c983a05294..497c3cd59d52 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -13,6 +13,7 @@
13#include <linux/nfs_fs.h> 13#include <linux/nfs_fs.h>
14#include <linux/utsname.h> 14#include <linux/utsname.h>
15#include <linux/smp_lock.h> 15#include <linux/smp_lock.h>
16#include <linux/freezer.h>
16#include <linux/sunrpc/clnt.h> 17#include <linux/sunrpc/clnt.h>
17#include <linux/sunrpc/svc.h> 18#include <linux/sunrpc/svc.h>
18#include <linux/lockd/lockd.h> 19#include <linux/lockd/lockd.h>
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index fb24a9730345..3d4610c2a266 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -36,34 +36,14 @@ static DEFINE_MUTEX(nlm_host_mutex);
36static void nlm_gc_hosts(void); 36static void nlm_gc_hosts(void);
37static struct nsm_handle * __nsm_find(const struct sockaddr_in *, 37static struct nsm_handle * __nsm_find(const struct sockaddr_in *,
38 const char *, int, int); 38 const char *, int, int);
39 39static struct nsm_handle * nsm_find(const struct sockaddr_in *sin,
40/* 40 const char *hostname,
41 * Find an NLM server handle in the cache. If there is none, create it. 41 int hostname_len);
42 */
43struct nlm_host *
44nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
45 const char *hostname, int hostname_len)
46{
47 return nlm_lookup_host(0, sin, proto, version,
48 hostname, hostname_len);
49}
50
51/*
52 * Find an NLM client handle in the cache. If there is none, create it.
53 */
54struct nlm_host *
55nlmsvc_lookup_host(struct svc_rqst *rqstp,
56 const char *hostname, int hostname_len)
57{
58 return nlm_lookup_host(1, &rqstp->rq_addr,
59 rqstp->rq_prot, rqstp->rq_vers,
60 hostname, hostname_len);
61}
62 42
63/* 43/*
64 * Common host lookup routine for server & client 44 * Common host lookup routine for server & client
65 */ 45 */
66struct nlm_host * 46static struct nlm_host *
67nlm_lookup_host(int server, const struct sockaddr_in *sin, 47nlm_lookup_host(int server, const struct sockaddr_in *sin,
68 int proto, int version, 48 int proto, int version,
69 const char *hostname, 49 const char *hostname,
@@ -195,6 +175,29 @@ nlm_destroy_host(struct nlm_host *host)
195} 175}
196 176
197/* 177/*
178 * Find an NLM server handle in the cache. If there is none, create it.
179 */
180struct nlm_host *
181nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
182 const char *hostname, int hostname_len)
183{
184 return nlm_lookup_host(0, sin, proto, version,
185 hostname, hostname_len);
186}
187
188/*
189 * Find an NLM client handle in the cache. If there is none, create it.
190 */
191struct nlm_host *
192nlmsvc_lookup_host(struct svc_rqst *rqstp,
193 const char *hostname, int hostname_len)
194{
195 return nlm_lookup_host(1, &rqstp->rq_addr,
196 rqstp->rq_prot, rqstp->rq_vers,
197 hostname, hostname_len);
198}
199
200/*
198 * Create the NLM RPC client for an NLM peer 201 * Create the NLM RPC client for an NLM peer
199 */ 202 */
200struct rpc_clnt * 203struct rpc_clnt *
@@ -495,7 +498,7 @@ out:
495 return nsm; 498 return nsm;
496} 499}
497 500
498struct nsm_handle * 501static struct nsm_handle *
499nsm_find(const struct sockaddr_in *sin, const char *hostname, int hostname_len) 502nsm_find(const struct sockaddr_in *sin, const char *hostname, int hostname_len)
500{ 503{
501 return __nsm_find(sin, hostname, hostname_len, 1); 504 return __nsm_find(sin, hostname, hostname_len, 1);
diff --git a/fs/locks.c b/fs/locks.c
index e0b6a80649a0..1cb0c57fedbd 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -142,12 +142,12 @@ int lease_break_time = 45;
142static LIST_HEAD(file_lock_list); 142static LIST_HEAD(file_lock_list);
143static LIST_HEAD(blocked_list); 143static LIST_HEAD(blocked_list);
144 144
145static kmem_cache_t *filelock_cache __read_mostly; 145static struct kmem_cache *filelock_cache __read_mostly;
146 146
147/* Allocate an empty lock structure. */ 147/* Allocate an empty lock structure. */
148static struct file_lock *locks_alloc_lock(void) 148static struct file_lock *locks_alloc_lock(void)
149{ 149{
150 return kmem_cache_alloc(filelock_cache, SLAB_KERNEL); 150 return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
151} 151}
152 152
153static void locks_release_private(struct file_lock *fl) 153static void locks_release_private(struct file_lock *fl)
@@ -199,7 +199,7 @@ EXPORT_SYMBOL(locks_init_lock);
199 * Initialises the fields of the file lock which are invariant for 199 * Initialises the fields of the file lock which are invariant for
200 * free file_locks. 200 * free file_locks.
201 */ 201 */
202static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags) 202static void init_once(void *foo, struct kmem_cache *cache, unsigned long flags)
203{ 203{
204 struct file_lock *lock = (struct file_lock *) foo; 204 struct file_lock *lock = (struct file_lock *) foo;
205 205
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 0ff71256e65b..deeb9dc062d9 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -85,7 +85,7 @@ struct mb_cache {
85#ifndef MB_CACHE_INDEXES_COUNT 85#ifndef MB_CACHE_INDEXES_COUNT
86 int c_indexes_count; 86 int c_indexes_count;
87#endif 87#endif
88 kmem_cache_t *c_entry_cache; 88 struct kmem_cache *c_entry_cache;
89 struct list_head *c_block_hash; 89 struct list_head *c_block_hash;
90 struct list_head *c_indexes_hash[0]; 90 struct list_head *c_indexes_hash[0];
91}; 91};
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 1e36bae4d0eb..629e09b38c5c 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -51,12 +51,12 @@ static void minix_put_super(struct super_block *sb)
51 return; 51 return;
52} 52}
53 53
54static kmem_cache_t * minix_inode_cachep; 54static struct kmem_cache * minix_inode_cachep;
55 55
56static struct inode *minix_alloc_inode(struct super_block *sb) 56static struct inode *minix_alloc_inode(struct super_block *sb)
57{ 57{
58 struct minix_inode_info *ei; 58 struct minix_inode_info *ei;
59 ei = (struct minix_inode_info *)kmem_cache_alloc(minix_inode_cachep, SLAB_KERNEL); 59 ei = (struct minix_inode_info *)kmem_cache_alloc(minix_inode_cachep, GFP_KERNEL);
60 if (!ei) 60 if (!ei)
61 return NULL; 61 return NULL;
62 return &ei->vfs_inode; 62 return &ei->vfs_inode;
@@ -67,7 +67,7 @@ static void minix_destroy_inode(struct inode *inode)
67 kmem_cache_free(minix_inode_cachep, minix_i(inode)); 67 kmem_cache_free(minix_inode_cachep, minix_i(inode));
68} 68}
69 69
70static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 70static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
71{ 71{
72 struct minix_inode_info *ei = (struct minix_inode_info *) foo; 72 struct minix_inode_info *ei = (struct minix_inode_info *) foo;
73 73
diff --git a/fs/namei.c b/fs/namei.c
index 28d49b301d55..db1bca26d88c 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -249,9 +249,11 @@ int permission(struct inode *inode, int mask, struct nameidata *nd)
249 249
250 /* 250 /*
251 * MAY_EXEC on regular files requires special handling: We override 251 * MAY_EXEC on regular files requires special handling: We override
252 * filesystem execute permissions if the mode bits aren't set. 252 * filesystem execute permissions if the mode bits aren't set or
253 * the fs is mounted with the "noexec" flag.
253 */ 254 */
254 if ((mask & MAY_EXEC) && S_ISREG(mode) && !(mode & S_IXUGO)) 255 if ((mask & MAY_EXEC) && S_ISREG(mode) && (!(mode & S_IXUGO) ||
256 (nd && nd->mnt && (nd->mnt->mnt_flags & MNT_NOEXEC))))
255 return -EACCES; 257 return -EACCES;
256 258
257 /* Ordinary permission routines do not understand MAY_APPEND. */ 259 /* Ordinary permission routines do not understand MAY_APPEND. */
@@ -1996,8 +1998,7 @@ asmlinkage long sys_mkdir(const char __user *pathname, int mode)
1996void dentry_unhash(struct dentry *dentry) 1998void dentry_unhash(struct dentry *dentry)
1997{ 1999{
1998 dget(dentry); 2000 dget(dentry);
1999 if (atomic_read(&dentry->d_count)) 2001 shrink_dcache_parent(dentry);
2000 shrink_dcache_parent(dentry);
2001 spin_lock(&dcache_lock); 2002 spin_lock(&dcache_lock);
2002 spin_lock(&dentry->d_lock); 2003 spin_lock(&dentry->d_lock);
2003 if (atomic_read(&dentry->d_count) == 2) 2004 if (atomic_read(&dentry->d_count) == 2)
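The fs/namei.c hunk above widens the MAY_EXEC special case in permission(): execute on a regular file is now refused either when no execute mode bit is set or when the file sits on a mount carrying MNT_NOEXEC. A userspace model of that predicate; the flag values are stand-ins since the kernel constants are not exported, and S_IXUGO is spelled out from the standard mode bits:

/*
 * Userspace model of the new MAY_EXEC check (not kernel code):
 * exec on a regular file is denied when no x bit is set, or when
 * the mount is flagged noexec.
 */
#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>

#define MAY_EXEC   0x01				/* stand-in for the kernel's MAY_EXEC */
#define MNT_NOEXEC 0x02				/* stand-in for the kernel's MNT_NOEXEC */
#define S_IXUGO    (S_IXUSR | S_IXGRP | S_IXOTH)	/* mirrors the kernel macro */

static bool exec_denied(int mask, mode_t mode, int mnt_flags)
{
	return (mask & MAY_EXEC) && S_ISREG(mode) &&
	       (!(mode & S_IXUGO) || (mnt_flags & MNT_NOEXEC));
}

int main(void)
{
	/* 0755 regular file on a noexec mount: denied */
	printf("%d\n", exec_denied(MAY_EXEC, S_IFREG | 0755, MNT_NOEXEC));
	/* 0644 regular file on a normal mount: denied (no x bits) */
	printf("%d\n", exec_denied(MAY_EXEC, S_IFREG | 0644, 0));
	/* 0755 regular file on a normal mount: allowed */
	printf("%d\n", exec_denied(MAY_EXEC, S_IFREG | 0755, 0));
	return 0;
}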
diff --git a/fs/namespace.c b/fs/namespace.c
index 55442a6cf221..b00ac84ebbdd 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -36,7 +36,7 @@ static int event;
36 36
37static struct list_head *mount_hashtable __read_mostly; 37static struct list_head *mount_hashtable __read_mostly;
38static int hash_mask __read_mostly, hash_bits __read_mostly; 38static int hash_mask __read_mostly, hash_bits __read_mostly;
39static kmem_cache_t *mnt_cache __read_mostly; 39static struct kmem_cache *mnt_cache __read_mostly;
40static struct rw_semaphore namespace_sem; 40static struct rw_semaphore namespace_sem;
41 41
42/* /sys/fs */ 42/* /sys/fs */
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 72dad552aa00..fae53243bb92 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -40,12 +40,12 @@ static void ncp_delete_inode(struct inode *);
40static void ncp_put_super(struct super_block *); 40static void ncp_put_super(struct super_block *);
41static int ncp_statfs(struct dentry *, struct kstatfs *); 41static int ncp_statfs(struct dentry *, struct kstatfs *);
42 42
43static kmem_cache_t * ncp_inode_cachep; 43static struct kmem_cache * ncp_inode_cachep;
44 44
45static struct inode *ncp_alloc_inode(struct super_block *sb) 45static struct inode *ncp_alloc_inode(struct super_block *sb)
46{ 46{
47 struct ncp_inode_info *ei; 47 struct ncp_inode_info *ei;
48 ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, SLAB_KERNEL); 48 ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, GFP_KERNEL);
49 if (!ei) 49 if (!ei)
50 return NULL; 50 return NULL;
51 return &ei->vfs_inode; 51 return &ei->vfs_inode;
@@ -56,7 +56,7 @@ static void ncp_destroy_inode(struct inode *inode)
56 kmem_cache_free(ncp_inode_cachep, NCP_FINFO(inode)); 56 kmem_cache_free(ncp_inode_cachep, NCP_FINFO(inode));
57} 57}
58 58
59static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 59static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
60{ 60{
61 struct ncp_inode_info *ei = (struct ncp_inode_info *) foo; 61 struct ncp_inode_info *ei = (struct ncp_inode_info *) foo;
62 62
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 784bbb54e6c1..f9d678f4ae06 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -58,7 +58,7 @@
58 58
59#define NFSDBG_FACILITY NFSDBG_VFS 59#define NFSDBG_FACILITY NFSDBG_VFS
60 60
61static kmem_cache_t *nfs_direct_cachep; 61static struct kmem_cache *nfs_direct_cachep;
62 62
63/* 63/*
64 * This represents a set of asynchronous requests that we're waiting on 64 * This represents a set of asynchronous requests that we're waiting on
@@ -143,7 +143,7 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
143{ 143{
144 struct nfs_direct_req *dreq; 144 struct nfs_direct_req *dreq;
145 145
146 dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL); 146 dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL);
147 if (!dreq) 147 if (!dreq)
148 return NULL; 148 return NULL;
149 149
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 7c32187f953e..36680d1061b0 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -55,7 +55,7 @@ static int nfs_update_inode(struct inode *, struct nfs_fattr *);
55 55
56static void nfs_zap_acl_cache(struct inode *); 56static void nfs_zap_acl_cache(struct inode *);
57 57
58static kmem_cache_t * nfs_inode_cachep; 58static struct kmem_cache * nfs_inode_cachep;
59 59
60static inline unsigned long 60static inline unsigned long
61nfs_fattr_to_ino_t(struct nfs_fattr *fattr) 61nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
@@ -1080,7 +1080,7 @@ void nfs4_clear_inode(struct inode *inode)
1080struct inode *nfs_alloc_inode(struct super_block *sb) 1080struct inode *nfs_alloc_inode(struct super_block *sb)
1081{ 1081{
1082 struct nfs_inode *nfsi; 1082 struct nfs_inode *nfsi;
1083 nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, SLAB_KERNEL); 1083 nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, GFP_KERNEL);
1084 if (!nfsi) 1084 if (!nfsi)
1085 return NULL; 1085 return NULL;
1086 nfsi->flags = 0UL; 1086 nfsi->flags = 0UL;
@@ -1111,7 +1111,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
1111#endif 1111#endif
1112} 1112}
1113 1113
1114static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 1114static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
1115{ 1115{
1116 struct nfs_inode *nfsi = (struct nfs_inode *) foo; 1116 struct nfs_inode *nfsi = (struct nfs_inode *) foo;
1117 1117
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index bc9fab68b29c..ca4b1d4ff42b 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -21,13 +21,13 @@
21 21
22#define NFS_PARANOIA 1 22#define NFS_PARANOIA 1
23 23
24static kmem_cache_t *nfs_page_cachep; 24static struct kmem_cache *nfs_page_cachep;
25 25
26static inline struct nfs_page * 26static inline struct nfs_page *
27nfs_page_alloc(void) 27nfs_page_alloc(void)
28{ 28{
29 struct nfs_page *p; 29 struct nfs_page *p;
30 p = kmem_cache_alloc(nfs_page_cachep, SLAB_KERNEL); 30 p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
31 if (p) { 31 if (p) {
32 memset(p, 0, sizeof(*p)); 32 memset(p, 0, sizeof(*p));
33 INIT_LIST_HEAD(&p->wb_list); 33 INIT_LIST_HEAD(&p->wb_list);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 05cca6609977..a9c26521a9e2 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -39,7 +39,7 @@ static int nfs_pagein_one(struct list_head *, struct inode *);
39static const struct rpc_call_ops nfs_read_partial_ops; 39static const struct rpc_call_ops nfs_read_partial_ops;
40static const struct rpc_call_ops nfs_read_full_ops; 40static const struct rpc_call_ops nfs_read_full_ops;
41 41
42static kmem_cache_t *nfs_rdata_cachep; 42static struct kmem_cache *nfs_rdata_cachep;
43static mempool_t *nfs_rdata_mempool; 43static mempool_t *nfs_rdata_mempool;
44 44
45#define MIN_POOL_READ (32) 45#define MIN_POOL_READ (32)
@@ -47,7 +47,7 @@ static mempool_t *nfs_rdata_mempool;
47struct nfs_read_data *nfs_readdata_alloc(size_t len) 47struct nfs_read_data *nfs_readdata_alloc(size_t len)
48{ 48{
49 unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; 49 unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
50 struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, SLAB_NOFS); 50 struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS);
51 51
52 if (p) { 52 if (p) {
53 memset(p, 0, sizeof(*p)); 53 memset(p, 0, sizeof(*p));
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 7f3844d2bf36..594eb16879ef 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -85,7 +85,7 @@ static const struct rpc_call_ops nfs_write_partial_ops;
85static const struct rpc_call_ops nfs_write_full_ops; 85static const struct rpc_call_ops nfs_write_full_ops;
86static const struct rpc_call_ops nfs_commit_ops; 86static const struct rpc_call_ops nfs_commit_ops;
87 87
88static kmem_cache_t *nfs_wdata_cachep; 88static struct kmem_cache *nfs_wdata_cachep;
89static mempool_t *nfs_wdata_mempool; 89static mempool_t *nfs_wdata_mempool;
90static mempool_t *nfs_commit_mempool; 90static mempool_t *nfs_commit_mempool;
91 91
@@ -93,7 +93,7 @@ static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);
93 93
94struct nfs_write_data *nfs_commit_alloc(void) 94struct nfs_write_data *nfs_commit_alloc(void)
95{ 95{
96 struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS); 96 struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
97 97
98 if (p) { 98 if (p) {
99 memset(p, 0, sizeof(*p)); 99 memset(p, 0, sizeof(*p));
@@ -118,7 +118,7 @@ void nfs_commit_free(struct nfs_write_data *wdata)
118struct nfs_write_data *nfs_writedata_alloc(size_t len) 118struct nfs_write_data *nfs_writedata_alloc(size_t len)
119{ 119{
120 unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; 120 unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
121 struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS); 121 struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
122 122
123 if (p) { 123 if (p) {
124 memset(p, 0, sizeof(*p)); 124 memset(p, 0, sizeof(*p));
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index b4baca3053c3..277df40f098d 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -24,10 +24,6 @@
24 24
25#define NFSDDBG_FACILITY NFSDDBG_XDR 25#define NFSDDBG_FACILITY NFSDDBG_XDR
26 26
27#ifdef NFSD_OPTIMIZE_SPACE
28# define inline
29#endif
30
31 27
32/* 28/*
33 * Mapping of S_IF* types to NFS file types 29 * Mapping of S_IF* types to NFS file types
@@ -42,14 +38,14 @@ static u32 nfs3_ftypes[] = {
42/* 38/*
43 * XDR functions for basic NFS types 39 * XDR functions for basic NFS types
44 */ 40 */
45static inline __be32 * 41static __be32 *
46encode_time3(__be32 *p, struct timespec *time) 42encode_time3(__be32 *p, struct timespec *time)
47{ 43{
48 *p++ = htonl((u32) time->tv_sec); *p++ = htonl(time->tv_nsec); 44 *p++ = htonl((u32) time->tv_sec); *p++ = htonl(time->tv_nsec);
49 return p; 45 return p;
50} 46}
51 47
52static inline __be32 * 48static __be32 *
53decode_time3(__be32 *p, struct timespec *time) 49decode_time3(__be32 *p, struct timespec *time)
54{ 50{
55 time->tv_sec = ntohl(*p++); 51 time->tv_sec = ntohl(*p++);
@@ -57,7 +53,7 @@ decode_time3(__be32 *p, struct timespec *time)
57 return p; 53 return p;
58} 54}
59 55
60static inline __be32 * 56static __be32 *
61decode_fh(__be32 *p, struct svc_fh *fhp) 57decode_fh(__be32 *p, struct svc_fh *fhp)
62{ 58{
63 unsigned int size; 59 unsigned int size;
@@ -77,7 +73,7 @@ __be32 *nfs3svc_decode_fh(__be32 *p, struct svc_fh *fhp)
77 return decode_fh(p, fhp); 73 return decode_fh(p, fhp);
78} 74}
79 75
80static inline __be32 * 76static __be32 *
81encode_fh(__be32 *p, struct svc_fh *fhp) 77encode_fh(__be32 *p, struct svc_fh *fhp)
82{ 78{
83 unsigned int size = fhp->fh_handle.fh_size; 79 unsigned int size = fhp->fh_handle.fh_size;
@@ -91,7 +87,7 @@ encode_fh(__be32 *p, struct svc_fh *fhp)
91 * Decode a file name and make sure that the path contains 87 * Decode a file name and make sure that the path contains
92 * no slashes or null bytes. 88 * no slashes or null bytes.
93 */ 89 */
94static inline __be32 * 90static __be32 *
95decode_filename(__be32 *p, char **namp, int *lenp) 91decode_filename(__be32 *p, char **namp, int *lenp)
96{ 92{
97 char *name; 93 char *name;
@@ -107,7 +103,7 @@ decode_filename(__be32 *p, char **namp, int *lenp)
107 return p; 103 return p;
108} 104}
109 105
110static inline __be32 * 106static __be32 *
111decode_sattr3(__be32 *p, struct iattr *iap) 107decode_sattr3(__be32 *p, struct iattr *iap)
112{ 108{
113 u32 tmp; 109 u32 tmp;
@@ -153,7 +149,7 @@ decode_sattr3(__be32 *p, struct iattr *iap)
153 return p; 149 return p;
154} 150}
155 151
156static inline __be32 * 152static __be32 *
157encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, 153encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
158 struct kstat *stat) 154 struct kstat *stat)
159{ 155{
@@ -186,7 +182,7 @@ encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
186 return p; 182 return p;
187} 183}
188 184
189static inline __be32 * 185static __be32 *
190encode_saved_post_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) 186encode_saved_post_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
191{ 187{
192 struct inode *inode = fhp->fh_dentry->d_inode; 188 struct inode *inode = fhp->fh_dentry->d_inode;
@@ -776,7 +772,7 @@ nfs3svc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p,
776 return xdr_ressize_check(rqstp, p); 772 return xdr_ressize_check(rqstp, p);
777} 773}
778 774
779static inline __be32 * 775static __be32 *
780encode_entry_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, 776encode_entry_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name,
781 int namlen, ino_t ino) 777 int namlen, ino_t ino)
782{ 778{
@@ -790,7 +786,7 @@ encode_entry_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name,
790 return p; 786 return p;
791} 787}
792 788
793static inline __be32 * 789static __be32 *
794encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, 790encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p,
795 struct svc_fh *fhp) 791 struct svc_fh *fhp)
796{ 792{
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index e431e93ab503..640c92b2a9f7 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -84,10 +84,10 @@ static void nfs4_set_recdir(char *recdir);
84 */ 84 */
85static DEFINE_MUTEX(client_mutex); 85static DEFINE_MUTEX(client_mutex);
86 86
87static kmem_cache_t *stateowner_slab = NULL; 87static struct kmem_cache *stateowner_slab = NULL;
88static kmem_cache_t *file_slab = NULL; 88static struct kmem_cache *file_slab = NULL;
89static kmem_cache_t *stateid_slab = NULL; 89static struct kmem_cache *stateid_slab = NULL;
90static kmem_cache_t *deleg_slab = NULL; 90static struct kmem_cache *deleg_slab = NULL;
91 91
92void 92void
93nfs4_lock_state(void) 93nfs4_lock_state(void)
@@ -1003,7 +1003,7 @@ alloc_init_file(struct inode *ino)
1003} 1003}
1004 1004
1005static void 1005static void
1006nfsd4_free_slab(kmem_cache_t **slab) 1006nfsd4_free_slab(struct kmem_cache **slab)
1007{ 1007{
1008 if (*slab == NULL) 1008 if (*slab == NULL)
1009 return; 1009 return;
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 56ebb1443e0e..f5243f943996 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -18,11 +18,6 @@
18 18
19#define NFSDDBG_FACILITY NFSDDBG_XDR 19#define NFSDDBG_FACILITY NFSDDBG_XDR
20 20
21
22#ifdef NFSD_OPTIMIZE_SPACE
23# define inline
24#endif
25
26/* 21/*
27 * Mapping of S_IF* types to NFS file types 22 * Mapping of S_IF* types to NFS file types
28 */ 23 */
@@ -55,7 +50,7 @@ __be32 *nfs2svc_decode_fh(__be32 *p, struct svc_fh *fhp)
55 return decode_fh(p, fhp); 50 return decode_fh(p, fhp);
56} 51}
57 52
58static inline __be32 * 53static __be32 *
59encode_fh(__be32 *p, struct svc_fh *fhp) 54encode_fh(__be32 *p, struct svc_fh *fhp)
60{ 55{
61 memcpy(p, &fhp->fh_handle.fh_base, NFS_FHSIZE); 56 memcpy(p, &fhp->fh_handle.fh_base, NFS_FHSIZE);
@@ -66,7 +61,7 @@ encode_fh(__be32 *p, struct svc_fh *fhp)
66 * Decode a file name and make sure that the path contains 61 * Decode a file name and make sure that the path contains
67 * no slashes or null bytes. 62 * no slashes or null bytes.
68 */ 63 */
69static inline __be32 * 64static __be32 *
70decode_filename(__be32 *p, char **namp, int *lenp) 65decode_filename(__be32 *p, char **namp, int *lenp)
71{ 66{
72 char *name; 67 char *name;
@@ -82,7 +77,7 @@ decode_filename(__be32 *p, char **namp, int *lenp)
82 return p; 77 return p;
83} 78}
84 79
85static inline __be32 * 80static __be32 *
86decode_pathname(__be32 *p, char **namp, int *lenp) 81decode_pathname(__be32 *p, char **namp, int *lenp)
87{ 82{
88 char *name; 83 char *name;
@@ -98,7 +93,7 @@ decode_pathname(__be32 *p, char **namp, int *lenp)
98 return p; 93 return p;
99} 94}
100 95
101static inline __be32 * 96static __be32 *
102decode_sattr(__be32 *p, struct iattr *iap) 97decode_sattr(__be32 *p, struct iattr *iap)
103{ 98{
104 u32 tmp, tmp1; 99 u32 tmp, tmp1;
diff --git a/fs/nls/nls_cp936.c b/fs/nls/nls_cp936.c
index 046fde8170ea..65e640c61c8b 100644
--- a/fs/nls/nls_cp936.c
+++ b/fs/nls/nls_cp936.c
@@ -4421,6 +4421,73 @@ static wchar_t *page_charset2uni[256] = {
4421 c2u_F8, c2u_F9, c2u_FA, c2u_FB, c2u_FC, c2u_FD, c2u_FE, NULL, 4421 c2u_F8, c2u_F9, c2u_FA, c2u_FB, c2u_FC, c2u_FD, c2u_FE, NULL,
4422}; 4422};
4423 4423
4424static unsigned char u2c_00[512] = {
4425 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
4426 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
4427 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
4428 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
4429 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
4430 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
4431 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
4432 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
4433 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
4434 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
4435 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
4436 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
4437 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
4438 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
4439 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
4440 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
4441 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
4442 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
4443 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
4444 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
4445 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
4446 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
4447 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
4448 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
4449 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
4450 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
4451 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
4452 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
4453 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
4454 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
4455 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
4456 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
4457 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
4458 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
4459 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
4460 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
4461 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
4462 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
4463 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
4464 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
4465 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
4466 0xA1, 0xE8, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xEC, /* 0xA4-0xA7 */
4467 0xA1, 0xA7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
4468 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
4469 0xA1, 0xE3, 0xA1, 0xC0, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
4470 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xA4, /* 0xB4-0xB7 */
4471 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
4472 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
4473 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
4474 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
4475 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
4476 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
4477 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
4478 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xC1, /* 0xD4-0xD7 */
4479 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
4480 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
4481 0xA8, 0xA4, 0xA8, 0xA2, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
4482 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
4483 0xA8, 0xA8, 0xA8, 0xA6, 0xA8, 0xBA, 0x00, 0x00, /* 0xE8-0xEB */
4484 0xA8, 0xAC, 0xA8, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
4485 0x00, 0x00, 0x00, 0x00, 0xA8, 0xB0, 0xA8, 0xAE, /* 0xF0-0xF3 */
4486 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xC2, /* 0xF4-0xF7 */
4487 0x00, 0x00, 0xA8, 0xB4, 0xA8, 0xB2, 0x00, 0x00, /* 0xF8-0xFB */
4488 0xA8, 0xB9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
4489};
4490
4424static unsigned char u2c_01[512] = { 4491static unsigned char u2c_01[512] = {
4425 0xA8, 0xA1, 0xA8, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */ 4492 0xA8, 0xA1, 0xA8, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
4426 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */ 4493 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
@@ -10825,7 +10892,7 @@ static unsigned char u2c_FF[512] = {
10825}; 10892};
10826 10893
10827static unsigned char *page_uni2charset[256] = { 10894static unsigned char *page_uni2charset[256] = {
10828 NULL, u2c_01, u2c_02, u2c_03, u2c_04, NULL, NULL, NULL, 10895 u2c_00, u2c_01, u2c_02, u2c_03, u2c_04, NULL, NULL, NULL,
10829 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 10896 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
10830 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 10897 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
10831 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 10898 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
@@ -10936,11 +11003,34 @@ static int uni2char(const wchar_t uni,
10936 unsigned char *uni2charset; 11003 unsigned char *uni2charset;
10937 unsigned char cl = uni&0xFF; 11004 unsigned char cl = uni&0xFF;
10938 unsigned char ch = (uni>>8)&0xFF; 11005 unsigned char ch = (uni>>8)&0xFF;
10939 int n; 11006 unsigned char out0,out1;
10940 11007
10941 if (boundlen <= 0) 11008 if (boundlen <= 0)
10942 return -ENAMETOOLONG; 11009 return -ENAMETOOLONG;
10943 11010
11011 if (uni == 0x20ac) {/* Euro symbol.The only exception with a non-ascii unicode */
11012 out[0] = 0x80;
11013 return 1;
11014 }
11015
11016 if (ch == 0) { /* handle the U00 plane*/
11017 /* if (cl == 0) return -EINVAL;*/ /*U0000 is legal in cp936*/
11018 out0 = u2c_00[cl*2];
11019 out1 = u2c_00[cl*2+1];
11020 if (out0 == 0x00 && out1 == 0x00) {
11021 if (cl<0x80) {
11022 out[0] = cl;
11023 return 1;
11024 }
11025 return -EINVAL;
11026 } else {
11027 if (boundlen <= 1)
11028 return -ENAMETOOLONG;
11029 out[0] = out0;
11030 out[1] = out1;
11031 return 2;
11032 }
11033 }
10944 11034
10945 uni2charset = page_uni2charset[ch]; 11035 uni2charset = page_uni2charset[ch];
10946 if (uni2charset) { 11036 if (uni2charset) {
@@ -10950,15 +11040,10 @@ static int uni2char(const wchar_t uni,
10950 out[1] = uni2charset[cl*2+1]; 11040 out[1] = uni2charset[cl*2+1];
10951 if (out[0] == 0x00 && out[1] == 0x00) 11041 if (out[0] == 0x00 && out[1] == 0x00)
10952 return -EINVAL; 11042 return -EINVAL;
10953 n = 2; 11043 return 2;
10954 } else if (ch==0 && cl) {
10955 out[0] = cl;
10956 n = 1;
10957 } 11044 }
10958 else 11045 else
10959 return -EINVAL; 11046 return -EINVAL;
10960
10961 return n;
10962} 11047}
10963 11048
10964static int char2uni(const unsigned char *rawstring, int boundlen, 11049static int char2uni(const unsigned char *rawstring, int boundlen,
@@ -10972,7 +11057,11 @@ static int char2uni(const unsigned char *rawstring, int boundlen,
10972 return -ENAMETOOLONG; 11057 return -ENAMETOOLONG;
10973 11058
10974 if (boundlen == 1) { 11059 if (boundlen == 1) {
10975 *uni = rawstring[0]; 11060 if (rawstring[0]==0x80) { /* Euro symbol.The only exception with a non-ascii unicode */
11061 *uni = 0x20ac;
11062 } else {
11063 *uni = rawstring[0];
11064 }
10976 return 1; 11065 return 1;
10977 } 11066 }
10978 11067
@@ -10986,7 +11075,11 @@ static int char2uni(const unsigned char *rawstring, int boundlen,
10986 return -EINVAL; 11075 return -EINVAL;
10987 n = 2; 11076 n = 2;
10988 } else{ 11077 } else{
10989 *uni = ch; 11078 if (ch==0x80) {/* Euro symbol.The only exception with a non-ascii unicode */
11079 *uni = 0x20ac;
11080 } else {
11081 *uni = ch;
11082 }
10990 n = 1; 11083 n = 1;
10991 } 11084 }
10992 return n; 11085 return n;
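The nls_cp936.c changes above add a uni2charset table for the U+00xx page and special-case the Euro sign, the one single-byte CP936 code (0x80) whose Unicode value is not the byte itself, translating it to and from U+20AC in both directions. A userspace sketch of that round trip; this is not the kernel NLS API and covers only the single-byte cases visible in the hunks:

/*
 * Sketch of the CP936 Euro special case: byte 0x80 <-> U+20AC,
 * while plain ASCII bytes map to themselves.  Two-byte GBK codes
 * would need the full page tables and are out of scope here.
 */
#include <stdio.h>

static unsigned int cp936_byte_to_uni(unsigned char c)
{
	if (c == 0x80)			/* Euro sign special case */
		return 0x20ac;
	return c;			/* single byte maps to itself */
}

static int cp936_uni_to_byte(unsigned int uni, unsigned char *out)
{
	if (uni == 0x20ac) {		/* Euro sign special case */
		*out = 0x80;
		return 1;
	}
	if (uni < 0x80) {		/* ASCII range maps 1:1 */
		*out = (unsigned char)uni;
		return 1;
	}
	return -1;			/* would need the two-byte tables */
}

int main(void)
{
	unsigned char b;

	printf("U+%04X\n", cp936_byte_to_uni(0x80));	/* prints U+20AC */
	if (cp936_uni_to_byte(0x20ac, &b) == 1)
		printf("0x%02X\n", b);			/* prints 0x80 */
	return 0;
}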
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 9f08e851cfb6..c577d8e1bd95 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -1272,7 +1272,7 @@ ntfs_attr_search_ctx *ntfs_attr_get_search_ctx(ntfs_inode *ni, MFT_RECORD *mrec)
1272{ 1272{
1273 ntfs_attr_search_ctx *ctx; 1273 ntfs_attr_search_ctx *ctx;
1274 1274
1275 ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, SLAB_NOFS); 1275 ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, GFP_NOFS);
1276 if (ctx) 1276 if (ctx)
1277 ntfs_attr_init_search_ctx(ctx, ni, mrec); 1277 ntfs_attr_init_search_ctx(ctx, ni, mrec);
1278 return ctx; 1278 return ctx;
diff --git a/fs/ntfs/index.c b/fs/ntfs/index.c
index e32cde486362..2194eff49743 100644
--- a/fs/ntfs/index.c
+++ b/fs/ntfs/index.c
@@ -38,7 +38,7 @@ ntfs_index_context *ntfs_index_ctx_get(ntfs_inode *idx_ni)
38{ 38{
39 ntfs_index_context *ictx; 39 ntfs_index_context *ictx;
40 40
41 ictx = kmem_cache_alloc(ntfs_index_ctx_cache, SLAB_NOFS); 41 ictx = kmem_cache_alloc(ntfs_index_ctx_cache, GFP_NOFS);
42 if (ictx) 42 if (ictx)
43 *ictx = (ntfs_index_context){ .idx_ni = idx_ni }; 43 *ictx = (ntfs_index_context){ .idx_ni = idx_ni };
44 return ictx; 44 return ictx;
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 2d3de9c89818..247989891b4b 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -324,7 +324,7 @@ struct inode *ntfs_alloc_big_inode(struct super_block *sb)
324 ntfs_inode *ni; 324 ntfs_inode *ni;
325 325
326 ntfs_debug("Entering."); 326 ntfs_debug("Entering.");
327 ni = kmem_cache_alloc(ntfs_big_inode_cache, SLAB_NOFS); 327 ni = kmem_cache_alloc(ntfs_big_inode_cache, GFP_NOFS);
328 if (likely(ni != NULL)) { 328 if (likely(ni != NULL)) {
329 ni->state = 0; 329 ni->state = 0;
330 return VFS_I(ni); 330 return VFS_I(ni);
@@ -349,7 +349,7 @@ static inline ntfs_inode *ntfs_alloc_extent_inode(void)
349 ntfs_inode *ni; 349 ntfs_inode *ni;
350 350
351 ntfs_debug("Entering."); 351 ntfs_debug("Entering.");
352 ni = kmem_cache_alloc(ntfs_inode_cache, SLAB_NOFS); 352 ni = kmem_cache_alloc(ntfs_inode_cache, GFP_NOFS);
353 if (likely(ni != NULL)) { 353 if (likely(ni != NULL)) {
354 ni->state = 0; 354 ni->state = 0;
355 return ni; 355 return ni;
diff --git a/fs/ntfs/unistr.c b/fs/ntfs/unistr.c
index 6a495f7369f9..005ca4b0f132 100644
--- a/fs/ntfs/unistr.c
+++ b/fs/ntfs/unistr.c
@@ -266,7 +266,7 @@ int ntfs_nlstoucs(const ntfs_volume *vol, const char *ins,
266 266
267 /* We do not trust outside sources. */ 267 /* We do not trust outside sources. */
268 if (likely(ins)) { 268 if (likely(ins)) {
269 ucs = kmem_cache_alloc(ntfs_name_cache, SLAB_NOFS); 269 ucs = kmem_cache_alloc(ntfs_name_cache, GFP_NOFS);
270 if (likely(ucs)) { 270 if (likely(ucs)) {
271 for (i = o = 0; i < ins_len; i += wc_len) { 271 for (i = o = 0; i < ins_len; i += wc_len) {
272 wc_len = nls->char2uni(ins + i, ins_len - i, 272 wc_len = nls->char2uni(ins + i, ins_len - i,
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
index 16b8d1ba7066..941acf14e61f 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -66,7 +66,7 @@ static struct file_operations dlmfs_file_operations;
66static struct inode_operations dlmfs_dir_inode_operations; 66static struct inode_operations dlmfs_dir_inode_operations;
67static struct inode_operations dlmfs_root_inode_operations; 67static struct inode_operations dlmfs_root_inode_operations;
68static struct inode_operations dlmfs_file_inode_operations; 68static struct inode_operations dlmfs_file_inode_operations;
69static kmem_cache_t *dlmfs_inode_cache; 69static struct kmem_cache *dlmfs_inode_cache;
70 70
71struct workqueue_struct *user_dlm_worker; 71struct workqueue_struct *user_dlm_worker;
72 72
@@ -257,7 +257,7 @@ static ssize_t dlmfs_file_write(struct file *filp,
257} 257}
258 258
259static void dlmfs_init_once(void *foo, 259static void dlmfs_init_once(void *foo,
260 kmem_cache_t *cachep, 260 struct kmem_cache *cachep,
261 unsigned long flags) 261 unsigned long flags)
262{ 262{
263 struct dlmfs_inode_private *ip = 263 struct dlmfs_inode_private *ip =
@@ -276,7 +276,7 @@ static struct inode *dlmfs_alloc_inode(struct super_block *sb)
276{ 276{
277 struct dlmfs_inode_private *ip; 277 struct dlmfs_inode_private *ip;
278 278
279 ip = kmem_cache_alloc(dlmfs_inode_cache, SLAB_NOFS); 279 ip = kmem_cache_alloc(dlmfs_inode_cache, GFP_NOFS);
280 if (!ip) 280 if (!ip)
281 return NULL; 281 return NULL;
282 282
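
As in most hunks of this series, the dlmfs changes replace the kmem_cache_t typedef with a bare struct kmem_cache pointer, which callers can hold with nothing more than a forward declaration of the struct tag. A userspace sketch of that opaque-handle idea follows; demo_cache and its helpers are made-up stand-ins, not the slab API.

#include <stdio.h>
#include <stdlib.h>

/* An opaque handle: callers only ever hold a pointer, so a forward
 * declaration of the struct tag is enough; no typedef is required.
 * "demo_cache" is a made-up stand-in for struct kmem_cache. */
struct demo_cache;

struct demo_cache *demo_cache_create(size_t objsize);
void *demo_cache_alloc(struct demo_cache *c);

/* The definition lives "elsewhere", just as the slab internals do. */
struct demo_cache { size_t objsize; };

struct demo_cache *demo_cache_create(size_t objsize)
{
        struct demo_cache *c = malloc(sizeof(*c));
        if (c)
                c->objsize = objsize;
        return c;
}

void *demo_cache_alloc(struct demo_cache *c)
{
        return calloc(1, c->objsize);
}

int main(void)
{
        struct demo_cache *c = demo_cache_create(64);
        void *obj = demo_cache_alloc(c);
        printf("cache %p, object %p\n", (void *)c, obj);
        free(obj);
        free(c);
        return 0;
}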
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index f784177b6241..856012b4fa49 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -221,7 +221,7 @@ EXPORT_SYMBOL_GPL(dlm_dump_all_mles);
221#endif /* 0 */ 221#endif /* 0 */
222 222
223 223
224static kmem_cache_t *dlm_mle_cache = NULL; 224static struct kmem_cache *dlm_mle_cache = NULL;
225 225
226 226
227static void dlm_mle_release(struct kref *kref); 227static void dlm_mle_release(struct kref *kref);
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index fcd4475d1f89..80ac69f11d9f 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -61,7 +61,7 @@ struct ocfs2_em_insert_context {
61 struct ocfs2_extent_map_entry *right_ent; 61 struct ocfs2_extent_map_entry *right_ent;
62}; 62};
63 63
64static kmem_cache_t *ocfs2_em_ent_cachep = NULL; 64static struct kmem_cache *ocfs2_em_ent_cachep = NULL;
65 65
66 66
67static struct ocfs2_extent_map_entry * 67static struct ocfs2_extent_map_entry *
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index 46a378fbc40b..1a7dd2945b34 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -106,7 +106,7 @@ static inline struct ocfs2_inode_info *OCFS2_I(struct inode *inode)
106#define INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags & OCFS2_INODE_JOURNAL) 106#define INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags & OCFS2_INODE_JOURNAL)
107#define SET_INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags |= OCFS2_INODE_JOURNAL) 107#define SET_INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags |= OCFS2_INODE_JOURNAL)
108 108
109extern kmem_cache_t *ocfs2_inode_cache; 109extern struct kmem_cache *ocfs2_inode_cache;
110 110
111extern const struct address_space_operations ocfs2_aops; 111extern const struct address_space_operations ocfs2_aops;
112 112
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index d9b4214a12da..4bf39540e652 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -68,7 +68,7 @@
68 68
69#include "buffer_head_io.h" 69#include "buffer_head_io.h"
70 70
71static kmem_cache_t *ocfs2_inode_cachep = NULL; 71static struct kmem_cache *ocfs2_inode_cachep = NULL;
72 72
73/* OCFS2 needs to schedule several differnt types of work which 73/* OCFS2 needs to schedule several differnt types of work which
74 * require cluster locking, disk I/O, recovery waits, etc. Since these 74 * require cluster locking, disk I/O, recovery waits, etc. Since these
@@ -303,7 +303,7 @@ static struct inode *ocfs2_alloc_inode(struct super_block *sb)
303{ 303{
304 struct ocfs2_inode_info *oi; 304 struct ocfs2_inode_info *oi;
305 305
306 oi = kmem_cache_alloc(ocfs2_inode_cachep, SLAB_NOFS); 306 oi = kmem_cache_alloc(ocfs2_inode_cachep, GFP_NOFS);
307 if (!oi) 307 if (!oi)
308 return NULL; 308 return NULL;
309 309
@@ -914,7 +914,7 @@ bail:
914} 914}
915 915
916static void ocfs2_inode_init_once(void *data, 916static void ocfs2_inode_init_once(void *data,
917 kmem_cache_t *cachep, 917 struct kmem_cache *cachep,
918 unsigned long flags) 918 unsigned long flags)
919{ 919{
920 struct ocfs2_inode_info *oi = data; 920 struct ocfs2_inode_info *oi = data;
@@ -1674,7 +1674,7 @@ void __ocfs2_error(struct super_block *sb,
1674 va_list args; 1674 va_list args;
1675 1675
1676 va_start(args, fmt); 1676 va_start(args, fmt);
1677 vsprintf(error_buf, fmt, args); 1677 vsnprintf(error_buf, sizeof(error_buf), fmt, args);
1678 va_end(args); 1678 va_end(args);
1679 1679
1680 /* Not using mlog here because we want to show the actual 1680 /* Not using mlog here because we want to show the actual
@@ -1695,7 +1695,7 @@ void __ocfs2_abort(struct super_block* sb,
1695 va_list args; 1695 va_list args;
1696 1696
1697 va_start(args, fmt); 1697 va_start(args, fmt);
1698 vsprintf(error_buf, fmt, args); 1698 vsnprintf(error_buf, sizeof(error_buf), fmt, args);
1699 va_end(args); 1699 va_end(args);
1700 1700
1701 printk(KERN_CRIT "OCFS2: abort (device %s): %s: %s\n", 1701 printk(KERN_CRIT "OCFS2: abort (device %s): %s: %s\n",
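
__ocfs2_error() and __ocfs2_abort() (and the udf/ufs helpers later in this series) switch from vsprintf() to vsnprintf() so a long format expansion can no longer run past the end of the static error_buf. A small userspace sketch of the bounded pattern; the 64-byte buffer size and the demo_error() name are illustrative only.

#include <stdarg.h>
#include <stdio.h>

static char error_buf[64];      /* deliberately small for the demo */

/* Format into a fixed buffer; vsnprintf truncates instead of overflowing. */
static void demo_error(const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        vsnprintf(error_buf, sizeof(error_buf), fmt, args);
        va_end(args);
        printf("ERROR: %s\n", error_buf);
}

int main(void)
{
        demo_error("inode %lu: %s", 42UL,
                   "a message long enough that vsprintf would have run past "
                   "the end of the 64-byte buffer");
        return 0;
}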
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index 9707ed7a3206..39814b900fc0 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -69,7 +69,7 @@ struct ocfs2_meta_cache_item {
69 sector_t c_block; 69 sector_t c_block;
70}; 70};
71 71
72static kmem_cache_t *ocfs2_uptodate_cachep = NULL; 72static struct kmem_cache *ocfs2_uptodate_cachep = NULL;
73 73
74void ocfs2_metadata_cache_init(struct inode *inode) 74void ocfs2_metadata_cache_init(struct inode *inode)
75{ 75{
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index 592a6402e851..26f44e0074ec 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -330,13 +330,13 @@ out:
330 return 0; 330 return 0;
331} 331}
332 332
333static kmem_cache_t *op_inode_cachep; 333static struct kmem_cache *op_inode_cachep;
334 334
335static struct inode *openprom_alloc_inode(struct super_block *sb) 335static struct inode *openprom_alloc_inode(struct super_block *sb)
336{ 336{
337 struct op_inode_info *oi; 337 struct op_inode_info *oi;
338 338
339 oi = kmem_cache_alloc(op_inode_cachep, SLAB_KERNEL); 339 oi = kmem_cache_alloc(op_inode_cachep, GFP_KERNEL);
340 if (!oi) 340 if (!oi)
341 return NULL; 341 return NULL;
342 342
@@ -415,7 +415,7 @@ static struct file_system_type openprom_fs_type = {
415 .kill_sb = kill_anon_super, 415 .kill_sb = kill_anon_super,
416}; 416};
417 417
418static void op_inode_init_once(void *data, kmem_cache_t * cachep, unsigned long flags) 418static void op_inode_init_once(void *data, struct kmem_cache * cachep, unsigned long flags)
419{ 419{
420 struct op_inode_info *oi = (struct op_inode_info *) data; 420 struct op_inode_info *oi = (struct op_inode_info *) data;
421 421
diff --git a/fs/partitions/amiga.c b/fs/partitions/amiga.c
index 3068528890a6..9917a8c360f2 100644
--- a/fs/partitions/amiga.c
+++ b/fs/partitions/amiga.c
@@ -43,6 +43,7 @@ amiga_partition(struct parsed_partitions *state, struct block_device *bdev)
43 if (warn_no_part) 43 if (warn_no_part)
44 printk("Dev %s: unable to read RDB block %d\n", 44 printk("Dev %s: unable to read RDB block %d\n",
45 bdevname(bdev, b), blk); 45 bdevname(bdev, b), blk);
46 res = -1;
46 goto rdb_done; 47 goto rdb_done;
47 } 48 }
48 if (*(__be32 *)data != cpu_to_be32(IDNAME_RIGIDDISK)) 49 if (*(__be32 *)data != cpu_to_be32(IDNAME_RIGIDDISK))
@@ -79,6 +80,7 @@ amiga_partition(struct parsed_partitions *state, struct block_device *bdev)
79 if (warn_no_part) 80 if (warn_no_part)
80 printk("Dev %s: unable to read partition block %d\n", 81 printk("Dev %s: unable to read partition block %d\n",
81 bdevname(bdev, b), blk); 82 bdevname(bdev, b), blk);
83 res = -1;
82 goto rdb_done; 84 goto rdb_done;
83 } 85 }
84 pb = (struct PartitionBlock *)data; 86 pb = (struct PartitionBlock *)data;
diff --git a/fs/partitions/atari.c b/fs/partitions/atari.c
index 192a6adfdefd..1f3572d5b755 100644
--- a/fs/partitions/atari.c
+++ b/fs/partitions/atari.c
@@ -88,7 +88,7 @@ int atari_partition(struct parsed_partitions *state, struct block_device *bdev)
88 if (!xrs) { 88 if (!xrs) {
89 printk (" block %ld read failed\n", partsect); 89 printk (" block %ld read failed\n", partsect);
90 put_dev_sector(sect); 90 put_dev_sector(sect);
91 return 0; 91 return -1;
92 } 92 }
93 93
94 /* ++roman: sanity check: bit 0 of flg field must be set */ 94 /* ++roman: sanity check: bit 0 of flg field must be set */
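
The amiga and atari parsers above (and ibm_partition() further down) begin returning -1 on read failures instead of 0, so the check_partition() change that follows can distinguish "no table of this type" (0) from "recognized" (>0) from "I/O error" (<0). A toy parser illustrating that three-way contract; read_block() and demo_partition() are hypothetical stand-ins.

#include <stdio.h>

/* Hypothetical block reader: returns 0 on success, -1 on I/O error. */
static int read_block(int fail) { return fail ? -1 : 0; }

/* Three-way result, mirroring the partition-parser convention:
 *   >0  table of this type found and parsed
 *    0  no table of this type on the device
 *   <0  I/O error while looking for one
 */
static int demo_partition(int io_fails, int has_magic)
{
        if (read_block(io_fails))
                return -1;
        if (!has_magic)
                return 0;
        return 1;
}

int main(void)
{
        printf("ok=%d none=%d err=%d\n",
               demo_partition(0, 1), demo_partition(0, 0), demo_partition(1, 0));
        return 0;
}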
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 6fb4b6150d77..1901137f4eca 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -153,7 +153,7 @@ static struct parsed_partitions *
153check_partition(struct gendisk *hd, struct block_device *bdev) 153check_partition(struct gendisk *hd, struct block_device *bdev)
154{ 154{
155 struct parsed_partitions *state; 155 struct parsed_partitions *state;
156 int i, res; 156 int i, res, err;
157 157
158 state = kmalloc(sizeof(struct parsed_partitions), GFP_KERNEL); 158 state = kmalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
159 if (!state) 159 if (!state)
@@ -165,19 +165,30 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
165 sprintf(state->name, "p"); 165 sprintf(state->name, "p");
166 166
167 state->limit = hd->minors; 167 state->limit = hd->minors;
168 i = res = 0; 168 i = res = err = 0;
169 while (!res && check_part[i]) { 169 while (!res && check_part[i]) {
170 memset(&state->parts, 0, sizeof(state->parts)); 170 memset(&state->parts, 0, sizeof(state->parts));
171 res = check_part[i++](state, bdev); 171 res = check_part[i++](state, bdev);
172 if (res < 0) {
173 /* We have hit an I/O error which we don't report now.
174 * But record it, and let the others do their job.
175 */
176 err = res;
177 res = 0;
178 }
179
172 } 180 }
173 if (res > 0) 181 if (res > 0)
174 return state; 182 return state;
183 if (!err)
 184		/* The partition table is unrecognized, so report I/O errors if there were any */
185 res = err;
175 if (!res) 186 if (!res)
176 printk(" unknown partition table\n"); 187 printk(" unknown partition table\n");
177 else if (warn_no_part) 188 else if (warn_no_part)
178 printk(" unable to read partition table\n"); 189 printk(" unable to read partition table\n");
179 kfree(state); 190 kfree(state);
180 return NULL; 191 return ERR_PTR(res);
181} 192}
182 193
183/* 194/*
@@ -494,6 +505,8 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
494 disk->fops->revalidate_disk(disk); 505 disk->fops->revalidate_disk(disk);
495 if (!get_capacity(disk) || !(state = check_partition(disk, bdev))) 506 if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
496 return 0; 507 return 0;
508 if (IS_ERR(state)) /* I/O error reading the partition table */
509 return PTR_ERR(state);
497 for (p = 1; p < state->limit; p++) { 510 for (p = 1; p < state->limit; p++) {
498 sector_t size = state->parts[p].size; 511 sector_t size = state->parts[p].size;
499 sector_t from = state->parts[p].from; 512 sector_t from = state->parts[p].from;
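
check_partition() now hands back ERR_PTR(res) when the only failures were I/O errors, and rescan_partitions() unpacks that with IS_ERR()/PTR_ERR(). Below is a simplified userspace emulation of the encoding; the real macros live in include/linux/err.h and reserve the top 4095 values of the address space for error codes.

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095

/* Simplified userspace stand-ins for the linux/err.h helpers. */
static inline void *ERR_PTR(long error) { return (void *)(intptr_t)error; }
static inline long PTR_ERR(const void *ptr) { return (long)(intptr_t)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static void *lookup(int fail)
{
        static int object = 42;
        if (fail)
                return ERR_PTR(-5);     /* -EIO */
        return &object;
}

int main(void)
{
        void *p = lookup(1);
        if (IS_ERR(p))
                printf("error %ld\n", PTR_ERR(p));
        else
                printf("value %d\n", *(int *)p);
        return 0;
}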
diff --git a/fs/partitions/ibm.c b/fs/partitions/ibm.c
index d352a7381fed..9f7ad4244f63 100644
--- a/fs/partitions/ibm.c
+++ b/fs/partitions/ibm.c
@@ -43,7 +43,7 @@ cchhb2blk (struct vtoc_cchhb *ptr, struct hd_geometry *geo) {
43int 43int
44ibm_partition(struct parsed_partitions *state, struct block_device *bdev) 44ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
45{ 45{
46 int blocksize, offset, size; 46 int blocksize, offset, size,res;
47 loff_t i_size; 47 loff_t i_size;
48 dasd_information_t *info; 48 dasd_information_t *info;
49 struct hd_geometry *geo; 49 struct hd_geometry *geo;
@@ -56,15 +56,16 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
56 unsigned char *data; 56 unsigned char *data;
57 Sector sect; 57 Sector sect;
58 58
59 res = 0;
59 blocksize = bdev_hardsect_size(bdev); 60 blocksize = bdev_hardsect_size(bdev);
60 if (blocksize <= 0) 61 if (blocksize <= 0)
61 return 0; 62 goto out_exit;
62 i_size = i_size_read(bdev->bd_inode); 63 i_size = i_size_read(bdev->bd_inode);
63 if (i_size == 0) 64 if (i_size == 0)
64 return 0; 65 goto out_exit;
65 66
66 if ((info = kmalloc(sizeof(dasd_information_t), GFP_KERNEL)) == NULL) 67 if ((info = kmalloc(sizeof(dasd_information_t), GFP_KERNEL)) == NULL)
67 goto out_noinfo; 68 goto out_exit;
68 if ((geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL)) == NULL) 69 if ((geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL)) == NULL)
69 goto out_nogeo; 70 goto out_nogeo;
70 if ((label = kmalloc(sizeof(union label_t), GFP_KERNEL)) == NULL) 71 if ((label = kmalloc(sizeof(union label_t), GFP_KERNEL)) == NULL)
@@ -72,7 +73,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
72 73
73 if (ioctl_by_bdev(bdev, BIODASDINFO, (unsigned long)info) != 0 || 74 if (ioctl_by_bdev(bdev, BIODASDINFO, (unsigned long)info) != 0 ||
74 ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0) 75 ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0)
75 goto out_noioctl; 76 goto out_freeall;
76 77
77 /* 78 /*
78 * Get volume label, extract name and type. 79 * Get volume label, extract name and type.
@@ -92,6 +93,8 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
92 EBCASC(type, 4); 93 EBCASC(type, 4);
93 EBCASC(name, 6); 94 EBCASC(name, 6);
94 95
96 res = 1;
97
95 /* 98 /*
96 * Three different types: CMS1, VOL1 and LNX1/unlabeled 99 * Three different types: CMS1, VOL1 and LNX1/unlabeled
97 */ 100 */
@@ -156,6 +159,9 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
156 counter++; 159 counter++;
157 blk++; 160 blk++;
158 } 161 }
162 if (!data)
 163			/* Are we not supposed to report this? */
164 goto out_readerr;
159 } else { 165 } else {
160 /* 166 /*
161 * Old style LNX1 or unlabeled disk 167 * Old style LNX1 or unlabeled disk
@@ -171,18 +177,17 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
171 } 177 }
172 178
173 printk("\n"); 179 printk("\n");
174 kfree(label); 180 goto out_freeall;
175 kfree(geo); 181
176 kfree(info);
177 return 1;
178 182
179out_readerr: 183out_readerr:
180out_noioctl: 184 res = -1;
185out_freeall:
181 kfree(label); 186 kfree(label);
182out_nolab: 187out_nolab:
183 kfree(geo); 188 kfree(geo);
184out_nogeo: 189out_nogeo:
185 kfree(info); 190 kfree(info);
186out_noinfo: 191out_exit:
187 return 0; 192 return res;
188} 193}
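
The ibm_partition() rework funnels every exit through one unwind chain that frees label, geo and info in reverse allocation order and returns res (1 recognized, 0 not recognized, -1 I/O error). The same goto-cleanup pattern in a standalone sketch; demo_parse() and the allocation sizes are invented for the example.

#include <stdlib.h>
#include <stdio.h>

/* Goto-based unwind: each allocation gets a label that frees everything
 * allocated so far, so every exit path releases exactly what it took. */
static int demo_parse(int fail_ioctl)
{
        int res = 0;
        char *info = NULL, *geo = NULL, *label = NULL;

        if ((info = malloc(32)) == NULL)
                goto out_exit;
        if ((geo = malloc(16)) == NULL)
                goto out_nogeo;
        if ((label = malloc(84)) == NULL)
                goto out_nolab;

        if (fail_ioctl)
                goto out_freeall;       /* res stays 0: not recognized */

        res = 1;                        /* parsed successfully */

out_freeall:
        free(label);
out_nolab:
        free(geo);
out_nogeo:
        free(info);
out_exit:
        return res;
}

int main(void)
{
        printf("ok=%d ioctl-failed=%d\n", demo_parse(0), demo_parse(1));
        return 0;
}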
diff --git a/fs/pipe.c b/fs/pipe.c
index b1626f269a34..ae36b89b1a37 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -830,7 +830,14 @@ void free_pipe_info(struct inode *inode)
830static struct vfsmount *pipe_mnt __read_mostly; 830static struct vfsmount *pipe_mnt __read_mostly;
831static int pipefs_delete_dentry(struct dentry *dentry) 831static int pipefs_delete_dentry(struct dentry *dentry)
832{ 832{
833 return 1; 833 /*
834 * At creation time, we pretended this dentry was hashed
 835	 * (by clearing the DCACHE_UNHASHED bit in d_flags).
 836	 * At delete time, we restore the truth: it is not hashed
 837	 * (so that dput() can proceed correctly).
838 */
839 dentry->d_flags |= DCACHE_UNHASHED;
840 return 0;
834} 841}
835 842
836static struct dentry_operations pipefs_dentry_operations = { 843static struct dentry_operations pipefs_dentry_operations = {
@@ -891,17 +898,22 @@ struct file *create_write_pipe(void)
891 if (!inode) 898 if (!inode)
892 goto err_file; 899 goto err_file;
893 900
894 sprintf(name, "[%lu]", inode->i_ino); 901 this.len = sprintf(name, "[%lu]", inode->i_ino);
895 this.name = name; 902 this.name = name;
896 this.len = strlen(name); 903 this.hash = 0;
897 this.hash = inode->i_ino; /* will go */
898 err = -ENOMEM; 904 err = -ENOMEM;
899 dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &this); 905 dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &this);
900 if (!dentry) 906 if (!dentry)
901 goto err_inode; 907 goto err_inode;
902 908
903 dentry->d_op = &pipefs_dentry_operations; 909 dentry->d_op = &pipefs_dentry_operations;
904 d_add(dentry, inode); 910 /*
 911	 * We don't want to publish this dentry in the global dentry hash table.
 912	 * We pretend the dentry is already hashed, by unsetting DCACHE_UNHASHED.
 913	 * This permits a working /proc/$pid/fd/XXX on pipes.
914 */
915 dentry->d_flags &= ~DCACHE_UNHASHED;
916 d_instantiate(dentry, inode);
905 f->f_vfsmnt = mntget(pipe_mnt); 917 f->f_vfsmnt = mntget(pipe_mnt);
906 f->f_dentry = dentry; 918 f->f_dentry = dentry;
907 f->f_mapping = inode->i_mapping; 919 f->f_mapping = inode->i_mapping;
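
create_write_pipe() now takes the dentry name length straight from the return value of sprintf() rather than a follow-up strlen(), and sets this.hash to 0 instead of seeding it from the inode number. A tiny demonstration that sprintf() already reports the number of characters written; demo_qstr is a stand-in for the kernel's struct qstr.

#include <stdio.h>
#include <string.h>

struct demo_qstr {              /* stand-in for the kernel's struct qstr */
        const char *name;
        unsigned int len;
        unsigned int hash;
};

int main(void)
{
        char name[32];
        struct demo_qstr this;
        unsigned long ino = 123456;

        this.len  = sprintf(name, "[%lu]", ino);  /* count of chars written */
        this.name = name;
        this.hash = 0;

        printf("name=%s len=%u strlen=%zu\n", this.name, this.len, strlen(name));
        return 0;
}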
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index 7431d7ba2d09..f6c776272572 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -8,8 +8,9 @@ proc-y := nommu.o task_nommu.o
8proc-$(CONFIG_MMU) := mmu.o task_mmu.o 8proc-$(CONFIG_MMU) := mmu.o task_mmu.o
9 9
10proc-y += inode.o root.o base.o generic.o array.o \ 10proc-y += inode.o root.o base.o generic.o array.o \
11 kmsg.o proc_tty.o proc_misc.o 11 proc_tty.o proc_misc.o
12 12
13proc-$(CONFIG_PROC_KCORE) += kcore.o 13proc-$(CONFIG_PROC_KCORE) += kcore.o
14proc-$(CONFIG_PROC_VMCORE) += vmcore.o 14proc-$(CONFIG_PROC_VMCORE) += vmcore.o
15proc-$(CONFIG_PROC_DEVICETREE) += proc_devtree.o 15proc-$(CONFIG_PROC_DEVICETREE) += proc_devtree.o
16proc-$(CONFIG_PRINTK) += kmsg.o
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 795319c54f72..b859fc749c07 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -683,8 +683,6 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
683 char buffer[PROC_NUMBUF], *end; 683 char buffer[PROC_NUMBUF], *end;
684 int oom_adjust; 684 int oom_adjust;
685 685
686 if (!capable(CAP_SYS_RESOURCE))
687 return -EPERM;
688 memset(buffer, 0, sizeof(buffer)); 686 memset(buffer, 0, sizeof(buffer));
689 if (count > sizeof(buffer) - 1) 687 if (count > sizeof(buffer) - 1)
690 count = sizeof(buffer) - 1; 688 count = sizeof(buffer) - 1;
@@ -699,6 +697,10 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
699 task = get_proc_task(file->f_dentry->d_inode); 697 task = get_proc_task(file->f_dentry->d_inode);
700 if (!task) 698 if (!task)
701 return -ESRCH; 699 return -ESRCH;
700 if (oom_adjust < task->oomkilladj && !capable(CAP_SYS_RESOURCE)) {
701 put_task_struct(task);
702 return -EACCES;
703 }
702 task->oomkilladj = oom_adjust; 704 task->oomkilladj = oom_adjust;
703 put_task_struct(task); 705 put_task_struct(task);
704 if (end - buffer == 0) 706 if (end - buffer == 0)
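
The oom_adjust_write() hunk replaces the blanket CAP_SYS_RESOURCE check with a targeted one: only lowering oomkilladj below its current value (i.e. making the task harder for the OOM killer to pick) requires the capability. A small userspace model of that policy; capable_sys_resource() is a stub and -1 stands in for -EACCES.

#include <stdio.h>

/* Stubbed capability check; in the kernel this is capable(CAP_SYS_RESOURCE). */
static int capable_sys_resource(int privileged) { return privileged; }

/* Return 0 on success, -1 (standing in for -EACCES) if the caller tries
 * to lower the value without privilege. */
static int set_oom_adjust(int *current_adj, int new_adj, int privileged)
{
        if (new_adj < *current_adj && !capable_sys_resource(privileged))
                return -1;
        *current_adj = new_adj;
        return 0;
}

int main(void)
{
        int adj = 0;

        printf("raise unprivileged: %d\n", set_oom_adjust(&adj, 5, 0));  /* ok */
        printf("lower unprivileged: %d\n", set_oom_adjust(&adj, -5, 0)); /* denied */
        printf("lower privileged:   %d\n", set_oom_adjust(&adj, -5, 1)); /* ok */
        return 0;
}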
@@ -1883,8 +1885,9 @@ out:
1883 return; 1885 return;
1884} 1886}
1885 1887
1886struct dentry *proc_pid_instantiate(struct inode *dir, 1888static struct dentry *proc_pid_instantiate(struct inode *dir,
1887 struct dentry * dentry, struct task_struct *task, void *ptr) 1889 struct dentry * dentry,
1890 struct task_struct *task, void *ptr)
1888{ 1891{
1889 struct dentry *error = ERR_PTR(-ENOENT); 1892 struct dentry *error = ERR_PTR(-ENOENT);
1890 struct inode *inode; 1893 struct inode *inode;
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 49dfb2ab783e..e26945ba685b 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -81,14 +81,14 @@ static void proc_read_inode(struct inode * inode)
81 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 81 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
82} 82}
83 83
84static kmem_cache_t * proc_inode_cachep; 84static struct kmem_cache * proc_inode_cachep;
85 85
86static struct inode *proc_alloc_inode(struct super_block *sb) 86static struct inode *proc_alloc_inode(struct super_block *sb)
87{ 87{
88 struct proc_inode *ei; 88 struct proc_inode *ei;
89 struct inode *inode; 89 struct inode *inode;
90 90
91 ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, SLAB_KERNEL); 91 ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
92 if (!ei) 92 if (!ei)
93 return NULL; 93 return NULL;
94 ei->pid = NULL; 94 ei->pid = NULL;
@@ -105,7 +105,7 @@ static void proc_destroy_inode(struct inode *inode)
105 kmem_cache_free(proc_inode_cachep, PROC_I(inode)); 105 kmem_cache_free(proc_inode_cachep, PROC_I(inode));
106} 106}
107 107
108static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 108static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
109{ 109{
110 struct proc_inode *ei = (struct proc_inode *) foo; 110 struct proc_inode *ei = (struct proc_inode *) foo;
111 111
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 1294eda4acae..1be73082edd3 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -22,6 +22,7 @@
22#include <asm/uaccess.h> 22#include <asm/uaccess.h>
23#include <asm/io.h> 23#include <asm/io.h>
24 24
25#define CORE_STR "CORE"
25 26
26static int open_kcore(struct inode * inode, struct file * filp) 27static int open_kcore(struct inode * inode, struct file * filp)
27{ 28{
@@ -82,10 +83,11 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
82 } 83 }
83 *elf_buflen = sizeof(struct elfhdr) + 84 *elf_buflen = sizeof(struct elfhdr) +
84 (*nphdr + 2)*sizeof(struct elf_phdr) + 85 (*nphdr + 2)*sizeof(struct elf_phdr) +
85 3 * (sizeof(struct elf_note) + 4) + 86 3 * ((sizeof(struct elf_note)) +
86 sizeof(struct elf_prstatus) + 87 roundup(sizeof(CORE_STR), 4)) +
87 sizeof(struct elf_prpsinfo) + 88 roundup(sizeof(struct elf_prstatus), 4) +
88 sizeof(struct task_struct); 89 roundup(sizeof(struct elf_prpsinfo), 4) +
90 roundup(sizeof(struct task_struct), 4);
89 *elf_buflen = PAGE_ALIGN(*elf_buflen); 91 *elf_buflen = PAGE_ALIGN(*elf_buflen);
90 return size + *elf_buflen; 92 return size + *elf_buflen;
91} 93}
@@ -210,7 +212,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
210 nhdr->p_offset = offset; 212 nhdr->p_offset = offset;
211 213
212 /* set up the process status */ 214 /* set up the process status */
213 notes[0].name = "CORE"; 215 notes[0].name = CORE_STR;
214 notes[0].type = NT_PRSTATUS; 216 notes[0].type = NT_PRSTATUS;
215 notes[0].datasz = sizeof(struct elf_prstatus); 217 notes[0].datasz = sizeof(struct elf_prstatus);
216 notes[0].data = &prstatus; 218 notes[0].data = &prstatus;
@@ -221,7 +223,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
221 bufp = storenote(&notes[0], bufp); 223 bufp = storenote(&notes[0], bufp);
222 224
223 /* set up the process info */ 225 /* set up the process info */
224 notes[1].name = "CORE"; 226 notes[1].name = CORE_STR;
225 notes[1].type = NT_PRPSINFO; 227 notes[1].type = NT_PRPSINFO;
226 notes[1].datasz = sizeof(struct elf_prpsinfo); 228 notes[1].datasz = sizeof(struct elf_prpsinfo);
227 notes[1].data = &prpsinfo; 229 notes[1].data = &prpsinfo;
@@ -238,7 +240,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
238 bufp = storenote(&notes[1], bufp); 240 bufp = storenote(&notes[1], bufp);
239 241
240 /* set up the task structure */ 242 /* set up the task structure */
241 notes[2].name = "CORE"; 243 notes[2].name = CORE_STR;
242 notes[2].type = NT_TASKSTRUCT; 244 notes[2].type = NT_TASKSTRUCT;
243 notes[2].datasz = sizeof(struct task_struct); 245 notes[2].datasz = sizeof(struct task_struct);
244 notes[2].data = current; 246 notes[2].data = current;
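
get_kcore_size() now rounds the note name (CORE_STR) and each note descriptor up to 4-byte boundaries when budgeting the ELF header buffer, matching the padding rules for ELF notes. A standalone calculation of one note's padded size; the descriptor size used here is illustrative, not the kernel's struct elf_prstatus.

#include <stdio.h>
#include <string.h>

#define CORE_STR "CORE"
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

/* An ELF note starts with three 32-bit fields (namesz, descsz, type),
 * followed by the name and the descriptor, each padded to 4 bytes. */
struct elf_note_hdr { unsigned int n_namesz, n_descsz, n_type; };

static size_t note_size(size_t namesz, size_t descsz)
{
        return sizeof(struct elf_note_hdr) +
               roundup(namesz, 4) + roundup(descsz, 4);
}

int main(void)
{
        size_t prstatus_sz = 336;       /* illustrative descriptor size */

        printf("name takes %zu bytes padded\n",
               (size_t)roundup(sizeof(CORE_STR), 4));
        printf("one NT_PRSTATUS note: %zu bytes\n",
               note_size(sizeof(CORE_STR), prstatus_sz));
        return 0;
}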
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 93c43b676e59..51815cece6f3 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -696,9 +696,11 @@ void __init proc_misc_init(void)
696 proc_symlink("mounts", NULL, "self/mounts"); 696 proc_symlink("mounts", NULL, "self/mounts");
697 697
698 /* And now for trickier ones */ 698 /* And now for trickier ones */
699#ifdef CONFIG_PRINTK
699 entry = create_proc_entry("kmsg", S_IRUSR, &proc_root); 700 entry = create_proc_entry("kmsg", S_IRUSR, &proc_root);
700 if (entry) 701 if (entry)
701 entry->proc_fops = &proc_kmsg_operations; 702 entry->proc_fops = &proc_kmsg_operations;
703#endif
702 create_seq_entry("devices", 0, &proc_devinfo_operations); 704 create_seq_entry("devices", 0, &proc_devinfo_operations);
703 create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations); 705 create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations);
704#ifdef CONFIG_BLOCK 706#ifdef CONFIG_BLOCK
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 5a41db2a218d..c047dc654d5c 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -515,12 +515,12 @@ static void qnx4_read_inode(struct inode *inode)
515 brelse(bh); 515 brelse(bh);
516} 516}
517 517
518static kmem_cache_t *qnx4_inode_cachep; 518static struct kmem_cache *qnx4_inode_cachep;
519 519
520static struct inode *qnx4_alloc_inode(struct super_block *sb) 520static struct inode *qnx4_alloc_inode(struct super_block *sb)
521{ 521{
522 struct qnx4_inode_info *ei; 522 struct qnx4_inode_info *ei;
523 ei = kmem_cache_alloc(qnx4_inode_cachep, SLAB_KERNEL); 523 ei = kmem_cache_alloc(qnx4_inode_cachep, GFP_KERNEL);
524 if (!ei) 524 if (!ei)
525 return NULL; 525 return NULL;
526 return &ei->vfs_inode; 526 return &ei->vfs_inode;
@@ -531,7 +531,7 @@ static void qnx4_destroy_inode(struct inode *inode)
531 kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode)); 531 kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode));
532} 532}
533 533
534static void init_once(void *foo, kmem_cache_t * cachep, 534static void init_once(void *foo, struct kmem_cache * cachep,
535 unsigned long flags) 535 unsigned long flags)
536{ 536{
537 struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo; 537 struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo;
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index ac14318c81ba..373d862c3f87 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -317,12 +317,11 @@ static int reiserfs_allocate_blocks_for_region(struct reiserfs_transaction_handl
317 /* area filled with zeroes, to supply as list of zero blocknumbers 317 /* area filled with zeroes, to supply as list of zero blocknumbers
318 We allocate it outside of loop just in case loop would spin for 318 We allocate it outside of loop just in case loop would spin for
319 several iterations. */ 319 several iterations. */
320 char *zeros = kmalloc(to_paste * UNFM_P_SIZE, GFP_ATOMIC); // We cannot insert more than MAX_ITEM_LEN bytes anyway. 320 char *zeros = kzalloc(to_paste * UNFM_P_SIZE, GFP_ATOMIC); // We cannot insert more than MAX_ITEM_LEN bytes anyway.
321 if (!zeros) { 321 if (!zeros) {
322 res = -ENOMEM; 322 res = -ENOMEM;
323 goto error_exit_free_blocks; 323 goto error_exit_free_blocks;
324 } 324 }
325 memset(zeros, 0, to_paste * UNFM_P_SIZE);
326 do { 325 do {
327 to_paste = 326 to_paste =
328 min_t(__u64, hole_size, 327 min_t(__u64, hole_size,
@@ -407,6 +406,8 @@ static int reiserfs_allocate_blocks_for_region(struct reiserfs_transaction_handl
407 we restart it. This will also free the path. */ 406 we restart it. This will also free the path. */
408 if (journal_transaction_should_end 407 if (journal_transaction_should_end
409 (th, th->t_blocks_allocated)) { 408 (th, th->t_blocks_allocated)) {
409 inode->i_size = cpu_key_k_offset(&key) +
410 (to_paste << inode->i_blkbits);
410 res = 411 res =
411 restart_transaction(th, inode, 412 restart_transaction(th, inode,
412 &path); 413 &path);
@@ -1045,6 +1046,7 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode
1045 char *kaddr = kmap_atomic(prepared_pages[0], KM_USER0); 1046 char *kaddr = kmap_atomic(prepared_pages[0], KM_USER0);
1046 memset(kaddr, 0, from); 1047 memset(kaddr, 0, from);
1047 kunmap_atomic(kaddr, KM_USER0); 1048 kunmap_atomic(kaddr, KM_USER0);
1049 flush_dcache_page(prepared_pages[0]);
1048 } 1050 }
1049 if (to != PAGE_CACHE_SIZE) { /* Last page needs to be partially zeroed */ 1051 if (to != PAGE_CACHE_SIZE) { /* Last page needs to be partially zeroed */
1050 char *kaddr = 1052 char *kaddr =
@@ -1052,6 +1054,7 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode
1052 KM_USER0); 1054 KM_USER0);
1053 memset(kaddr + to, 0, PAGE_CACHE_SIZE - to); 1055 memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
1054 kunmap_atomic(kaddr, KM_USER0); 1056 kunmap_atomic(kaddr, KM_USER0);
1057 flush_dcache_page(prepared_pages[num_pages - 1]);
1055 } 1058 }
1056 1059
1057 /* Since all blocks are new - use already calculated value */ 1060 /* Since all blocks are new - use already calculated value */
@@ -1185,6 +1188,7 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode
1185 memset(kaddr + block_start, 0, 1188 memset(kaddr + block_start, 0,
1186 from - block_start); 1189 from - block_start);
1187 kunmap_atomic(kaddr, KM_USER0); 1190 kunmap_atomic(kaddr, KM_USER0);
1191 flush_dcache_page(prepared_pages[0]);
1188 set_buffer_uptodate(bh); 1192 set_buffer_uptodate(bh);
1189 } 1193 }
1190 } 1194 }
@@ -1222,6 +1226,7 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode
1222 KM_USER0); 1226 KM_USER0);
1223 memset(kaddr + to, 0, block_end - to); 1227 memset(kaddr + to, 0, block_end - to);
1224 kunmap_atomic(kaddr, KM_USER0); 1228 kunmap_atomic(kaddr, KM_USER0);
1229 flush_dcache_page(prepared_pages[num_pages - 1]);
1225 set_buffer_uptodate(bh); 1230 set_buffer_uptodate(bh);
1226 } 1231 }
1227 } 1232 }
@@ -1307,56 +1312,8 @@ static ssize_t reiserfs_file_write(struct file *file, /* the file we are going t
1307 count = MAX_NON_LFS - (unsigned long)*ppos; 1312 count = MAX_NON_LFS - (unsigned long)*ppos;
1308 } 1313 }
1309 1314
1310 if (file->f_flags & O_DIRECT) { // Direct IO needs treatment 1315 if (file->f_flags & O_DIRECT)
1311 ssize_t result, after_file_end = 0; 1316 return do_sync_write(file, buf, count, ppos);
1312 if ((*ppos + count >= inode->i_size)
1313 || (file->f_flags & O_APPEND)) {
1314 /* If we are appending a file, we need to put this savelink in here.
1315 If we will crash while doing direct io, finish_unfinished will
1316 cut the garbage from the file end. */
1317 reiserfs_write_lock(inode->i_sb);
1318 err =
1319 journal_begin(&th, inode->i_sb,
1320 JOURNAL_PER_BALANCE_CNT);
1321 if (err) {
1322 reiserfs_write_unlock(inode->i_sb);
1323 return err;
1324 }
1325 reiserfs_update_inode_transaction(inode);
1326 add_save_link(&th, inode, 1 /* Truncate */ );
1327 after_file_end = 1;
1328 err =
1329 journal_end(&th, inode->i_sb,
1330 JOURNAL_PER_BALANCE_CNT);
1331 reiserfs_write_unlock(inode->i_sb);
1332 if (err)
1333 return err;
1334 }
1335 result = do_sync_write(file, buf, count, ppos);
1336
1337 if (after_file_end) { /* Now update i_size and remove the savelink */
1338 struct reiserfs_transaction_handle th;
1339 reiserfs_write_lock(inode->i_sb);
1340 err = journal_begin(&th, inode->i_sb, 1);
1341 if (err) {
1342 reiserfs_write_unlock(inode->i_sb);
1343 return err;
1344 }
1345 reiserfs_update_inode_transaction(inode);
1346 mark_inode_dirty(inode);
1347 err = journal_end(&th, inode->i_sb, 1);
1348 if (err) {
1349 reiserfs_write_unlock(inode->i_sb);
1350 return err;
1351 }
1352 err = remove_save_link(inode, 1 /* truncate */ );
1353 reiserfs_write_unlock(inode->i_sb);
1354 if (err)
1355 return err;
1356 }
1357
1358 return result;
1359 }
1360 1317
1361 if (unlikely((ssize_t) count < 0)) 1318 if (unlikely((ssize_t) count < 0))
1362 return -EINVAL; 1319 return -EINVAL;
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 9c69bcacad22..254239e6f9e3 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -216,11 +216,12 @@ static int file_capable(struct inode *inode, long block)
216 BUG_ON(!th->t_trans_id); 216 BUG_ON(!th->t_trans_id);
217 BUG_ON(!th->t_refcount); 217 BUG_ON(!th->t_refcount);
218 218
219 pathrelse(path);
220
219 /* we cannot restart while nested */ 221 /* we cannot restart while nested */
220 if (th->t_refcount > 1) { 222 if (th->t_refcount > 1) {
221 return 0; 223 return 0;
222 } 224 }
223 pathrelse(path);
224 reiserfs_update_sd(th, inode); 225 reiserfs_update_sd(th, inode);
225 err = journal_end(th, s, len); 226 err = journal_end(th, s, len);
226 if (!err) { 227 if (!err) {
@@ -928,15 +929,12 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
928 if (blocks_needed == 1) { 929 if (blocks_needed == 1) {
929 un = &unf_single; 930 un = &unf_single;
930 } else { 931 } else {
931 un = kmalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_ATOMIC); // We need to avoid scheduling. 932 un = kzalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_ATOMIC); // We need to avoid scheduling.
932 if (!un) { 933 if (!un) {
933 un = &unf_single; 934 un = &unf_single;
934 blocks_needed = 1; 935 blocks_needed = 1;
935 max_to_insert = 0; 936 max_to_insert = 0;
936 } else 937 }
937 memset(un, 0,
938 UNFM_P_SIZE * min(blocks_needed,
939 max_to_insert));
940 } 938 }
941 if (blocks_needed <= max_to_insert) { 939 if (blocks_needed <= max_to_insert) {
942 /* we are going to add target block to the file. Use allocated 940 /* we are going to add target block to the file. Use allocated
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 17249994110f..7fb5fb036f90 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -490,13 +490,13 @@ static void reiserfs_put_super(struct super_block *s)
490 return; 490 return;
491} 491}
492 492
493static kmem_cache_t *reiserfs_inode_cachep; 493static struct kmem_cache *reiserfs_inode_cachep;
494 494
495static struct inode *reiserfs_alloc_inode(struct super_block *sb) 495static struct inode *reiserfs_alloc_inode(struct super_block *sb)
496{ 496{
497 struct reiserfs_inode_info *ei; 497 struct reiserfs_inode_info *ei;
498 ei = (struct reiserfs_inode_info *) 498 ei = (struct reiserfs_inode_info *)
499 kmem_cache_alloc(reiserfs_inode_cachep, SLAB_KERNEL); 499 kmem_cache_alloc(reiserfs_inode_cachep, GFP_KERNEL);
500 if (!ei) 500 if (!ei)
501 return NULL; 501 return NULL;
502 return &ei->vfs_inode; 502 return &ei->vfs_inode;
@@ -507,7 +507,7 @@ static void reiserfs_destroy_inode(struct inode *inode)
507 kmem_cache_free(reiserfs_inode_cachep, REISERFS_I(inode)); 507 kmem_cache_free(reiserfs_inode_cachep, REISERFS_I(inode));
508} 508}
509 509
510static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags) 510static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
511{ 511{
512 struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo; 512 struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo;
513 513
@@ -1549,13 +1549,12 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
1549 struct reiserfs_sb_info *sbi; 1549 struct reiserfs_sb_info *sbi;
1550 int errval = -EINVAL; 1550 int errval = -EINVAL;
1551 1551
1552 sbi = kmalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL); 1552 sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
1553 if (!sbi) { 1553 if (!sbi) {
1554 errval = -ENOMEM; 1554 errval = -ENOMEM;
1555 goto error; 1555 goto error;
1556 } 1556 }
1557 s->s_fs_info = sbi; 1557 s->s_fs_info = sbi;
1558 memset(sbi, 0, sizeof(struct reiserfs_sb_info));
1559 /* Set default values for options: non-aggressive tails, RO on errors */ 1558 /* Set default values for options: non-aggressive tails, RO on errors */
1560 REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL); 1559 REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
1561 REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO); 1560 REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO);
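
reiserfs_fill_super(), like the earlier reiserfs hunks, swaps kmalloc()-plus-memset() for kzalloc(), which hands back zeroed memory in a single call. The closest userspace analogue is calloc(); a sketch with a made-up demo_sb_info structure standing in for reiserfs_sb_info.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace stand-in for kzalloc(): allocate and zero in one step. */
static void *xzalloc(size_t size)
{
        return calloc(1, size);
}

struct demo_sb_info {           /* made-up stand-in for reiserfs_sb_info */
        unsigned long s_mount_opt;
        char s_label[16];
};

int main(void)
{
        struct demo_sb_info *sbi = xzalloc(sizeof(*sbi));
        if (!sbi)
                return 1;
        /* No memset() needed: every field already reads as zero. */
        printf("mount_opt=%lu label='%s'\n", sbi->s_mount_opt, sbi->s_label);
        free(sbi);
        return 0;
}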
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
index ddcd9e1ef282..c5af088d4a4c 100644
--- a/fs/romfs/inode.c
+++ b/fs/romfs/inode.c
@@ -550,12 +550,12 @@ romfs_read_inode(struct inode *i)
550 } 550 }
551} 551}
552 552
553static kmem_cache_t * romfs_inode_cachep; 553static struct kmem_cache * romfs_inode_cachep;
554 554
555static struct inode *romfs_alloc_inode(struct super_block *sb) 555static struct inode *romfs_alloc_inode(struct super_block *sb)
556{ 556{
557 struct romfs_inode_info *ei; 557 struct romfs_inode_info *ei;
558 ei = (struct romfs_inode_info *)kmem_cache_alloc(romfs_inode_cachep, SLAB_KERNEL); 558 ei = (struct romfs_inode_info *)kmem_cache_alloc(romfs_inode_cachep, GFP_KERNEL);
559 if (!ei) 559 if (!ei)
560 return NULL; 560 return NULL;
561 return &ei->vfs_inode; 561 return &ei->vfs_inode;
@@ -566,7 +566,7 @@ static void romfs_destroy_inode(struct inode *inode)
566 kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode)); 566 kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode));
567} 567}
568 568
569static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 569static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
570{ 570{
571 struct romfs_inode_info *ei = (struct romfs_inode_info *) foo; 571 struct romfs_inode_info *ei = (struct romfs_inode_info *) foo;
572 572
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 555b9ac04c25..10690aa401c7 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -26,7 +26,7 @@
26 * ERR_PTR(error). In the end of sequence they return %NULL. ->show() 26 * ERR_PTR(error). In the end of sequence they return %NULL. ->show()
27 * returns 0 in case of success and negative number in case of error. 27 * returns 0 in case of success and negative number in case of error.
28 */ 28 */
29int seq_open(struct file *file, struct seq_operations *op) 29int seq_open(struct file *file, const struct seq_operations *op)
30{ 30{
31 struct seq_file *p = file->private_data; 31 struct seq_file *p = file->private_data;
32 32
@@ -408,7 +408,7 @@ EXPORT_SYMBOL(single_open);
408 408
409int single_release(struct inode *inode, struct file *file) 409int single_release(struct inode *inode, struct file *file)
410{ 410{
411 struct seq_operations *op = ((struct seq_file *)file->private_data)->op; 411 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
412 int res = seq_release(inode, file); 412 int res = seq_release(inode, file);
413 kfree(op); 413 kfree(op);
414 return res; 414 return res;
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index 2c122ee83adb..4af4cd729a5a 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -50,12 +50,12 @@ static void smb_put_super(struct super_block *);
50static int smb_statfs(struct dentry *, struct kstatfs *); 50static int smb_statfs(struct dentry *, struct kstatfs *);
51static int smb_show_options(struct seq_file *, struct vfsmount *); 51static int smb_show_options(struct seq_file *, struct vfsmount *);
52 52
53static kmem_cache_t *smb_inode_cachep; 53static struct kmem_cache *smb_inode_cachep;
54 54
55static struct inode *smb_alloc_inode(struct super_block *sb) 55static struct inode *smb_alloc_inode(struct super_block *sb)
56{ 56{
57 struct smb_inode_info *ei; 57 struct smb_inode_info *ei;
58 ei = (struct smb_inode_info *)kmem_cache_alloc(smb_inode_cachep, SLAB_KERNEL); 58 ei = (struct smb_inode_info *)kmem_cache_alloc(smb_inode_cachep, GFP_KERNEL);
59 if (!ei) 59 if (!ei)
60 return NULL; 60 return NULL;
61 return &ei->vfs_inode; 61 return &ei->vfs_inode;
@@ -66,7 +66,7 @@ static void smb_destroy_inode(struct inode *inode)
66 kmem_cache_free(smb_inode_cachep, SMB_I(inode)); 66 kmem_cache_free(smb_inode_cachep, SMB_I(inode));
67} 67}
68 68
69static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 69static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
70{ 70{
71 struct smb_inode_info *ei = (struct smb_inode_info *) foo; 71 struct smb_inode_info *ei = (struct smb_inode_info *) foo;
72 unsigned long flagmask = SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR; 72 unsigned long flagmask = SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR;
diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c
index 0fb74697abc4..a4bcae8a9aff 100644
--- a/fs/smbfs/request.c
+++ b/fs/smbfs/request.c
@@ -25,7 +25,7 @@
25#define ROUND_UP(x) (((x)+3) & ~3) 25#define ROUND_UP(x) (((x)+3) & ~3)
26 26
27/* cache for request structures */ 27/* cache for request structures */
28static kmem_cache_t *req_cachep; 28static struct kmem_cache *req_cachep;
29 29
30static int smb_request_send_req(struct smb_request *req); 30static int smb_request_send_req(struct smb_request *req);
31 31
@@ -61,7 +61,7 @@ static struct smb_request *smb_do_alloc_request(struct smb_sb_info *server,
61 struct smb_request *req; 61 struct smb_request *req;
62 unsigned char *buf = NULL; 62 unsigned char *buf = NULL;
63 63
64 req = kmem_cache_alloc(req_cachep, SLAB_KERNEL); 64 req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
65 VERBOSE("allocating request: %p\n", req); 65 VERBOSE("allocating request: %p\n", req);
66 if (!req) 66 if (!req)
67 goto out; 67 goto out;
diff --git a/fs/stat.c b/fs/stat.c
index bca07eb2003c..a0ebfc7f8a64 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -51,13 +51,6 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
51 return inode->i_op->getattr(mnt, dentry, stat); 51 return inode->i_op->getattr(mnt, dentry, stat);
52 52
53 generic_fillattr(inode, stat); 53 generic_fillattr(inode, stat);
54 if (!stat->blksize) {
55 struct super_block *s = inode->i_sb;
56 unsigned blocks;
57 blocks = (stat->size+s->s_blocksize-1) >> s->s_blocksize_bits;
58 stat->blocks = (s->s_blocksize / 512) * blocks;
59 stat->blksize = s->s_blocksize;
60 }
61 return 0; 54 return 0;
62} 55}
63 56
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 20551a1b8a56..e503f858fba8 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -16,7 +16,7 @@
16 16
17struct vfsmount *sysfs_mount; 17struct vfsmount *sysfs_mount;
18struct super_block * sysfs_sb = NULL; 18struct super_block * sysfs_sb = NULL;
19kmem_cache_t *sysfs_dir_cachep; 19struct kmem_cache *sysfs_dir_cachep;
20 20
21static struct super_operations sysfs_ops = { 21static struct super_operations sysfs_ops = {
22 .statfs = simple_statfs, 22 .statfs = simple_statfs,
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 6f3d6bd52887..bd7cec295dab 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -1,6 +1,6 @@
1 1
2extern struct vfsmount * sysfs_mount; 2extern struct vfsmount * sysfs_mount;
3extern kmem_cache_t *sysfs_dir_cachep; 3extern struct kmem_cache *sysfs_dir_cachep;
4 4
5extern struct inode * sysfs_new_inode(mode_t mode, struct sysfs_dirent *); 5extern struct inode * sysfs_new_inode(mode_t mode, struct sysfs_dirent *);
6extern int sysfs_create(struct dentry *, int mode, int (*init)(struct inode *)); 6extern int sysfs_create(struct dentry *, int mode, int (*init)(struct inode *));
diff --git a/fs/sysv/CHANGES b/fs/sysv/CHANGES
deleted file mode 100644
index 66ea6e92be66..000000000000
--- a/fs/sysv/CHANGES
+++ /dev/null
@@ -1,60 +0,0 @@
1Mon, 15 Dec 1997 Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
2 * namei.c: struct sysv_dir_inode_operations updated to use dentries.
3
4Fri, 23 Jan 1998 Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
5 * inode.c: corrected 1 track offset setting (in sb->sv_block_base).
6 Originally it was overridden (by setting to zero)
7 in detected_[xenix,sysv4,sysv2,coherent]. Thanks
8 to Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl>
9 for identifying the problem.
10
11Tue, 27 Jan 1998 Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
12 * inode.c: added 2048-byte block support to SystemV FS.
13 Merged detected_bs[512,1024,2048]() into one function:
14 void detected_bs (u_char type, struct super_block *sb).
15 Thanks to Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl>
16 for the patch.
17
18Wed, 4 Feb 1998 Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
19 * namei.c: removed static subdir(); is_subdir() from dcache.c
20 is used instead. Cosmetic changes.
21
22Thu, 3 Dec 1998 Al Viro (viro@parcelfarce.linux.theplanet.co.uk)
23 * namei.c (sysv_rmdir):
24 Bugectomy: old check for victim being busy
25 (inode->i_count) wasn't replaced (with checking
26 dentry->d_count) and escaped Linus in the last round
27 of changes. Shot and buried.
28
29Wed, 9 Dec 1998 AV
30 * namei.c (do_sysv_rename):
31 Fixed incorrect check for other owners + race.
32 Removed checks that went to VFS.
33 * namei.c (sysv_unlink):
34 Removed checks that went to VFS.
35
36Thu, 10 Dec 1998 AV
37 * namei.c (do_mknod):
38 Removed dead code - mknod is never asked to
39 create a symlink or directory. Incidentially,
40 it wouldn't do it right if it would be called.
41
42Sat, 26 Dec 1998 KGB
43 * inode.c (detect_sysv4):
44 Added detection of expanded s_type field (0x10,
45 0x20 and 0x30). Forced read-only access in this case.
46
47Sun, 21 Mar 1999 AV
48 * namei.c (sysv_link):
49 Fixed i_count usage that resulted in dcache corruption.
50 * inode.c:
51 Filled ->delete_inode() method with sysv_delete_inode().
52 sysv_put_inode() is gone, as it tried to do ->delete_
53 _inode()'s job.
54 * ialloc.c: (sysv_free_inode):
55 Fixed race.
56
57Sun, 30 Apr 1999 AV
58 * namei.c (sysv_mknod):
59 Removed dead code (S_IFREG case is now passed to
60 ->create() by VFS).
diff --git a/fs/sysv/ChangeLog b/fs/sysv/ChangeLog
deleted file mode 100644
index f403f8b91b80..000000000000
--- a/fs/sysv/ChangeLog
+++ /dev/null
@@ -1,106 +0,0 @@
1Thu Feb 14 2002 Andrew Morton <akpm@zip.com.au>
2
3 * dir_commit_chunk(): call writeout_one_page() as well as
4 waitfor_one_page() for IS_SYNC directories, so that we
5 actually do sync the directory. (forward-port from 2.4).
6
7Thu Feb 7 2002 Alexander Viro <viro@parcelfarce.linux.theplanet.co.uk>
8
9 * super.c: switched to ->get_sb()
10 * ChangeLog: fixed dates ;-)
11
122002-01-24 David S. Miller <davem@redhat.com>
13
14 * inode.c: Include linux/init.h
15
16Mon Jan 21 2002 Alexander Viro <viro@parcelfarce.linux.theplanet.co.uk>
17 * ialloc.c (sysv_new_inode): zero SYSV_I(inode)->i_data out.
18 * i_vnode renamed to vfs_inode. Sorry, but let's keep that
19 consistent.
20
21Sat Jan 19 2002 Christoph Hellwig <hch@infradead.org>
22
23 * include/linux/sysv_fs.h (SYSV_I): Get fs-private inode data using
24 list_entry() instead of inode->u.
25 * include/linux/sysv_fs_i.h: Add 'struct inode i_vnode' field to
26 sysv_inode_info structure.
27 * inode.c: Include <linux/slab.h>, implement alloc_inode/destroy_inode
28 sop methods, add infrastructure for per-fs inode slab cache.
29 * super.c (init_sysv_fs): Initialize inode cache, recover properly
30 in the case of failed register_filesystem for V7.
31 (exit_sysv_fs): Destroy inode cache.
32
33Sat Jan 19 2002 Christoph Hellwig <hch@infradead.org>
34
35 * include/linux/sysv_fs.h: Include <linux/sysv_fs_i.h>, declare SYSV_I().
36 * dir.c (sysv_find_entry): Use SYSV_I() instead of ->u.sysv_i to
37 access fs-private inode data.
38 * ialloc.c (sysv_new_inode): Likewise.
39 * inode.c (sysv_read_inode): Likewise.
40 (sysv_update_inode): Likewise.
41 * itree.c (get_branch): Likewise.
42 (sysv_truncate): Likewise.
43 * symlink.c (sysv_readlink): Likewise.
44 (sysv_follow_link): Likewise.
45
46Fri Jan 4 2002 Alexander Viro <viro@parcelfarce.linux.theplanet.co.uk>
47
48 * ialloc.c (sysv_free_inode): Use sb->s_id instead of bdevname().
49 * inode.c (sysv_read_inode): Likewise.
50 (sysv_update_inode): Likewise.
51 (sysv_sync_inode): Likewise.
52 * super.c (detect_sysv): Likewise.
53 (complete_read_super): Likewise.
54 (sysv_read_super): Likewise.
55 (v7_read_super): Likewise.
56
57Sun Dec 30 2001 Manfred Spraul <manfred@colorfullife.com>
58
59 * dir.c (dir_commit_chunk): Do not set dir->i_version.
60 (sysv_readdir): Likewise.
61
62Thu Dec 27 2001 Alexander Viro <viro@parcelfarce.linux.theplanet.co.uk>
63
64 * itree.c (get_block): Use map_bh() to fill out bh_result.
65
66Tue Dec 25 2001 Alexander Viro <viro@parcelfarce.linux.theplanet.co.uk>
67
68 * super.c (sysv_read_super): Use sb_set_blocksize() to set blocksize.
69 (v7_read_super): Likewise.
70
71Tue Nov 27 2001 Alexander Viro <viro@parcelfarce.linux.theplanet.co.uk>
72
73 * itree.c (get_block): Change type for iblock argument to sector_t.
74 * super.c (sysv_read_super): Set s_blocksize early.
75 (v7_read_super): Likewise.
76 * balloc.c (sysv_new_block): Use sb_bread(). instead of bread().
77 (sysv_count_free_blocks): Likewise.
78 * ialloc.c (sysv_raw_inode): Likewise.
79 * itree.c (get_branch): Likewise.
80 (free_branches): Likewise.
81 * super.c (sysv_read_super): Likewise.
82 (v7_read_super): Likewise.
83
84Sat Dec 15 2001 Christoph Hellwig <hch@infradead.org>
85
86 * inode.c (sysv_read_inode): Mark inode as bad in case of failure.
87 * super.c (complete_read_super): Check for bad root inode.
88
89Wed Nov 21 2001 Andrew Morton <andrewm@uow.edu.au>
90
91 * file.c (sysv_sync_file): Call fsync_inode_data_buffers.
92
93Fri Oct 26 2001 Christoph Hellwig <hch@infradead.org>
94
95 * dir.c, ialloc.c, namei.c, include/linux/sysv_fs_i.h:
96 Implement per-Inode lookup offset cache.
97 Modelled after Ted's ext2 patch.
98
99Fri Oct 26 2001 Christoph Hellwig <hch@infradead.org>
100
101 * inode.c, super.c, include/linux/sysv_fs.h,
102 include/linux/sysv_fs_sb.h:
103 Remove symlink faking. Noone really wants to use these as
104 linux filesystems and native OSes don't support it anyway.
105
106
diff --git a/fs/sysv/INTRO b/fs/sysv/INTRO
deleted file mode 100644
index de4e4d17cac6..000000000000
--- a/fs/sysv/INTRO
+++ /dev/null
@@ -1,182 +0,0 @@
1This is the implementation of the SystemV/Coherent filesystem for Linux.
2It grew out of separate filesystem implementations
3
4 Xenix FS Doug Evans <dje@cygnus.com> June 1992
5 SystemV FS Paul B. Monday <pmonday@eecs.wsu.edu> March-June 1993
6 Coherent FS B. Haible <haible@ma2s2.mathematik.uni-karlsruhe.de> June 1993
7
8and was merged together in July 1993.
9
10These filesystems are rather similar. Here is a comparison with Minix FS:
11
12* Linux fdisk reports on partitions
13 - Minix FS 0x81 Linux/Minix
14 - Xenix FS ??
15 - SystemV FS ??
16 - Coherent FS 0x08 AIX bootable
17
18* Size of a block or zone (data allocation unit on disk)
19 - Minix FS 1024
20 - Xenix FS 1024 (also 512 ??)
21 - SystemV FS 1024 (also 512 and 2048)
22 - Coherent FS 512
23
24* General layout: all have one boot block, one super block and
25 separate areas for inodes and for directories/data.
26 On SystemV Release 2 FS (e.g. Microport) the first track is reserved and
27 all the block numbers (including the super block) are offset by one track.
28
29* Byte ordering of "short" (16 bit entities) on disk:
30 - Minix FS little endian 0 1
31 - Xenix FS little endian 0 1
32 - SystemV FS little endian 0 1
33 - Coherent FS little endian 0 1
34 Of course, this affects only the file system, not the data of files on it!
35
36* Byte ordering of "long" (32 bit entities) on disk:
37 - Minix FS little endian 0 1 2 3
38 - Xenix FS little endian 0 1 2 3
39 - SystemV FS little endian 0 1 2 3
40 - Coherent FS PDP-11 2 3 0 1
41 Of course, this affects only the file system, not the data of files on it!
42
43* Inode on disk: "short", 0 means non-existent, the root dir ino is:
44 - Minix FS 1
45 - Xenix FS, SystemV FS, Coherent FS 2
46
47* Maximum number of hard links to a file:
48 - Minix FS 250
49 - Xenix FS ??
50 - SystemV FS ??
51 - Coherent FS >=10000
52
53* Free inode management:
54 - Minix FS a bitmap
55 - Xenix FS, SystemV FS, Coherent FS
56 There is a cache of a certain number of free inodes in the super-block.
57 When it is exhausted, new free inodes are found using a linear search.
58
59* Free block management:
60 - Minix FS a bitmap
61 - Xenix FS, SystemV FS, Coherent FS
62 Free blocks are organized in a "free list". Maybe a misleading term,
63 since it is not true that every free block contains a pointer to
64 the next free block. Rather, the free blocks are organized in chunks
65 of limited size, and every now and then a free block contains pointers
66 to the free blocks pertaining to the next chunk; the first of these
67 contains pointers and so on. The list terminates with a "block number"
68 0 on Xenix FS and SystemV FS, with a block zeroed out on Coherent FS.
69
70* Super-block location:
71 - Minix FS block 1 = bytes 1024..2047
72 - Xenix FS block 1 = bytes 1024..2047
73 - SystemV FS bytes 512..1023
74 - Coherent FS block 1 = bytes 512..1023
75
76* Super-block layout:
77 - Minix FS
78 unsigned short s_ninodes;
79 unsigned short s_nzones;
80 unsigned short s_imap_blocks;
81 unsigned short s_zmap_blocks;
82 unsigned short s_firstdatazone;
83 unsigned short s_log_zone_size;
84 unsigned long s_max_size;
85 unsigned short s_magic;
86 - Xenix FS, SystemV FS, Coherent FS
87 unsigned short s_firstdatazone;
88 unsigned long s_nzones;
89 unsigned short s_fzone_count;
90 unsigned long s_fzones[NICFREE];
91 unsigned short s_finode_count;
92 unsigned short s_finodes[NICINOD];
93 char s_flock;
94 char s_ilock;
95 char s_modified;
96 char s_rdonly;
97 unsigned long s_time;
98 short s_dinfo[4]; -- SystemV FS only
99 unsigned long s_free_zones;
100 unsigned short s_free_inodes;
101 short s_dinfo[4]; -- Xenix FS only
102 unsigned short s_interleave_m,s_interleave_n; -- Coherent FS only
103 char s_fname[6];
104 char s_fpack[6];
105 then they differ considerably:
106 Xenix FS
107 char s_clean;
108 char s_fill[371];
109 long s_magic;
110 long s_type;
111 SystemV FS
112 long s_fill[12 or 14];
113 long s_state;
114 long s_magic;
115 long s_type;
116 Coherent FS
117 unsigned long s_unique;
118 Note that Coherent FS has no magic.
119
120* Inode layout:
121 - Minix FS
122 unsigned short i_mode;
123 unsigned short i_uid;
124 unsigned long i_size;
125 unsigned long i_time;
126 unsigned char i_gid;
127 unsigned char i_nlinks;
128 unsigned short i_zone[7+1+1];
129 - Xenix FS, SystemV FS, Coherent FS
130 unsigned short i_mode;
131 unsigned short i_nlink;
132 unsigned short i_uid;
133 unsigned short i_gid;
134 unsigned long i_size;
135 unsigned char i_zone[3*(10+1+1+1)];
136 unsigned long i_atime;
137 unsigned long i_mtime;
138 unsigned long i_ctime;
139
140* Regular file data blocks are organized as
141 - Minix FS
142 7 direct blocks
143 1 indirect block (pointers to blocks)
144 1 double-indirect block (pointer to pointers to blocks)
145 - Xenix FS, SystemV FS, Coherent FS
146 10 direct blocks
147 1 indirect block (pointers to blocks)
148 1 double-indirect block (pointer to pointers to blocks)
149 1 triple-indirect block (pointer to pointers to pointers to blocks)
150
151* Inode size, inodes per block
152 - Minix FS 32 32
153 - Xenix FS 64 16
154 - SystemV FS 64 16
155 - Coherent FS 64 8
156
157* Directory entry on disk
158 - Minix FS
159 unsigned short inode;
160 char name[14/30];
161 - Xenix FS, SystemV FS, Coherent FS
162 unsigned short inode;
163 char name[14];
164
165* Dir entry size, dir entries per block
166 - Minix FS 16/32 64/32
167 - Xenix FS 16 64
168 - SystemV FS 16 64
169 - Coherent FS 16 32
170
171* How to implement symbolic links such that the host fsck doesn't scream:
172 - Minix FS normal
173 - Xenix FS kludge: as regular files with chmod 1000
174 - SystemV FS ??
175 - Coherent FS kludge: as regular files with chmod 1000
176
177
178Notation: We often speak of a "block" but mean a zone (the allocation unit)
179and not the disk driver's notion of "block".
180
181
182Bruno Haible <haible@ma2s2.mathematik.uni-karlsruhe.de>
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index d63c5e48b050..ead9864567e3 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -301,13 +301,13 @@ static void sysv_delete_inode(struct inode *inode)
301 unlock_kernel(); 301 unlock_kernel();
302} 302}
303 303
304static kmem_cache_t *sysv_inode_cachep; 304static struct kmem_cache *sysv_inode_cachep;
305 305
306static struct inode *sysv_alloc_inode(struct super_block *sb) 306static struct inode *sysv_alloc_inode(struct super_block *sb)
307{ 307{
308 struct sysv_inode_info *si; 308 struct sysv_inode_info *si;
309 309
310 si = kmem_cache_alloc(sysv_inode_cachep, SLAB_KERNEL); 310 si = kmem_cache_alloc(sysv_inode_cachep, GFP_KERNEL);
311 if (!si) 311 if (!si)
312 return NULL; 312 return NULL;
313 return &si->vfs_inode; 313 return &si->vfs_inode;
@@ -318,7 +318,7 @@ static void sysv_destroy_inode(struct inode *inode)
318 kmem_cache_free(sysv_inode_cachep, SYSV_I(inode)); 318 kmem_cache_free(sysv_inode_cachep, SYSV_I(inode));
319} 319}
320 320
321static void init_once(void *p, kmem_cache_t *cachep, unsigned long flags) 321static void init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
322{ 322{
323 struct sysv_inode_info *si = (struct sysv_inode_info *)p; 323 struct sysv_inode_info *si = (struct sysv_inode_info *)p;
324 324
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 1aea6a4f9a4a..1dbc2955f02e 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -107,12 +107,12 @@ static struct file_system_type udf_fstype = {
107 .fs_flags = FS_REQUIRES_DEV, 107 .fs_flags = FS_REQUIRES_DEV,
108}; 108};
109 109
110static kmem_cache_t * udf_inode_cachep; 110static struct kmem_cache * udf_inode_cachep;
111 111
112static struct inode *udf_alloc_inode(struct super_block *sb) 112static struct inode *udf_alloc_inode(struct super_block *sb)
113{ 113{
114 struct udf_inode_info *ei; 114 struct udf_inode_info *ei;
115 ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, SLAB_KERNEL); 115 ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL);
116 if (!ei) 116 if (!ei)
117 return NULL; 117 return NULL;
118 118
@@ -130,7 +130,7 @@ static void udf_destroy_inode(struct inode *inode)
130 kmem_cache_free(udf_inode_cachep, UDF_I(inode)); 130 kmem_cache_free(udf_inode_cachep, UDF_I(inode));
131} 131}
132 132
133static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 133static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
134{ 134{
135 struct udf_inode_info *ei = (struct udf_inode_info *) foo; 135 struct udf_inode_info *ei = (struct udf_inode_info *) foo;
136 136
@@ -1709,7 +1709,7 @@ void udf_error(struct super_block *sb, const char *function,
1709 sb->s_dirt = 1; 1709 sb->s_dirt = 1;
1710 } 1710 }
1711 va_start(args, fmt); 1711 va_start(args, fmt);
1712 vsprintf(error_buf, fmt, args); 1712 vsnprintf(error_buf, sizeof(error_buf), fmt, args);
1713 va_end(args); 1713 va_end(args);
1714 printk (KERN_CRIT "UDF-fs error (device %s): %s: %s\n", 1714 printk (KERN_CRIT "UDF-fs error (device %s): %s: %s\n",
1715 sb->s_id, function, error_buf); 1715 sb->s_id, function, error_buf);
@@ -1721,7 +1721,7 @@ void udf_warning(struct super_block *sb, const char *function,
1721 va_list args; 1721 va_list args;
1722 1722
1723 va_start (args, fmt); 1723 va_start (args, fmt);
1724 vsprintf(error_buf, fmt, args); 1724 vsnprintf(error_buf, sizeof(error_buf), fmt, args);
1725 va_end(args); 1725 va_end(args);
1726 printk(KERN_WARNING "UDF-fs warning (device %s): %s: %s\n", 1726 printk(KERN_WARNING "UDF-fs warning (device %s): %s: %s\n",
1727 sb->s_id, function, error_buf); 1727 sb->s_id, function, error_buf);
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index ec79e3091d1b..8a8e9382ec09 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -224,7 +224,7 @@ void ufs_error (struct super_block * sb, const char * function,
224 sb->s_flags |= MS_RDONLY; 224 sb->s_flags |= MS_RDONLY;
225 } 225 }
226 va_start (args, fmt); 226 va_start (args, fmt);
227 vsprintf (error_buf, fmt, args); 227 vsnprintf (error_buf, sizeof(error_buf), fmt, args);
228 va_end (args); 228 va_end (args);
229 switch (UFS_SB(sb)->s_mount_opt & UFS_MOUNT_ONERROR) { 229 switch (UFS_SB(sb)->s_mount_opt & UFS_MOUNT_ONERROR) {
230 case UFS_MOUNT_ONERROR_PANIC: 230 case UFS_MOUNT_ONERROR_PANIC:
@@ -255,7 +255,7 @@ void ufs_panic (struct super_block * sb, const char * function,
255 sb->s_dirt = 1; 255 sb->s_dirt = 1;
256 } 256 }
257 va_start (args, fmt); 257 va_start (args, fmt);
258 vsprintf (error_buf, fmt, args); 258 vsnprintf (error_buf, sizeof(error_buf), fmt, args);
259 va_end (args); 259 va_end (args);
260 sb->s_flags |= MS_RDONLY; 260 sb->s_flags |= MS_RDONLY;
261 printk (KERN_CRIT "UFS-fs panic (device %s): %s: %s\n", 261 printk (KERN_CRIT "UFS-fs panic (device %s): %s: %s\n",
@@ -268,7 +268,7 @@ void ufs_warning (struct super_block * sb, const char * function,
268 va_list args; 268 va_list args;
269 269
270 va_start (args, fmt); 270 va_start (args, fmt);
271 vsprintf (error_buf, fmt, args); 271 vsnprintf (error_buf, sizeof(error_buf), fmt, args);
272 va_end (args); 272 va_end (args);
273 printk (KERN_WARNING "UFS-fs warning (device %s): %s: %s\n", 273 printk (KERN_WARNING "UFS-fs warning (device %s): %s: %s\n",
274 sb->s_id, function, error_buf); 274 sb->s_id, function, error_buf);
@@ -1204,12 +1204,12 @@ static int ufs_statfs(struct dentry *dentry, struct kstatfs *buf)
1204 return 0; 1204 return 0;
1205} 1205}
1206 1206
1207static kmem_cache_t * ufs_inode_cachep; 1207static struct kmem_cache * ufs_inode_cachep;
1208 1208
1209static struct inode *ufs_alloc_inode(struct super_block *sb) 1209static struct inode *ufs_alloc_inode(struct super_block *sb)
1210{ 1210{
1211 struct ufs_inode_info *ei; 1211 struct ufs_inode_info *ei;
1212 ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, SLAB_KERNEL); 1212 ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, GFP_KERNEL);
1213 if (!ei) 1213 if (!ei)
1214 return NULL; 1214 return NULL;
1215 ei->vfs_inode.i_version = 1; 1215 ei->vfs_inode.i_version = 1;
@@ -1221,7 +1221,7 @@ static void ufs_destroy_inode(struct inode *inode)
1221 kmem_cache_free(ufs_inode_cachep, UFS_I(inode)); 1221 kmem_cache_free(ufs_inode_cachep, UFS_I(inode));
1222} 1222}
1223 1223
1224static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 1224static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
1225{ 1225{
1226 struct ufs_inode_info *ei = (struct ufs_inode_info *) foo; 1226 struct ufs_inode_info *ei = (struct ufs_inode_info *) foo;
1227 1227
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 28fce6c239b5..7dd12bb1d62b 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -299,7 +299,7 @@ static inline void *get_usb_offset(struct ufs_sb_private_info *uspi,
299 299
300#define ubh_get_addr16(ubh,begin) \ 300#define ubh_get_addr16(ubh,begin) \
301 (((__fs16*)((ubh)->bh[(begin) >> (uspi->s_fshift-1)]->b_data)) + \ 301 (((__fs16*)((ubh)->bh[(begin) >> (uspi->s_fshift-1)]->b_data)) + \
302 ((begin) & (uspi->fsize>>1) - 1))) 302 ((begin) & ((uspi->fsize>>1) - 1)))
303 303
304#define ubh_get_addr32(ubh,begin) \ 304#define ubh_get_addr32(ubh,begin) \
305 (((__fs32*)((ubh)->bh[(begin) >> (uspi->s_fshift-2)]->b_data)) + \ 305 (((__fs32*)((ubh)->bh[(begin) >> (uspi->s_fshift-2)]->b_data)) + \
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index eef4a0ba11e9..4fb01ffdfd1a 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -32,6 +32,7 @@
32#include <linux/kthread.h> 32#include <linux/kthread.h>
33#include <linux/migrate.h> 33#include <linux/migrate.h>
34#include <linux/backing-dev.h> 34#include <linux/backing-dev.h>
35#include <linux/freezer.h>
35 36
36STATIC kmem_zone_t *xfs_buf_zone; 37STATIC kmem_zone_t *xfs_buf_zone;
37STATIC kmem_shaker_t xfs_buf_shake; 38STATIC kmem_shaker_t xfs_buf_shake;
@@ -1826,11 +1827,11 @@ xfs_buf_init(void)
1826 if (!xfs_buf_zone) 1827 if (!xfs_buf_zone)
1827 goto out_free_trace_buf; 1828 goto out_free_trace_buf;
1828 1829
1829 xfslogd_workqueue = create_workqueue("xfslogd"); 1830 xfslogd_workqueue = create_freezeable_workqueue("xfslogd");
1830 if (!xfslogd_workqueue) 1831 if (!xfslogd_workqueue)
1831 goto out_free_buf_zone; 1832 goto out_free_buf_zone;
1832 1833
1833 xfsdatad_workqueue = create_workqueue("xfsdatad"); 1834 xfsdatad_workqueue = create_freezeable_workqueue("xfsdatad");
1834 if (!xfsdatad_workqueue) 1835 if (!xfsdatad_workqueue)
1835 goto out_destroy_xfslogd_workqueue; 1836 goto out_destroy_xfslogd_workqueue;
1836 1837
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index de05abbbe7fd..b93265b7c79c 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -56,6 +56,7 @@
56#include <linux/mempool.h> 56#include <linux/mempool.h>
57#include <linux/writeback.h> 57#include <linux/writeback.h>
58#include <linux/kthread.h> 58#include <linux/kthread.h>
59#include <linux/freezer.h>
59 60
60STATIC struct quotactl_ops xfs_quotactl_operations; 61STATIC struct quotactl_ops xfs_quotactl_operations;
61STATIC struct super_operations xfs_super_operations; 62STATIC struct super_operations xfs_super_operations;