author    Trond Myklebust <Trond.Myklebust@netapp.com>  2006-03-23 23:44:19 -0500
committer Trond Myklebust <Trond.Myklebust@netapp.com>  2006-03-23 23:44:19 -0500
commit    1ebbe2b20091d306453a5cf480a87e6cd28ae76f
tree      f5cd7a0fa69b8b1938cb5a0faed2e7b0628072a5 /fs
parent    ac58c9059da8886b5e8cde012a80266b18ca146e
parent    674a396c6d2ba0341ebdd7c1c9950f32f018e2dd

Merge branch 'linus'
Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/mux.c | 11
-rw-r--r--  fs/9p/vfs_inode.c | 3
-rw-r--r--  fs/Kconfig | 12
-rw-r--r--  fs/Makefile | 1
-rw-r--r--  fs/adfs/file.c | 4
-rw-r--r--  fs/autofs4/autofs_i.h | 3
-rw-r--r--  fs/autofs4/inode.c | 2
-rw-r--r--  fs/autofs4/waitq.c | 16
-rw-r--r--  fs/befs/linuxvfs.c | 2
-rw-r--r--  fs/bio.c | 12
-rw-r--r--  fs/block_dev.c | 28
-rw-r--r--  fs/buffer.c | 68
-rw-r--r--  fs/char_dev.c | 17
-rw-r--r--  fs/cifs/CHANGES | 2
-rw-r--r--  fs/cifs/dir.c | 8
-rw-r--r--  fs/cifs/fcntl.c | 4
-rw-r--r--  fs/cifs/file.c | 4
-rw-r--r--  fs/cifs/inode.c | 16
-rw-r--r--  fs/cifs/link.c | 16
-rw-r--r--  fs/cifs/readdir.c | 4
-rw-r--r--  fs/cifs/xattr.c | 16
-rw-r--r--  fs/compat_ioctl.c | 1
-rw-r--r--  fs/debugfs/file.c | 46
-rw-r--r--  fs/devpts/inode.c | 76
-rw-r--r--  fs/dquot.c | 167
-rw-r--r--  fs/eventpoll.c | 32
-rw-r--r--  fs/ext2/namei.c | 54
-rw-r--r--  fs/ext3/dir.c | 52
-rw-r--r--  fs/ext3/file.c | 4
-rw-r--r--  fs/ext3/inode.c | 16
-rw-r--r--  fs/ext3/ioctl.c | 4
-rw-r--r--  fs/ext3/super.c | 6
-rw-r--r--  fs/fat/dir.c | 2
-rw-r--r--  fs/fat/fatent.c | 6
-rw-r--r--  fs/fat/inode.c | 2
-rw-r--r--  fs/fcntl.c | 9
-rw-r--r--  fs/file.c | 34
-rw-r--r--  fs/file_table.c | 10
-rw-r--r--  fs/hpfs/hpfs_fn.h | 5
-rw-r--r--  fs/hpfs/inode.c | 10
-rw-r--r--  fs/hpfs/namei.c | 60
-rw-r--r--  fs/hpfs/super.c | 4
-rw-r--r--  fs/hugetlbfs/inode.c | 92
-rw-r--r--  fs/inode.c | 18
-rw-r--r--  fs/inotify.c | 116
-rw-r--r--  fs/isofs/joliet.c | 2
-rw-r--r--  fs/jbd/checkpoint.c | 4
-rw-r--r--  fs/jbd/journal.c | 4
-rw-r--r--  fs/jbd/transaction.c | 4
-rw-r--r--  fs/jffs/inode-v23.c | 86
-rw-r--r--  fs/jffs/intrep.c | 6
-rw-r--r--  fs/jffs/jffs_fm.c | 2
-rw-r--r--  fs/jffs/jffs_fm.h | 5
-rw-r--r--  fs/libfs.c | 14
-rw-r--r--  fs/minix/namei.c | 48
-rw-r--r--  fs/namei.c | 12
-rw-r--r--  fs/ncpfs/file.c | 4
-rw-r--r--  fs/ncpfs/inode.c | 6
-rw-r--r--  fs/ncpfs/ncplib_kernel.c | 4
-rw-r--r--  fs/ncpfs/sock.c | 34
-rw-r--r--  fs/nls/Kconfig | 2
-rw-r--r--  fs/ntfs/ChangeLog | 30
-rw-r--r--  fs/ntfs/Makefile | 2
-rw-r--r--  fs/ntfs/aops.c | 14
-rw-r--r--  fs/ntfs/attrib.c | 35
-rw-r--r--  fs/ntfs/compress.c | 4
-rw-r--r--  fs/ntfs/dir.c | 2
-rw-r--r--  fs/ntfs/file.c | 16
-rw-r--r--  fs/ntfs/inode.c | 111
-rw-r--r--  fs/ntfs/inode.h | 13
-rw-r--r--  fs/ntfs/layout.h | 44
-rw-r--r--  fs/ntfs/mft.c | 68
-rw-r--r--  fs/ntfs/mft.h | 5
-rw-r--r--  fs/ntfs/namei.c | 9
-rw-r--r--  fs/ntfs/ntfs.h | 2
-rw-r--r--  fs/ntfs/runlist.c | 12
-rw-r--r--  fs/ntfs/super.c | 84
-rw-r--r--  fs/ntfs/unistr.c | 51
-rw-r--r--  fs/ocfs2/super.c | 2
-rw-r--r--  fs/open.c | 8
-rw-r--r--  fs/proc/proc_misc.c | 2
-rw-r--r--  fs/qnx4/file.c | 3
-rw-r--r--  fs/quota.c | 6
-rw-r--r--  fs/quota_v2.c | 2
-rw-r--r--  fs/ramfs/file-mmu.c | 11
-rw-r--r--  fs/ramfs/file-nommu.c | 3
-rw-r--r--  fs/relayfs/Makefile | 4
-rw-r--r--  fs/relayfs/buffers.c | 190
-rw-r--r--  fs/relayfs/buffers.h | 12
-rw-r--r--  fs/relayfs/inode.c | 581
-rw-r--r--  fs/relayfs/relay.c | 482
-rw-r--r--  fs/relayfs/relay.h | 8
-rw-r--r--  fs/seq_file.c | 10
-rw-r--r--  fs/super.c | 10
-rw-r--r--  fs/sysfs/dir.c | 37
-rw-r--r--  fs/sysfs/file.c | 9
-rw-r--r--  fs/sysfs/inode.c | 9
-rw-r--r--  fs/sysfs/symlink.c | 6
-rw-r--r--  fs/sysfs/sysfs.h | 1
-rw-r--r--  fs/sysv/namei.c | 48
-rw-r--r--  fs/udf/balloc.c | 36
-rw-r--r--  fs/udf/ialloc.c | 8
-rw-r--r--  fs/udf/super.c | 2
-rw-r--r--  fs/ufs/file.c | 10
-rw-r--r--  fs/ufs/namei.c | 48
-rw-r--r--  fs/xfs/Makefile-linux-2.6 | 40
-rw-r--r--  fs/xfs/linux-2.6/kmem.h | 91
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 484
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.h | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 8
-rw-r--r--  fs/xfs/linux-2.6/xfs_export.c | 37
-rw-r--r--  fs/xfs/linux-2.6/xfs_file.c | 187
-rw-r--r--  fs/xfs/linux-2.6/xfs_fs_subr.c | 6
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c | 138
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl32.c | 12
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl32.h | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c | 317
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.h | 12
-rw-r--r--  fs/xfs/linux-2.6/xfs_linux.h | 10
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.c | 51
-rw-r--r--  fs/xfs/linux-2.6/xfs_stats.c | 7
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 213
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.h | 7
-rw-r--r--  fs/xfs/linux-2.6/xfs_sysctl.c | 3
-rw-r--r--  fs/xfs/linux-2.6/xfs_vfs.c | 19
-rw-r--r--  fs/xfs/linux-2.6/xfs_vfs.h | 3
-rw-r--r--  fs/xfs/linux-2.6/xfs_vnode.c | 35
-rw-r--r--  fs/xfs/linux-2.6/xfs_vnode.h | 33
-rw-r--r--  fs/xfs/quota/xfs_dquot_item.c | 2
-rw-r--r--  fs/xfs/quota/xfs_qm.c | 13
-rw-r--r--  fs/xfs/quota/xfs_qm_bhv.c | 4
-rw-r--r--  fs/xfs/support/ktrace.c | 4
-rw-r--r--  fs/xfs/support/uuid.c | 15
-rw-r--r--  fs/xfs/xfs_acl.h | 4
-rw-r--r--  fs/xfs/xfs_attr.c | 59
-rw-r--r--  fs/xfs/xfs_attr_leaf.c | 729
-rw-r--r--  fs/xfs/xfs_attr_leaf.h | 47
-rw-r--r--  fs/xfs/xfs_attr_sf.h | 8
-rw-r--r--  fs/xfs/xfs_bmap.c | 1305
-rw-r--r--  fs/xfs/xfs_bmap.h | 22
-rw-r--r--  fs/xfs/xfs_bmap_btree.c | 10
-rw-r--r--  fs/xfs/xfs_bmap_btree.h | 8
-rw-r--r--  fs/xfs/xfs_clnt.h | 2
-rw-r--r--  fs/xfs/xfs_da_btree.c | 409
-rw-r--r--  fs/xfs/xfs_da_btree.h | 16
-rw-r--r--  fs/xfs/xfs_dfrag.c | 4
-rw-r--r--  fs/xfs/xfs_dir.c | 32
-rw-r--r--  fs/xfs/xfs_dir2.h | 27
-rw-r--r--  fs/xfs/xfs_dir2_block.c | 193
-rw-r--r--  fs/xfs/xfs_dir2_block.h | 7
-rw-r--r--  fs/xfs/xfs_dir2_data.c | 240
-rw-r--r--  fs/xfs/xfs_dir2_data.h | 26
-rw-r--r--  fs/xfs/xfs_dir2_leaf.c | 285
-rw-r--r--  fs/xfs/xfs_dir2_leaf.h | 15
-rw-r--r--  fs/xfs/xfs_dir2_node.c | 303
-rw-r--r--  fs/xfs/xfs_dir2_node.h | 10
-rw-r--r--  fs/xfs/xfs_dir2_sf.c | 8
-rw-r--r--  fs/xfs/xfs_dir_leaf.c | 82
-rw-r--r--  fs/xfs/xfs_dir_sf.h | 24
-rw-r--r--  fs/xfs/xfs_dmapi.h | 10
-rw-r--r--  fs/xfs/xfs_fsops.c | 1
-rw-r--r--  fs/xfs/xfs_ialloc.c | 13
-rw-r--r--  fs/xfs/xfs_iget.c | 6
-rw-r--r--  fs/xfs/xfs_inode.c | 1303
-rw-r--r--  fs/xfs/xfs_inode.h | 78
-rw-r--r--  fs/xfs/xfs_iomap.c | 2
-rw-r--r--  fs/xfs/xfs_itable.c | 5
-rw-r--r--  fs/xfs/xfs_log_recover.c | 2
-rw-r--r--  fs/xfs/xfs_mount.c | 646
-rw-r--r--  fs/xfs/xfs_mount.h | 44
-rw-r--r--  fs/xfs/xfs_rw.h | 1
-rw-r--r--  fs/xfs/xfs_trans.c | 187
-rw-r--r--  fs/xfs/xfs_trans.h | 2
-rw-r--r--  fs/xfs/xfs_vfsops.c | 82
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 15
175 files changed, 5948 insertions(+), 5445 deletions(-)
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index ea1134eb47c8..8e8356c1c229 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -31,6 +31,7 @@
 #include <linux/poll.h>
 #include <linux/kthread.h>
 #include <linux/idr.h>
+#include <linux/mutex.h>
 
 #include "debug.h"
 #include "v9fs.h"
@@ -110,7 +111,7 @@ static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
 static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
 static void v9fs_mux_put_tag(struct v9fs_mux_data *, u16);
 
-static DECLARE_MUTEX(v9fs_mux_task_lock);
+static DEFINE_MUTEX(v9fs_mux_task_lock);
 static struct workqueue_struct *v9fs_mux_wq;
 
 static int v9fs_mux_num;
@@ -166,7 +167,7 @@ static int v9fs_mux_poll_start(struct v9fs_mux_data *m)
 
 	dprintk(DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, v9fs_mux_num,
 		v9fs_mux_poll_task_num);
-	up(&v9fs_mux_task_lock);
+	mutex_lock(&v9fs_mux_task_lock);
 
 	n = v9fs_mux_calc_poll_procs(v9fs_mux_num + 1);
 	if (n > v9fs_mux_poll_task_num) {
@@ -225,7 +226,7 @@ static int v9fs_mux_poll_start(struct v9fs_mux_data *m)
 	}
 
 	v9fs_mux_num++;
-	down(&v9fs_mux_task_lock);
+	mutex_unlock(&v9fs_mux_task_lock);
 
 	return 0;
 }
@@ -235,7 +236,7 @@ static void v9fs_mux_poll_stop(struct v9fs_mux_data *m)
 	int i;
 	struct v9fs_mux_poll_task *vpt;
 
-	up(&v9fs_mux_task_lock);
+	mutex_lock(&v9fs_mux_task_lock);
 	vpt = m->poll_task;
 	list_del(&m->mux_list);
 	for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
@@ -252,7 +253,7 @@ static void v9fs_mux_poll_stop(struct v9fs_mux_data *m)
 		v9fs_mux_poll_task_num--;
 	}
 	v9fs_mux_num--;
-	down(&v9fs_mux_task_lock);
+	mutex_unlock(&v9fs_mux_task_lock);
 }
 
 /**
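
Most of the locking changes merged here follow the single pattern visible above: binary semaphores used purely as sleeping locks become the mutex API, i.e. DECLARE_MUTEX()/down()/up() turn into DEFINE_MUTEX()/mutex_lock()/mutex_unlock(). As a standalone minimal sketch of the before/after shape (illustrative only, not code from the tree; the demo_* names are hypothetical):

#include <linux/mutex.h>

/*
 * Before the conversion this would have been a semaphore initialized to 1
 * and used only for mutual exclusion:
 *
 *	static DECLARE_MUTEX(demo_lock);
 *	down(&demo_lock);  ...  up(&demo_lock);
 *
 * After the conversion it is an explicit mutex, which also enables the
 * kernel's mutex debugging checks:
 */
static DEFINE_MUTEX(demo_lock);		/* hypothetical lock */
static int demo_count;			/* hypothetical shared state */

static void demo_inc(void)
{
	mutex_lock(&demo_lock);		/* was: down(&demo_lock) */
	demo_count++;
	mutex_unlock(&demo_lock);	/* was: up(&demo_lock) */
}
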
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 3ad8455f8577..651a9e14d9a9 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -614,6 +614,7 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
 
 	sb = dir->i_sb;
 	v9ses = v9fs_inode2v9ses(dir);
+	dentry->d_op = &v9fs_dentry_operations;
 	dirfid = v9fs_fid_lookup(dentry->d_parent);
 
 	if (!dirfid) {
@@ -681,8 +682,6 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
 		goto FreeFcall;
 
 	fid->qid = fcall->params.rstat.stat.qid;
-
-	dentry->d_op = &v9fs_dentry_operations;
 	v9fs_stat2inode(&fcall->params.rstat.stat, inode, inode->i_sb);
 
 	d_add(dentry, inode);
diff --git a/fs/Kconfig b/fs/Kconfig
index 9a4158f2a3ac..e207be68d4ca 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -859,18 +859,6 @@ config RAMFS
 	  To compile this as a module, choose M here: the module will be called
 	  ramfs.
 
-config RELAYFS_FS
-	tristate "Relayfs file system support"
-	---help---
-	  Relayfs is a high-speed data relay filesystem designed to provide
-	  an efficient mechanism for tools and facilities to relay large
-	  amounts of data from kernel space to user space.
-
-	  To compile this code as a module, choose M here: the module will be
-	  called relayfs.
-
-	  If unsure, say N.
-
 config CONFIGFS_FS
 	tristate "Userspace-driven configuration filesystem (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
diff --git a/fs/Makefile b/fs/Makefile
index 1db711319c80..080b3867be4d 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -91,7 +91,6 @@ obj-$(CONFIG_AUTOFS4_FS) += autofs4/
 obj-$(CONFIG_ADFS_FS)		+= adfs/
 obj-$(CONFIG_FUSE_FS)		+= fuse/
 obj-$(CONFIG_UDF_FS)		+= udf/
-obj-$(CONFIG_RELAYFS_FS)	+= relayfs/
 obj-$(CONFIG_SUN_OPENPROMFS)	+= openpromfs/
 obj-$(CONFIG_JFS_FS)		+= jfs/
 obj-$(CONFIG_XFS_FS)		+= xfs/
diff --git a/fs/adfs/file.c b/fs/adfs/file.c
index afebbfde6968..6af10885f9d6 100644
--- a/fs/adfs/file.c
+++ b/fs/adfs/file.c
@@ -19,11 +19,7 @@
  *
  * adfs regular file handling primitives
  */
-#include <linux/errno.h>
 #include <linux/fs.h>
-#include <linux/fcntl.h>
-#include <linux/time.h>
-#include <linux/stat.h>
 #include <linux/buffer_head.h>	/* for file_fsync() */
 #include <linux/adfs_fs.h>
 
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 385bed09b0d8..f54c5b21f876 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -13,6 +13,7 @@
 /* Internal header file for autofs */
 
 #include <linux/auto_fs4.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 
 /* This is the range of ioctl() numbers we claim as ours */
@@ -102,7 +103,7 @@ struct autofs_sb_info {
 	int reghost_enabled;
 	int needs_reghost;
 	struct super_block *sb;
-	struct semaphore wq_sem;
+	struct mutex wq_mutex;
 	spinlock_t fs_lock;
 	struct autofs_wait_queue *queues; /* Wait queue pointer */
 };
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 2d3082854a29..1ad98d48e550 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -269,7 +269,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
 	sbi->sb = s;
 	sbi->version = 0;
 	sbi->sub_version = 0;
-	init_MUTEX(&sbi->wq_sem);
+	mutex_init(&sbi->wq_mutex);
 	spin_lock_init(&sbi->fs_lock);
 	sbi->queues = NULL;
 	s->s_blocksize = 1024;
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 394ff36ef8f1..be78e9378c03 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -178,7 +178,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
 		return -ENOENT;
 	}
 
-	if (down_interruptible(&sbi->wq_sem)) {
+	if (mutex_lock_interruptible(&sbi->wq_mutex)) {
 		kfree(name);
 		return -EINTR;
 	}
@@ -194,7 +194,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
 	/* Can't wait for an expire if there's no mount */
 	if (notify == NFY_NONE && !d_mountpoint(dentry)) {
 		kfree(name);
-		up(&sbi->wq_sem);
+		mutex_unlock(&sbi->wq_mutex);
 		return -ENOENT;
 	}
 
@@ -202,7 +202,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
 		wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL);
 		if ( !wq ) {
 			kfree(name);
-			up(&sbi->wq_sem);
+			mutex_unlock(&sbi->wq_mutex);
 			return -ENOMEM;
 		}
 
@@ -218,10 +218,10 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
 		wq->status = -EINTR; /* Status return if interrupted */
 		atomic_set(&wq->wait_ctr, 2);
 		atomic_set(&wq->notified, 1);
-		up(&sbi->wq_sem);
+		mutex_unlock(&sbi->wq_mutex);
 	} else {
 		atomic_inc(&wq->wait_ctr);
-		up(&sbi->wq_sem);
+		mutex_unlock(&sbi->wq_mutex);
 		kfree(name);
 		DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d",
 			(unsigned long) wq->wait_queue_token, wq->len, wq->name, notify);
@@ -282,19 +282,19 @@ int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_tok
 {
 	struct autofs_wait_queue *wq, **wql;
 
-	down(&sbi->wq_sem);
+	mutex_lock(&sbi->wq_mutex);
 	for ( wql = &sbi->queues ; (wq = *wql) != 0 ; wql = &wq->next ) {
 		if ( wq->wait_queue_token == wait_queue_token )
 			break;
 	}
 
 	if ( !wq ) {
-		up(&sbi->wq_sem);
+		mutex_unlock(&sbi->wq_mutex);
 		return -EINVAL;
 	}
 
 	*wql = wq->next;	/* Unlink from chain */
-	up(&sbi->wq_sem);
+	mutex_unlock(&sbi->wq_mutex);
 	kfree(wq->name);
 	wq->name = NULL;	/* Do not wait on this queue */
 
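
autofs4 also converts its interruptible acquisition: down_interruptible() becomes mutex_lock_interruptible(), which returns nonzero if a signal arrives before the lock is taken, so anything allocated beforehand must be undone on that path. A hedged sketch of the same error-handling shape (the demo_* names are hypothetical):

#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>

static DEFINE_MUTEX(demo_wq_mutex);	/* hypothetical queue lock */

static int demo_enqueue(const char *src, size_t len)
{
	char *name = kmalloc(len + 1, GFP_KERNEL);

	if (!name)
		return -ENOMEM;
	memcpy(name, src, len);
	name[len] = '\0';

	/* A nonzero return means a signal interrupted the sleep; free the
	 * buffer and bail out, exactly as autofs4_wait() does above. */
	if (mutex_lock_interruptible(&demo_wq_mutex)) {
		kfree(name);
		return -EINTR;
	}
	/* ... link the name into the queue (queue takes ownership) ... */
	mutex_unlock(&demo_wq_mutex);
	return 0;
}
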
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 2d365cb8eec6..dd6048ce0532 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -561,7 +561,7 @@ befs_utf2nls(struct super_block *sb, const char *in,
  * @sb: Superblock
  * @src: Input string buffer in NLS format
  * @srclen: Length of input string in bytes
- * @dest: The output string in UTF8 format
+ * @dest: The output string in UTF-8 format
  * @destlen: Length of the output buffer
  *
  * Converts input string @src, which is in the format of the loaded NLS map,
diff --git a/fs/bio.c b/fs/bio.c
index 1f3bb501c262..0a8c59cb68f5 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
+#include <linux/blktrace_api.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
 #define BIO_POOL_SIZE 256
@@ -1095,6 +1096,9 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
 	if (!bp)
 		return bp;
 
+	blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
+				bi->bi_sector + first_sectors);
+
 	BUG_ON(bi->bi_vcnt != 1);
 	BUG_ON(bi->bi_idx != 0);
 	atomic_set(&bp->cnt, 3);
@@ -1243,11 +1247,11 @@ static int __init init_bio(void)
 		scale = 4;
 
 	/*
-	 * scale number of entries
+	 * Limit number of entries reserved -- mempools are only used when
+	 * the system is completely unable to allocate memory, so we only
+	 * need enough to make progress.
 	 */
-	bvec_pool_entries = megabytes * 2;
-	if (bvec_pool_entries > 256)
-		bvec_pool_entries = 256;
+	bvec_pool_entries = 1 + scale;
 
 	fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale);
 	if (!fs_bio_set)
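
The init_bio() hunk shrinks the reserved bvec pool for the reason given in its new comment: a mempool exists only to guarantee forward progress when the allocator is completely exhausted, so reserving a large number of entries buys nothing. A minimal sketch of a slab-backed mempool sized on the same reasoning (illustrative; the demo_* names are hypothetical, and kmem_cache_t is the 2.6-era spelling):

#include <linux/mempool.h>
#include <linux/slab.h>

struct demo_elem { int payload; };	/* hypothetical pool element */

static kmem_cache_t *demo_cache;
static mempool_t *demo_pool;

static int __init demo_pool_init(void)
{
	demo_cache = kmem_cache_create("demo_elem", sizeof(struct demo_elem),
				       0, 0, NULL, NULL);
	if (!demo_cache)
		return -ENOMEM;
	/* Reserve just enough objects to make progress under memory
	 * exhaustion, not enough to sustain throughput -- that is the
	 * mempool's only job. */
	demo_pool = mempool_create(2, mempool_alloc_slab, mempool_free_slab,
				   demo_cache);
	return demo_pool ? 0 : -ENOMEM;
}
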
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 6e50346fb1ee..44d05e6e34db 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -265,8 +265,8 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
 			    SLAB_CTOR_CONSTRUCTOR)
 	{
 		memset(bdev, 0, sizeof(*bdev));
-		sema_init(&bdev->bd_sem, 1);
-		sema_init(&bdev->bd_mount_sem, 1);
+		mutex_init(&bdev->bd_mutex);
+		mutex_init(&bdev->bd_mount_mutex);
 		INIT_LIST_HEAD(&bdev->bd_inodes);
 		INIT_LIST_HEAD(&bdev->bd_list);
 		inode_init_once(&ei->vfs_inode);
@@ -574,7 +574,7 @@ static int do_open(struct block_device *bdev, struct file *file)
 	}
 	owner = disk->fops->owner;
 
-	down(&bdev->bd_sem);
+	mutex_lock(&bdev->bd_mutex);
 	if (!bdev->bd_openers) {
 		bdev->bd_disk = disk;
 		bdev->bd_contains = bdev;
@@ -605,21 +605,21 @@ static int do_open(struct block_device *bdev, struct file *file)
 			if (ret)
 				goto out_first;
 			bdev->bd_contains = whole;
-			down(&whole->bd_sem);
+			mutex_lock(&whole->bd_mutex);
 			whole->bd_part_count++;
 			p = disk->part[part - 1];
 			bdev->bd_inode->i_data.backing_dev_info =
 			   whole->bd_inode->i_data.backing_dev_info;
 			if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) {
 				whole->bd_part_count--;
-				up(&whole->bd_sem);
+				mutex_unlock(&whole->bd_mutex);
 				ret = -ENXIO;
 				goto out_first;
 			}
 			kobject_get(&p->kobj);
 			bdev->bd_part = p;
 			bd_set_size(bdev, (loff_t) p->nr_sects << 9);
-			up(&whole->bd_sem);
+			mutex_unlock(&whole->bd_mutex);
 		}
 	} else {
 		put_disk(disk);
@@ -633,13 +633,13 @@ static int do_open(struct block_device *bdev, struct file *file)
 			if (bdev->bd_invalidated)
 				rescan_partitions(bdev->bd_disk, bdev);
 		} else {
-			down(&bdev->bd_contains->bd_sem);
+			mutex_lock(&bdev->bd_contains->bd_mutex);
 			bdev->bd_contains->bd_part_count++;
-			up(&bdev->bd_contains->bd_sem);
+			mutex_unlock(&bdev->bd_contains->bd_mutex);
 		}
 	}
 	bdev->bd_openers++;
-	up(&bdev->bd_sem);
+	mutex_unlock(&bdev->bd_mutex);
 	unlock_kernel();
 	return 0;
 
@@ -652,7 +652,7 @@ out_first:
 	put_disk(disk);
 	module_put(owner);
 out:
-	up(&bdev->bd_sem);
+	mutex_unlock(&bdev->bd_mutex);
 	unlock_kernel();
 	if (ret)
 		bdput(bdev);
@@ -714,7 +714,7 @@ int blkdev_put(struct block_device *bdev)
 	struct inode *bd_inode = bdev->bd_inode;
 	struct gendisk *disk = bdev->bd_disk;
 
-	down(&bdev->bd_sem);
+	mutex_lock(&bdev->bd_mutex);
 	lock_kernel();
 	if (!--bdev->bd_openers) {
 		sync_blockdev(bdev);
@@ -724,9 +724,9 @@ int blkdev_put(struct block_device *bdev)
 		if (disk->fops->release)
 			ret = disk->fops->release(bd_inode, NULL);
 	} else {
-		down(&bdev->bd_contains->bd_sem);
+		mutex_lock(&bdev->bd_contains->bd_mutex);
 		bdev->bd_contains->bd_part_count--;
-		up(&bdev->bd_contains->bd_sem);
+		mutex_unlock(&bdev->bd_contains->bd_mutex);
 	}
 	if (!bdev->bd_openers) {
 		struct module *owner = disk->fops->owner;
@@ -746,7 +746,7 @@ int blkdev_put(struct block_device *bdev)
 		bdev->bd_contains = NULL;
 	}
 	unlock_kernel();
-	up(&bdev->bd_sem);
+	mutex_unlock(&bdev->bd_mutex);
 	bdput(bdev);
 	return ret;
 }
diff --git a/fs/buffer.c b/fs/buffer.c
index a9b399402007..0d6ca7bac6c8 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -201,7 +201,7 @@ int fsync_bdev(struct block_device *bdev)
  * freeze_bdev -- lock a filesystem and force it into a consistent state
  * @bdev: blockdevice to lock
  *
- * This takes the block device bd_mount_sem to make sure no new mounts
+ * This takes the block device bd_mount_mutex to make sure no new mounts
  * happen on bdev until thaw_bdev() is called.
  * If a superblock is found on this device, we take the s_umount semaphore
  * on it to make sure nobody unmounts until the snapshot creation is done.
@@ -210,7 +210,7 @@ struct super_block *freeze_bdev(struct block_device *bdev)
 {
 	struct super_block *sb;
 
-	down(&bdev->bd_mount_sem);
+	mutex_lock(&bdev->bd_mount_mutex);
 	sb = get_super(bdev);
 	if (sb && !(sb->s_flags & MS_RDONLY)) {
 		sb->s_frozen = SB_FREEZE_WRITE;
@@ -264,7 +264,7 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb)
 		drop_super(sb);
 	}
 
-	up(&bdev->bd_mount_sem);
+	mutex_unlock(&bdev->bd_mount_mutex);
 }
 EXPORT_SYMBOL(thaw_bdev);
 
@@ -3051,68 +3051,6 @@ asmlinkage long sys_bdflush(int func, long data)
 }
 
 /*
- * Migration function for pages with buffers. This function can only be used
- * if the underlying filesystem guarantees that no other references to "page"
- * exist.
- */
-#ifdef CONFIG_MIGRATION
-int buffer_migrate_page(struct page *newpage, struct page *page)
-{
-	struct address_space *mapping = page->mapping;
-	struct buffer_head *bh, *head;
-	int rc;
-
-	if (!mapping)
-		return -EAGAIN;
-
-	if (!page_has_buffers(page))
-		return migrate_page(newpage, page);
-
-	head = page_buffers(page);
-
-	rc = migrate_page_remove_references(newpage, page, 3);
-	if (rc)
-		return rc;
-
-	bh = head;
-	do {
-		get_bh(bh);
-		lock_buffer(bh);
-		bh = bh->b_this_page;
-
-	} while (bh != head);
-
-	ClearPagePrivate(page);
-	set_page_private(newpage, page_private(page));
-	set_page_private(page, 0);
-	put_page(page);
-	get_page(newpage);
-
-	bh = head;
-	do {
-		set_bh_page(bh, newpage, bh_offset(bh));
-		bh = bh->b_this_page;
-
-	} while (bh != head);
-
-	SetPagePrivate(newpage);
-
-	migrate_page_copy(newpage, page);
-
-	bh = head;
-	do {
-		unlock_buffer(bh);
-		put_bh(bh);
-		bh = bh->b_this_page;
-
-	} while (bh != head);
-
-	return 0;
-}
-EXPORT_SYMBOL(buffer_migrate_page);
-#endif
-
-/*
  * Buffer-head allocation
  */
 static kmem_cache_t *bh_cachep;
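
The freeze_bdev()/thaw_bdev() pair whose comments are updated above brackets snapshot-style operations: the first takes bd_mount_mutex and forces the filesystem into a consistent state, the second releases it. A hedged caller-side sketch (the snapshot step itself is elided, the demo_* name is hypothetical, and the header carrying the prototypes is an assumption):

#include <linux/fs.h>	/* assumed home of the freeze_bdev() prototype */

/* Hypothetical: quiesce a block device while a snapshot of it is taken. */
static void demo_snapshot(struct block_device *bdev)
{
	struct super_block *sb;

	sb = freeze_bdev(bdev);	/* blocks new mounts, flushes the fs */
	/* ... copy the now-consistent device contents here ... */
	thaw_bdev(bdev, sb);	/* unfreezes writers, drops bd_mount_mutex */
}
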
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 21195c481637..5c36345c9bf7 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -19,6 +19,7 @@
 #include <linux/kobject.h>
 #include <linux/kobj_map.h>
 #include <linux/cdev.h>
+#include <linux/mutex.h>
 
 #ifdef CONFIG_KMOD
 #include <linux/kmod.h>
@@ -28,7 +29,7 @@ static struct kobj_map *cdev_map;
 
 #define MAX_PROBE_HASH 255	/* random */
 
-static DECLARE_MUTEX(chrdevs_lock);
+static DEFINE_MUTEX(chrdevs_lock);
 
 static struct char_device_struct {
 	struct char_device_struct *next;
@@ -88,13 +89,13 @@ out:
 
 void *acquire_chrdev_list(void)
 {
-	down(&chrdevs_lock);
+	mutex_lock(&chrdevs_lock);
 	return get_next_chrdev(NULL);
 }
 
 void release_chrdev_list(void *dev)
 {
-	up(&chrdevs_lock);
+	mutex_unlock(&chrdevs_lock);
 	kfree(dev);
 }
 
@@ -151,7 +152,7 @@ __register_chrdev_region(unsigned int major, unsigned int baseminor,
 
 	memset(cd, 0, sizeof(struct char_device_struct));
 
-	down(&chrdevs_lock);
+	mutex_lock(&chrdevs_lock);
 
 	/* temporary */
 	if (major == 0) {
@@ -186,10 +187,10 @@ __register_chrdev_region(unsigned int major, unsigned int baseminor,
 	}
 	cd->next = *cp;
 	*cp = cd;
-	up(&chrdevs_lock);
+	mutex_unlock(&chrdevs_lock);
 	return cd;
 out:
-	up(&chrdevs_lock);
+	mutex_unlock(&chrdevs_lock);
 	kfree(cd);
 	return ERR_PTR(ret);
 }
@@ -200,7 +201,7 @@ __unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
 	struct char_device_struct *cd = NULL, **cp;
 	int i = major_to_index(major);
 
-	down(&chrdevs_lock);
+	mutex_lock(&chrdevs_lock);
 	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
 		if ((*cp)->major == major &&
 		    (*cp)->baseminor == baseminor &&
@@ -210,7 +211,7 @@ __unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
 		cd = *cp;
 		*cp = cd->next;
 	}
-	up(&chrdevs_lock);
+	mutex_unlock(&chrdevs_lock);
 	return cd;
 }
 
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index d335015473a5..cb68efba35db 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -160,7 +160,7 @@ improperly zeroed buffer in CIFS Unix extensions set times call.
 Version 1.25
 ------------
 Fix internationalization problem in cifs readdir with filenames that map to
-longer UTF8 strings than the string on the wire was in Unicode. Add workaround
+longer UTF-8 strings than the string on the wire was in Unicode. Add workaround
 for readdir to netapp servers. Fix search rewind (seek into readdir to return
 non-consecutive entries). Do not do readdir when server negotiates
 buffer size to small to fit filename. Add support for reading POSIX ACLs from
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index fed55e3c53df..632561dd9c50 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -138,9 +138,9 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
 	cifs_sb = CIFS_SB(inode->i_sb);
 	pTcon = cifs_sb->tcon;
 
-	down(&direntry->d_sb->s_vfs_rename_sem);
+	mutex_lock(&direntry->d_sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&direntry->d_sb->s_vfs_rename_sem);
+	mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex);
 	if(full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
@@ -317,9 +317,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
 	cifs_sb = CIFS_SB(inode->i_sb);
 	pTcon = cifs_sb->tcon;
 
-	down(&direntry->d_sb->s_vfs_rename_sem);
+	mutex_lock(&direntry->d_sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&direntry->d_sb->s_vfs_rename_sem);
+	mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex);
 	if(full_path == NULL)
 		rc = -ENOMEM;
 	else if (pTcon->ses->capabilities & CAP_UNIX) {
diff --git a/fs/cifs/fcntl.c b/fs/cifs/fcntl.c
index a7a47bb36bf3..ec4dfe9bf5ef 100644
--- a/fs/cifs/fcntl.c
+++ b/fs/cifs/fcntl.c
@@ -86,9 +86,9 @@ int cifs_dir_notify(struct file * file, unsigned long arg)
 	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
 	pTcon = cifs_sb->tcon;
 
-	down(&file->f_dentry->d_sb->s_vfs_rename_sem);
+	mutex_lock(&file->f_dentry->d_sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(file->f_dentry);
-	up(&file->f_dentry->d_sb->s_vfs_rename_sem);
+	mutex_unlock(&file->f_dentry->d_sb->s_vfs_rename_mutex);
 
 	if(full_path == NULL) {
 		rc = -ENOMEM;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 675bd2568297..165d67426381 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -203,9 +203,9 @@ int cifs_open(struct inode *inode, struct file *file)
 		}
 	}
 
-	down(&inode->i_sb->s_vfs_rename_sem);
+	mutex_lock(&inode->i_sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(file->f_dentry);
-	up(&inode->i_sb->s_vfs_rename_sem);
+	mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);
 	if (full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 59359911f481..ff93a9f81d1c 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -574,9 +574,9 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry)
 
 	/* Unlink can be called from rename so we can not grab the sem here
 	   since we deadlock otherwise */
-/*	down(&direntry->d_sb->s_vfs_rename_sem);*/
+/*	mutex_lock(&direntry->d_sb->s_vfs_rename_mutex);*/
 	full_path = build_path_from_dentry(direntry);
-/*	up(&direntry->d_sb->s_vfs_rename_sem);*/
+/*	mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex);*/
 	if (full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
@@ -718,9 +718,9 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
 	cifs_sb = CIFS_SB(inode->i_sb);
 	pTcon = cifs_sb->tcon;
 
-	down(&inode->i_sb->s_vfs_rename_sem);
+	mutex_lock(&inode->i_sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&inode->i_sb->s_vfs_rename_sem);
+	mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);
 	if (full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
@@ -803,9 +803,9 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
 	cifs_sb = CIFS_SB(inode->i_sb);
 	pTcon = cifs_sb->tcon;
 
-	down(&inode->i_sb->s_vfs_rename_sem);
+	mutex_lock(&inode->i_sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&inode->i_sb->s_vfs_rename_sem);
+	mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);
 	if (full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
@@ -1137,9 +1137,9 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
 		rc = 0;
 	}
 
-	down(&direntry->d_sb->s_vfs_rename_sem);
+	mutex_lock(&direntry->d_sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&direntry->d_sb->s_vfs_rename_sem);
+	mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex);
 	if (full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 0f99aae33162..8d0da7c87c7b 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -48,10 +48,10 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
 /* No need to check for cross device links since server will do that
    BB note DFS case in future though (when we may have to check) */
 
-	down(&inode->i_sb->s_vfs_rename_sem);
+	mutex_lock(&inode->i_sb->s_vfs_rename_mutex);
 	fromName = build_path_from_dentry(old_file);
 	toName = build_path_from_dentry(direntry);
-	up(&inode->i_sb->s_vfs_rename_sem);
+	mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);
 	if((fromName == NULL) || (toName == NULL)) {
 		rc = -ENOMEM;
 		goto cifs_hl_exit;
@@ -103,9 +103,9 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
 
 	xid = GetXid();
 
-	down(&direntry->d_sb->s_vfs_rename_sem);
+	mutex_lock(&direntry->d_sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&direntry->d_sb->s_vfs_rename_sem);
+	mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex);
 
 	if (!full_path)
 		goto out_no_free;
@@ -164,9 +164,9 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
 	cifs_sb = CIFS_SB(inode->i_sb);
 	pTcon = cifs_sb->tcon;
 
-	down(&inode->i_sb->s_vfs_rename_sem);
+	mutex_lock(&inode->i_sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&inode->i_sb->s_vfs_rename_sem);
+	mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);
 
 	if(full_path == NULL) {
 		FreeXid(xid);
@@ -232,9 +232,9 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen)
 
 /* BB would it be safe against deadlock to grab this sem
    even though rename itself grabs the sem and calls lookup? */
-/*	down(&inode->i_sb->s_vfs_rename_sem);*/
+/*	mutex_lock(&inode->i_sb->s_vfs_rename_mutex);*/
 	full_path = build_path_from_dentry(direntry);
-/*	up(&inode->i_sb->s_vfs_rename_sem);*/
+/*	mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);*/
 
 	if(full_path == NULL) {
 		FreeXid(xid);
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 288cc048d37f..edb3b6eb34bc 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -404,9 +404,9 @@ static int initiate_cifs_search(const int xid, struct file *file)
 	if(pTcon == NULL)
 		return -EINVAL;
 
-	down(&file->f_dentry->d_sb->s_vfs_rename_sem);
+	mutex_lock(&file->f_dentry->d_sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(file->f_dentry);
-	up(&file->f_dentry->d_sb->s_vfs_rename_sem);
+	mutex_unlock(&file->f_dentry->d_sb->s_vfs_rename_mutex);
 
 	if(full_path == NULL) {
 		return -ENOMEM;
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 777e3363c2a4..3938444d87b2 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -62,9 +62,9 @@ int cifs_removexattr(struct dentry * direntry, const char * ea_name)
 	cifs_sb = CIFS_SB(sb);
 	pTcon = cifs_sb->tcon;
 
-	down(&sb->s_vfs_rename_sem);
+	mutex_lock(&sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&sb->s_vfs_rename_sem);
+	mutex_unlock(&sb->s_vfs_rename_mutex);
 	if(full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
@@ -116,9 +116,9 @@ int cifs_setxattr(struct dentry * direntry, const char * ea_name,
 	cifs_sb = CIFS_SB(sb);
 	pTcon = cifs_sb->tcon;
 
-	down(&sb->s_vfs_rename_sem);
+	mutex_lock(&sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&sb->s_vfs_rename_sem);
+	mutex_unlock(&sb->s_vfs_rename_mutex);
 	if(full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
@@ -223,9 +223,9 @@ ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name,
 	cifs_sb = CIFS_SB(sb);
 	pTcon = cifs_sb->tcon;
 
-	down(&sb->s_vfs_rename_sem);
+	mutex_lock(&sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&sb->s_vfs_rename_sem);
+	mutex_unlock(&sb->s_vfs_rename_mutex);
 	if(full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
@@ -341,9 +341,9 @@ ssize_t cifs_listxattr(struct dentry * direntry, char * data, size_t buf_size)
 	cifs_sb = CIFS_SB(sb);
 	pTcon = cifs_sb->tcon;
 
-	down(&sb->s_vfs_rename_sem);
+	mutex_lock(&sb->s_vfs_rename_mutex);
 	full_path = build_path_from_dentry(direntry);
-	up(&sb->s_vfs_rename_sem);
+	mutex_unlock(&sb->s_vfs_rename_mutex);
 	if(full_path == NULL) {
 		FreeXid(xid);
 		return -ENOMEM;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index c666769a875d..7c031f00fd79 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -72,6 +72,7 @@
 #include <linux/i2c-dev.h>
 #include <linux/wireless.h>
 #include <linux/atalk.h>
+#include <linux/blktrace_api.h>
 
 #include <net/sock.h>		/* siocdevprivate_ioctl */
 #include <net/bluetooth/bluetooth.h>
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index d575452cd9f7..40c4fc973fad 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -251,3 +251,49 @@ struct dentry *debugfs_create_bool(const char *name, mode_t mode,
 }
 EXPORT_SYMBOL_GPL(debugfs_create_bool);
 
+static ssize_t read_file_blob(struct file *file, char __user *user_buf,
+			      size_t count, loff_t *ppos)
+{
+	struct debugfs_blob_wrapper *blob = file->private_data;
+	return simple_read_from_buffer(user_buf, count, ppos, blob->data,
+			blob->size);
+}
+
+static struct file_operations fops_blob = {
+	.read =		read_file_blob,
+	.open =		default_open,
+};
+
+/**
+ * debugfs_create_blob - create a file in the debugfs filesystem that is
+ * used to read and write a binary blob.
+ *
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have
+ * @parent: a pointer to the parent dentry for this file.  This should be a
+ *          directory dentry if set.  If this parameter is NULL, then the
+ *          file will be created in the root of the debugfs filesystem.
+ * @blob: a pointer to a struct debugfs_blob_wrapper which contains a pointer
+ *        to the blob data and the size of the data.
+ *
+ * This function creates a file in debugfs with the given name that exports
+ * @blob->data as a binary blob. If the @mode variable is so set it can be
+ * read from. Writing is not supported.
+ *
+ * This function will return a pointer to a dentry if it succeeds.  This
+ * pointer must be passed to the debugfs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here.)  If an error occurs, NULL will be returned.
+ *
+ * If debugfs is not enabled in the kernel, the value -ENODEV will be
+ * returned.  It is not wise to check for this value, but rather, check for
+ * NULL or !NULL instead as to eliminate the need for #ifdef in the calling
+ * code.
+ */
+struct dentry *debugfs_create_blob(const char *name, mode_t mode,
+				   struct dentry *parent,
+				   struct debugfs_blob_wrapper *blob)
+{
+	return debugfs_create_file(name, mode, parent, blob, &fops_blob);
+}
+EXPORT_SYMBOL_GPL(debugfs_create_blob);
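
The new debugfs_create_blob() above is meant to be called from module init code and paired with debugfs_remove() at exit, since no automatic cleanup happens on unload. A hedged usage sketch (the demo_* module and payload names are hypothetical):

#include <linux/module.h>
#include <linux/debugfs.h>

static char demo_data[] = "hello from debugfs";	/* hypothetical payload */
static struct debugfs_blob_wrapper demo_blob = {
	.data = demo_data,
	.size = sizeof(demo_data),
};
static struct dentry *demo_dentry;

static int __init demo_init(void)
{
	/* Exposes a read-only binary view of demo_data at <debugfs>/demo_blob. */
	demo_dentry = debugfs_create_blob("demo_blob", 0444, NULL, &demo_blob);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove(demo_dentry);	/* no automatic cleanup on unload */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
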
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index bfb8a230bac9..14c5620b5cab 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -18,6 +18,7 @@
 #include <linux/mount.h>
 #include <linux/tty.h>
 #include <linux/devpts_fs.h>
+#include <linux/parser.h>
 
 #define DEVPTS_SUPER_MAGIC 0x1cd1
 
@@ -32,39 +33,60 @@ static struct {
 	umode_t mode;
 } config = {.mode = 0600};
 
+enum {
+	Opt_uid, Opt_gid, Opt_mode,
+	Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_uid, "uid=%u"},
+	{Opt_gid, "gid=%u"},
+	{Opt_mode, "mode=%o"},
+	{Opt_err, NULL}
+};
+
 static int devpts_remount(struct super_block *sb, int *flags, char *data)
 {
-	int setuid = 0;
-	int setgid = 0;
-	uid_t uid = 0;
-	gid_t gid = 0;
-	umode_t mode = 0600;
-	char *this_char;
-
-	this_char = NULL;
-	while ((this_char = strsep(&data, ",")) != NULL) {
-		int n;
-		char dummy;
-		if (!*this_char)
+	char *p;
+
+	config.setuid = 0;
+	config.setgid = 0;
+	config.uid = 0;
+	config.gid = 0;
+	config.mode = 0600;
+
+	while ((p = strsep(&data, ",")) != NULL) {
+		substring_t args[MAX_OPT_ARGS];
+		int token;
+		int option;
+
+		if (!*p)
 			continue;
-		if (sscanf(this_char, "uid=%i%c", &n, &dummy) == 1) {
-			setuid = 1;
-			uid = n;
-		} else if (sscanf(this_char, "gid=%i%c", &n, &dummy) == 1) {
-			setgid = 1;
-			gid = n;
-		} else if (sscanf(this_char, "mode=%o%c", &n, &dummy) == 1)
-			mode = n & ~S_IFMT;
-		else {
-			printk("devpts: called with bogus options\n");
+
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case Opt_uid:
+			if (match_int(&args[0], &option))
+				return -EINVAL;
+			config.uid = option;
+			config.setuid = 1;
+			break;
+		case Opt_gid:
+			if (match_int(&args[0], &option))
+				return -EINVAL;
+			config.gid = option;
+			config.setgid = 1;
+			break;
+		case Opt_mode:
+			if (match_octal(&args[0], &option))
+				return -EINVAL;
+			config.mode = option & ~S_IFMT;
+			break;
+		default:
+			printk(KERN_ERR "devpts: called with bogus options\n");
 			return -EINVAL;
 		}
 	}
-	config.setuid = setuid;
-	config.setgid = setgid;
-	config.uid = uid;
-	config.gid = gid;
-	config.mode = mode;
 
 	return 0;
 }
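
The devpts rewrite above replaces ad hoc sscanf() parsing with the generic <linux/parser.h> machinery: declare an enum of tokens, a match_table_t mapping patterns to tokens, then dispatch on match_token() and extract values with match_int()/match_octal(). The same skeleton reduced to a single option (a hedged sketch; Opt_size and the demo_* names are hypothetical):

#include <linux/parser.h>
#include <linux/string.h>
#include <linux/errno.h>

enum { Opt_size, Opt_err };		/* hypothetical token set */

static match_table_t demo_tokens = {
	{Opt_size, "size=%u"},
	{Opt_err, NULL}
};

static int demo_parse(char *options, unsigned int *size)
{
	char *p;

	while ((p = strsep(&options, ",")) != NULL) {
		substring_t args[MAX_OPT_ARGS];
		int option;

		if (!*p)
			continue;
		switch (match_token(p, demo_tokens, args)) {
		case Opt_size:
			if (match_int(&args[0], &option))
				return -EINVAL;
			*size = option;
			break;
		default:
			return -EINVAL;	/* unknown option */
		}
	}
	return 0;
}
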
diff --git a/fs/dquot.c b/fs/dquot.c
index 1966c890b48d..acf07e581f8c 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -103,12 +103,12 @@
  * (these locking rules also apply for S_NOQUOTA flag in the inode - note that
  * for altering the flag i_mutex is also needed).  If operation is holding
  * reference to dquot in other way (e.g. quotactl ops) it must be guarded by
- * dqonoff_sem.
+ * dqonoff_mutex.
  * This locking assures that:
  *   a) update/access to dquot pointers in inode is serialized
  *   b) everyone is guarded against invalidate_dquots()
  *
- * Each dquot has its dq_lock semaphore. Locked dquots might not be referenced
+ * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
  * from inodes (dquot_alloc_space() and such don't check the dq_lock).
  * Currently dquot is locked only when it is being read to memory (or space for
  * it is being allocated) on the first dqget() and when it is being released on
@@ -118,9 +118,9 @@
  * spinlock to internal buffers before writing.
  *
  * Lock ordering (including related VFS locks) is the following:
- *   i_mutex > dqonoff_sem > iprune_sem > journal_lock > dqptr_sem >
- *   > dquot->dq_lock > dqio_sem
- * i_mutex on quota files is special (it's below dqio_sem)
+ *   i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
+ *   dqio_mutex
+ * i_mutex on quota files is special (it's below dqio_mutex)
  */
 
 static DEFINE_SPINLOCK(dq_list_lock);
@@ -281,8 +281,8 @@ static inline void remove_inuse(struct dquot *dquot)
 
 static void wait_on_dquot(struct dquot *dquot)
 {
-	down(&dquot->dq_lock);
-	up(&dquot->dq_lock);
+	mutex_lock(&dquot->dq_lock);
+	mutex_unlock(&dquot->dq_lock);
 }
 
 #define mark_dquot_dirty(dquot) ((dquot)->dq_sb->dq_op->mark_dirty(dquot))
@@ -321,8 +321,8 @@ int dquot_acquire(struct dquot *dquot)
 	int ret = 0, ret2 = 0;
 	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
-	down(&dquot->dq_lock);
-	down(&dqopt->dqio_sem);
+	mutex_lock(&dquot->dq_lock);
+	mutex_lock(&dqopt->dqio_mutex);
 	if (!test_bit(DQ_READ_B, &dquot->dq_flags))
 		ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
 	if (ret < 0)
@@ -343,8 +343,8 @@ int dquot_acquire(struct dquot *dquot)
 	}
 	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 out_iolock:
-	up(&dqopt->dqio_sem);
-	up(&dquot->dq_lock);
+	mutex_unlock(&dqopt->dqio_mutex);
+	mutex_unlock(&dquot->dq_lock);
 	return ret;
 }
 
@@ -356,7 +356,7 @@ int dquot_commit(struct dquot *dquot)
 	int ret = 0, ret2 = 0;
 	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
-	down(&dqopt->dqio_sem);
+	mutex_lock(&dqopt->dqio_mutex);
 	spin_lock(&dq_list_lock);
 	if (!clear_dquot_dirty(dquot)) {
 		spin_unlock(&dq_list_lock);
@@ -373,7 +373,7 @@ int dquot_commit(struct dquot *dquot)
 		ret = ret2;
 	}
 out_sem:
-	up(&dqopt->dqio_sem);
+	mutex_unlock(&dqopt->dqio_mutex);
 	return ret;
 }
 
@@ -385,11 +385,11 @@ int dquot_release(struct dquot *dquot)
 	int ret = 0, ret2 = 0;
 	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
-	down(&dquot->dq_lock);
+	mutex_lock(&dquot->dq_lock);
 	/* Check whether we are not racing with some other dqget() */
 	if (atomic_read(&dquot->dq_count) > 1)
 		goto out_dqlock;
-	down(&dqopt->dqio_sem);
+	mutex_lock(&dqopt->dqio_mutex);
 	if (dqopt->ops[dquot->dq_type]->release_dqblk) {
 		ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
 		/* Write the info */
@@ -399,31 +399,57 @@ int dquot_release(struct dquot *dquot)
 		ret = ret2;
 	}
 	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
-	up(&dqopt->dqio_sem);
+	mutex_unlock(&dqopt->dqio_mutex);
 out_dqlock:
-	up(&dquot->dq_lock);
+	mutex_unlock(&dquot->dq_lock);
 	return ret;
 }
 
 /* Invalidate all dquots on the list. Note that this function is called after
  * quota is disabled and pointers from inodes removed so there cannot be new
- * quota users. Also because we hold dqonoff_sem there can be no quota users
- * for this sb+type at all. */
+ * quota users. There can still be some users of quotas due to inodes being
+ * just deleted or pruned by prune_icache() (those are not attached to any
+ * list). We have to wait for such users.
+ */
 static void invalidate_dquots(struct super_block *sb, int type)
 {
 	struct dquot *dquot, *tmp;
 
+restart:
 	spin_lock(&dq_list_lock);
 	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
 		if (dquot->dq_sb != sb)
 			continue;
 		if (dquot->dq_type != type)
 			continue;
-#ifdef __DQUOT_PARANOIA
-		if (atomic_read(&dquot->dq_count))
-			BUG();
-#endif
-		/* Quota now has no users and it has been written on last dqput() */
+		/* Wait for dquot users */
+		if (atomic_read(&dquot->dq_count)) {
+			DEFINE_WAIT(wait);
+
+			atomic_inc(&dquot->dq_count);
+			prepare_to_wait(&dquot->dq_wait_unused, &wait,
+					TASK_UNINTERRUPTIBLE);
+			spin_unlock(&dq_list_lock);
+			/* Once dqput() wakes us up, we know it's time to free
+			 * the dquot.
+			 * IMPORTANT: we rely on the fact that there is always
+			 * at most one process waiting for dquot to free.
+			 * Otherwise dq_count would be > 1 and we would never
+			 * wake up.
+			 */
+			if (atomic_read(&dquot->dq_count) > 1)
+				schedule();
+			finish_wait(&dquot->dq_wait_unused, &wait);
+			dqput(dquot);
+			/* At this moment dquot() need not exist (it could be
+			 * reclaimed by prune_dqcache(). Hence we must
+			 * restart. */
+			goto restart;
+		}
+		/*
+		 * Quota now has no users and it has been written on last
+		 * dqput()
+		 */
 		remove_dquot_hash(dquot);
 		remove_free_dquot(dquot);
 		remove_inuse(dquot);
@@ -439,7 +465,7 @@ int vfs_quota_sync(struct super_block *sb, int type)
 	struct quota_info *dqopt = sb_dqopt(sb);
 	int cnt;
 
-	down(&dqopt->dqonoff_sem);
+	mutex_lock(&dqopt->dqonoff_mutex);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		if (type != -1 && cnt != type)
 			continue;
@@ -474,7 +500,7 @@ int vfs_quota_sync(struct super_block *sb, int type)
 	spin_lock(&dq_list_lock);
 	dqstats.syncs++;
 	spin_unlock(&dq_list_lock);
-	up(&dqopt->dqonoff_sem);
+	mutex_unlock(&dqopt->dqonoff_mutex);
 
 	return 0;
 }
@@ -515,7 +541,7 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
 /*
  * Put reference to dquot
  * NOTE: If you change this function please check whether dqput_blocks() works right...
- * MUST be called with either dqptr_sem or dqonoff_sem held
+ * MUST be called with either dqptr_sem or dqonoff_mutex held
  */
 static void dqput(struct dquot *dquot)
 {
@@ -540,6 +566,10 @@ we_slept:
 	if (atomic_read(&dquot->dq_count) > 1) {
 		/* We have more than one user... nothing to do */
 		atomic_dec(&dquot->dq_count);
+		/* Releasing dquot during quotaoff phase? */
+		if (!sb_has_quota_enabled(dquot->dq_sb, dquot->dq_type) &&
+		    atomic_read(&dquot->dq_count) == 1)
+			wake_up(&dquot->dq_wait_unused);
 		spin_unlock(&dq_list_lock);
 		return;
 	}
@@ -576,11 +606,12 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
 		return NODQUOT;
 
 	memset((caddr_t)dquot, 0, sizeof(struct dquot));
-	sema_init(&dquot->dq_lock, 1);
+	mutex_init(&dquot->dq_lock);
 	INIT_LIST_HEAD(&dquot->dq_free);
 	INIT_LIST_HEAD(&dquot->dq_inuse);
 	INIT_HLIST_NODE(&dquot->dq_hash);
 	INIT_LIST_HEAD(&dquot->dq_dirty);
+	init_waitqueue_head(&dquot->dq_wait_unused);
 	dquot->dq_sb = sb;
 	dquot->dq_type = type;
 	atomic_set(&dquot->dq_count, 1);
@@ -590,7 +621,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
 
 /*
  * Get reference to dquot
- * MUST be called with either dqptr_sem or dqonoff_sem held
+ * MUST be called with either dqptr_sem or dqonoff_mutex held
 */
 static struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
 {
@@ -656,7 +687,7 @@ static int dqinit_needed(struct inode *inode, int type)
 		return 0;
 }
 
-/* This routine is guarded by dqonoff_sem semaphore */
+/* This routine is guarded by dqonoff_mutex mutex */
 static void add_dquot_ref(struct super_block *sb, int type)
 {
 	struct list_head *p;
@@ -732,13 +763,9 @@ static void drop_dquot_ref(struct super_block *sb, int type)
 {
 	LIST_HEAD(tofree_head);
 
-	/* We need to be guarded against prune_icache to reach all the
-	 * inodes - otherwise some can be on the local list of prune_icache */
-	down(&iprune_sem);
 	down_write(&sb_dqopt(sb)->dqptr_sem);
 	remove_dquot_ref(sb, type, &tofree_head);
740 up_write(&sb_dqopt(sb)->dqptr_sem); 768 up_write(&sb_dqopt(sb)->dqptr_sem);
741 up(&iprune_sem);
742 put_dquot_list(&tofree_head); 769 put_dquot_list(&tofree_head);
743} 770}
744 771
@@ -938,8 +965,8 @@ int dquot_initialize(struct inode *inode, int type)
938 unsigned int id = 0; 965 unsigned int id = 0;
939 int cnt, ret = 0; 966 int cnt, ret = 0;
940 967
941 /* First test before acquiring semaphore - solves deadlocks when we 968 /* First test before acquiring mutex - solves deadlocks when we
942 * re-enter the quota code and are already holding the semaphore */ 969 * re-enter the quota code and are already holding the mutex */
943 if (IS_NOQUOTA(inode)) 970 if (IS_NOQUOTA(inode))
944 return 0; 971 return 0;
945 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 972 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
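
The rewritten comment describes a re-entrancy guard, not a micro-optimization: writing quota data goes back through the filesystem, which can re-enter the quota code for the quota file itself while dqptr_sem is already held. The unlocked IS_NOQUOTA() test breaks that recursion. A sketch of the idiom (quota_op is a hypothetical name; kernel context assumed):

	/* Check-before-lock idiom used throughout dquot.c. The unlocked
	 * test is safe because quota files stay marked IS_NOQUOTA(). */
	static int quota_op(struct inode *inode)
	{
		if (IS_NOQUOTA(inode))		/* fast path, also breaks re-entry */
			return 0;
		down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
		if (IS_NOQUOTA(inode)) {	/* re-check under the lock */
			up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
			return 0;
		}
		/* ... operate on inode->i_dquot[] ... */
		up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
		return 0;
	}
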
@@ -1002,8 +1029,8 @@ int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
1002 int cnt, ret = NO_QUOTA; 1029 int cnt, ret = NO_QUOTA;
1003 char warntype[MAXQUOTAS]; 1030 char warntype[MAXQUOTAS];
1004 1031
1005 /* First test before acquiring semaphore - solves deadlocks when we 1032 /* First test before acquiring mutex - solves deadlocks when we
1006 * re-enter the quota code and are already holding the semaphore */ 1033 * re-enter the quota code and are already holding the mutex */
1007 if (IS_NOQUOTA(inode)) { 1034 if (IS_NOQUOTA(inode)) {
1008out_add: 1035out_add:
1009 inode_add_bytes(inode, number); 1036 inode_add_bytes(inode, number);
@@ -1051,8 +1078,8 @@ int dquot_alloc_inode(const struct inode *inode, unsigned long number)
1051 int cnt, ret = NO_QUOTA; 1078 int cnt, ret = NO_QUOTA;
1052 char warntype[MAXQUOTAS]; 1079 char warntype[MAXQUOTAS];
1053 1080
1054 /* First test before acquiring semaphore - solves deadlocks when we 1081 /* First test before acquiring mutex - solves deadlocks when we
1055 * re-enter the quota code and are already holding the semaphore */ 1082 * re-enter the quota code and are already holding the mutex */
1056 if (IS_NOQUOTA(inode)) 1083 if (IS_NOQUOTA(inode))
1057 return QUOTA_OK; 1084 return QUOTA_OK;
1058 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1085 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
@@ -1095,8 +1122,8 @@ int dquot_free_space(struct inode *inode, qsize_t number)
1095{ 1122{
1096 unsigned int cnt; 1123 unsigned int cnt;
1097 1124
1098 /* First test before acquiring semaphore - solves deadlocks when we 1125 /* First test before acquiring mutex - solves deadlocks when we
1099 * re-enter the quota code and are already holding the semaphore */ 1126 * re-enter the quota code and are already holding the mutex */
1100 if (IS_NOQUOTA(inode)) { 1127 if (IS_NOQUOTA(inode)) {
1101out_sub: 1128out_sub:
1102 inode_sub_bytes(inode, number); 1129 inode_sub_bytes(inode, number);
@@ -1131,8 +1158,8 @@ int dquot_free_inode(const struct inode *inode, unsigned long number)
1131{ 1158{
1132 unsigned int cnt; 1159 unsigned int cnt;
1133 1160
1134 /* First test before acquiring semaphore - solves deadlocks when we 1161 /* First test before acquiring mutex - solves deadlocks when we
1135 * re-enter the quota code and are already holding the semaphore */ 1162 * re-enter the quota code and are already holding the mutex */
1136 if (IS_NOQUOTA(inode)) 1163 if (IS_NOQUOTA(inode))
1137 return QUOTA_OK; 1164 return QUOTA_OK;
1138 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1165 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
@@ -1171,8 +1198,8 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
1171 chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid; 1198 chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid;
1172 char warntype[MAXQUOTAS]; 1199 char warntype[MAXQUOTAS];
1173 1200
1174 /* First test before acquiring semaphore - solves deadlocks when we 1201 /* First test before acquiring mutex - solves deadlocks when we
1175 * re-enter the quota code and are already holding the semaphore */ 1202 * re-enter the quota code and are already holding the mutex */
1176 if (IS_NOQUOTA(inode)) 1203 if (IS_NOQUOTA(inode))
1177 return QUOTA_OK; 1204 return QUOTA_OK;
1178 /* Clear the arrays */ 1205 /* Clear the arrays */
@@ -1266,9 +1293,9 @@ int dquot_commit_info(struct super_block *sb, int type)
1266 int ret; 1293 int ret;
1267 struct quota_info *dqopt = sb_dqopt(sb); 1294 struct quota_info *dqopt = sb_dqopt(sb);
1268 1295
1269 down(&dqopt->dqio_sem); 1296 mutex_lock(&dqopt->dqio_mutex);
1270 ret = dqopt->ops[type]->write_file_info(sb, type); 1297 ret = dqopt->ops[type]->write_file_info(sb, type);
1271 up(&dqopt->dqio_sem); 1298 mutex_unlock(&dqopt->dqio_mutex);
1272 return ret; 1299 return ret;
1273} 1300}
1274 1301
@@ -1324,7 +1351,7 @@ int vfs_quota_off(struct super_block *sb, int type)
1324 struct inode *toputinode[MAXQUOTAS]; 1351 struct inode *toputinode[MAXQUOTAS];
1325 1352
1326 /* We need to serialize quota_off() for device */ 1353 /* We need to serialize quota_off() for device */
1327 down(&dqopt->dqonoff_sem); 1354 mutex_lock(&dqopt->dqonoff_mutex);
1328 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1355 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1329 toputinode[cnt] = NULL; 1356 toputinode[cnt] = NULL;
1330 if (type != -1 && cnt != type) 1357 if (type != -1 && cnt != type)
@@ -1353,7 +1380,7 @@ int vfs_quota_off(struct super_block *sb, int type)
1353 dqopt->info[cnt].dqi_bgrace = 0; 1380 dqopt->info[cnt].dqi_bgrace = 0;
1354 dqopt->ops[cnt] = NULL; 1381 dqopt->ops[cnt] = NULL;
1355 } 1382 }
1356 up(&dqopt->dqonoff_sem); 1383 mutex_unlock(&dqopt->dqonoff_mutex);
1357 /* Sync the superblock so that buffers with quota data are written to 1384 /* Sync the superblock so that buffers with quota data are written to
1358 * disk (and so userspace sees correct data afterwards). */ 1385 * disk (and so userspace sees correct data afterwards). */
1359 if (sb->s_op->sync_fs) 1386 if (sb->s_op->sync_fs)
@@ -1366,7 +1393,7 @@ int vfs_quota_off(struct super_block *sb, int type)
1366 * changes done by userspace on the next quotaon() */ 1393 * changes done by userspace on the next quotaon() */
1367 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1394 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1368 if (toputinode[cnt]) { 1395 if (toputinode[cnt]) {
1369 down(&dqopt->dqonoff_sem); 1396 mutex_lock(&dqopt->dqonoff_mutex);
1370 /* If quota was reenabled in the meantime, we have 1397 /* If quota was reenabled in the meantime, we have
1371 * nothing to do */ 1398 * nothing to do */
1372 if (!sb_has_quota_enabled(sb, cnt)) { 1399 if (!sb_has_quota_enabled(sb, cnt)) {
@@ -1378,7 +1405,7 @@ int vfs_quota_off(struct super_block *sb, int type)
1378 mark_inode_dirty(toputinode[cnt]); 1405 mark_inode_dirty(toputinode[cnt]);
1379 iput(toputinode[cnt]); 1406 iput(toputinode[cnt]);
1380 } 1407 }
1381 up(&dqopt->dqonoff_sem); 1408 mutex_unlock(&dqopt->dqonoff_mutex);
1382 } 1409 }
1383 if (sb->s_bdev) 1410 if (sb->s_bdev)
1384 invalidate_bdev(sb->s_bdev, 0); 1411 invalidate_bdev(sb->s_bdev, 0);
@@ -1419,7 +1446,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
1419 /* And now flush the block cache so that the kernel sees the changes */ 1446 /* And now flush the block cache so that the kernel sees the changes */
1420 invalidate_bdev(sb->s_bdev, 0); 1447 invalidate_bdev(sb->s_bdev, 0);
1421 mutex_lock(&inode->i_mutex); 1448 mutex_lock(&inode->i_mutex);
1422 down(&dqopt->dqonoff_sem); 1449 mutex_lock(&dqopt->dqonoff_mutex);
1423 if (sb_has_quota_enabled(sb, type)) { 1450 if (sb_has_quota_enabled(sb, type)) {
1424 error = -EBUSY; 1451 error = -EBUSY;
1425 goto out_lock; 1452 goto out_lock;
@@ -1444,17 +1471,17 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
1444 dqopt->ops[type] = fmt->qf_ops; 1471 dqopt->ops[type] = fmt->qf_ops;
1445 dqopt->info[type].dqi_format = fmt; 1472 dqopt->info[type].dqi_format = fmt;
1446 INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); 1473 INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
1447 down(&dqopt->dqio_sem); 1474 mutex_lock(&dqopt->dqio_mutex);
1448 if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) { 1475 if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) {
1449 up(&dqopt->dqio_sem); 1476 mutex_unlock(&dqopt->dqio_mutex);
1450 goto out_file_init; 1477 goto out_file_init;
1451 } 1478 }
1452 up(&dqopt->dqio_sem); 1479 mutex_unlock(&dqopt->dqio_mutex);
1453 mutex_unlock(&inode->i_mutex); 1480 mutex_unlock(&inode->i_mutex);
1454 set_enable_flags(dqopt, type); 1481 set_enable_flags(dqopt, type);
1455 1482
1456 add_dquot_ref(sb, type); 1483 add_dquot_ref(sb, type);
1457 up(&dqopt->dqonoff_sem); 1484 mutex_unlock(&dqopt->dqonoff_mutex);
1458 1485
1459 return 0; 1486 return 0;
1460 1487
@@ -1462,7 +1489,7 @@ out_file_init:
1462 dqopt->files[type] = NULL; 1489 dqopt->files[type] = NULL;
1463 iput(inode); 1490 iput(inode);
1464out_lock: 1491out_lock:
1465 up(&dqopt->dqonoff_sem); 1492 mutex_unlock(&dqopt->dqonoff_mutex);
1466 if (oldflags != -1) { 1493 if (oldflags != -1) {
1467 down_write(&dqopt->dqptr_sem); 1494 down_write(&dqopt->dqptr_sem);
1468 /* Set the flags back (in the case of accidental quotaon() 1495 /* Set the flags back (in the case of accidental quotaon()
@@ -1550,14 +1577,14 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
1550{ 1577{
1551 struct dquot *dquot; 1578 struct dquot *dquot;
1552 1579
1553 down(&sb_dqopt(sb)->dqonoff_sem); 1580 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
1554 if (!(dquot = dqget(sb, id, type))) { 1581 if (!(dquot = dqget(sb, id, type))) {
1555 up(&sb_dqopt(sb)->dqonoff_sem); 1582 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1556 return -ESRCH; 1583 return -ESRCH;
1557 } 1584 }
1558 do_get_dqblk(dquot, di); 1585 do_get_dqblk(dquot, di);
1559 dqput(dquot); 1586 dqput(dquot);
1560 up(&sb_dqopt(sb)->dqonoff_sem); 1587 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1561 return 0; 1588 return 0;
1562} 1589}
1563 1590
@@ -1619,14 +1646,14 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
1619{ 1646{
1620 struct dquot *dquot; 1647 struct dquot *dquot;
1621 1648
1622 down(&sb_dqopt(sb)->dqonoff_sem); 1649 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
1623 if (!(dquot = dqget(sb, id, type))) { 1650 if (!(dquot = dqget(sb, id, type))) {
1624 up(&sb_dqopt(sb)->dqonoff_sem); 1651 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1625 return -ESRCH; 1652 return -ESRCH;
1626 } 1653 }
1627 do_set_dqblk(dquot, di); 1654 do_set_dqblk(dquot, di);
1628 dqput(dquot); 1655 dqput(dquot);
1629 up(&sb_dqopt(sb)->dqonoff_sem); 1656 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1630 return 0; 1657 return 0;
1631} 1658}
1632 1659
@@ -1635,9 +1662,9 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
1635{ 1662{
1636 struct mem_dqinfo *mi; 1663 struct mem_dqinfo *mi;
1637 1664
1638 down(&sb_dqopt(sb)->dqonoff_sem); 1665 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
1639 if (!sb_has_quota_enabled(sb, type)) { 1666 if (!sb_has_quota_enabled(sb, type)) {
1640 up(&sb_dqopt(sb)->dqonoff_sem); 1667 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1641 return -ESRCH; 1668 return -ESRCH;
1642 } 1669 }
1643 mi = sb_dqopt(sb)->info + type; 1670 mi = sb_dqopt(sb)->info + type;
@@ -1647,7 +1674,7 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
1647 ii->dqi_flags = mi->dqi_flags & DQF_MASK; 1674 ii->dqi_flags = mi->dqi_flags & DQF_MASK;
1648 ii->dqi_valid = IIF_ALL; 1675 ii->dqi_valid = IIF_ALL;
1649 spin_unlock(&dq_data_lock); 1676 spin_unlock(&dq_data_lock);
1650 up(&sb_dqopt(sb)->dqonoff_sem); 1677 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1651 return 0; 1678 return 0;
1652} 1679}
1653 1680
@@ -1656,9 +1683,9 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
1656{ 1683{
1657 struct mem_dqinfo *mi; 1684 struct mem_dqinfo *mi;
1658 1685
1659 down(&sb_dqopt(sb)->dqonoff_sem); 1686 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
1660 if (!sb_has_quota_enabled(sb, type)) { 1687 if (!sb_has_quota_enabled(sb, type)) {
1661 up(&sb_dqopt(sb)->dqonoff_sem); 1688 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1662 return -ESRCH; 1689 return -ESRCH;
1663 } 1690 }
1664 mi = sb_dqopt(sb)->info + type; 1691 mi = sb_dqopt(sb)->info + type;
@@ -1673,7 +1700,7 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
1673 mark_info_dirty(sb, type); 1700 mark_info_dirty(sb, type);
1674 /* Force write to disk */ 1701 /* Force write to disk */
1675 sb->dq_op->write_info(sb, type); 1702 sb->dq_op->write_info(sb, type);
1676 up(&sb_dqopt(sb)->dqonoff_sem); 1703 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1677 return 0; 1704 return 0;
1678} 1705}
1679 1706
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 4284cd31eba6..1c2b16fda13a 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -34,6 +34,7 @@
34#include <linux/eventpoll.h> 34#include <linux/eventpoll.h>
35#include <linux/mount.h> 35#include <linux/mount.h>
36#include <linux/bitops.h> 36#include <linux/bitops.h>
37#include <linux/mutex.h>
37#include <asm/uaccess.h> 38#include <asm/uaccess.h>
38#include <asm/system.h> 39#include <asm/system.h>
39#include <asm/io.h> 40#include <asm/io.h>
@@ -46,7 +47,7 @@
46 * LOCKING: 47 * LOCKING:
47 * There are three levels of locking required by epoll: 48 * There are three levels of locking required by epoll:
48 * 49 *
49 * 1) epsem (semaphore) 50 * 1) epmutex (mutex)
50 * 2) ep->sem (rw_semaphore) 51 * 2) ep->sem (rw_semaphore)
51 * 3) ep->lock (rw_lock) 52 * 3) ep->lock (rw_lock)
52 * 53 *
@@ -67,9 +68,9 @@
67 * if a file has been pushed inside an epoll set and it is then 68 * if a file has been pushed inside an epoll set and it is then
68 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL). 69 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
69 * It is possible to drop the "ep->sem" and to use the global 70 * It is possible to drop the "ep->sem" and to use the global
70 * semaphore "epsem" (together with "ep->lock") to have it working, 71 * semaphore "epmutex" (together with "ep->lock") to have it working,
71 * but having "ep->sem" will make the interface more scalable. 72 * but having "ep->sem" will make the interface more scalable.
72 * Events that require holding "epsem" are very rare, while for 73 * Events that require holding "epmutex" are very rare, while for
73 * normal operations the epoll private "ep->sem" will guarantee 74 * normal operations the epoll private "ep->sem" will guarantee
74 * a greater scalability. 75 * a greater scalability.
75 */ 76 */
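
For reference, the acquisition order the comment prescribes, written out as a sketch (the function is illustrative and not part of the patch; kernel context assumed):

	/* Lock nesting after this patch, outermost first:
	 *   1) epmutex  - global, only for the rare teardown races
	 *   2) ep->sem  - per-instance rw_semaphore, normal operations
	 *   3) ep->lock - innermost rwlock, usable from wakeup callbacks */
	static void ep_lock_nesting_example(struct eventpoll *ep)
	{
		unsigned long flags;

		mutex_lock(&epmutex);
		down_write(&ep->sem);
		write_lock_irqsave(&ep->lock, flags);
		/* ... critical section ... */
		write_unlock_irqrestore(&ep->lock, flags);
		up_write(&ep->sem);
		mutex_unlock(&epmutex);
	}
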
@@ -274,7 +275,7 @@ static struct super_block *eventpollfs_get_sb(struct file_system_type *fs_type,
274/* 275/*
275 * This semaphore is used to serialize ep_free() and eventpoll_release_file(). 276 * This semaphore is used to serialize ep_free() and eventpoll_release_file().
276 */ 277 */
277static struct semaphore epsem; 278static struct mutex epmutex;
278 279
279/* Safe wake up implementation */ 280/* Safe wake up implementation */
280static struct poll_safewake psw; 281static struct poll_safewake psw;
@@ -451,15 +452,6 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
451} 452}
452 453
453 454
454/* Used to initialize the epoll bits inside the "struct file" */
455void eventpoll_init_file(struct file *file)
456{
457
458 INIT_LIST_HEAD(&file->f_ep_links);
459 spin_lock_init(&file->f_ep_lock);
460}
461
462
463/* 455/*
464 * This is called from eventpoll_release() to unlink files from the eventpoll 456 * This is called from eventpoll_release() to unlink files from the eventpoll
465 * interface. We need to have this facility to cleanup correctly files that are 457 * interface. We need to have this facility to cleanup correctly files that are
@@ -477,10 +469,10 @@ void eventpoll_release_file(struct file *file)
477 * cleanup path, and this means that no one is using this file anymore. 469 * cleanup path, and this means that no one is using this file anymore.
478 * The only hit might come from ep_free() but holding the semaphore 470 * The only hit might come from ep_free() but holding the semaphore
479 * will correctly serialize the operation. We do need to acquire 471 * will correctly serialize the operation. We do need to acquire
480 * "ep->sem" after "epsem" because ep_remove() requires it when called 472 * "ep->sem" after "epmutex" because ep_remove() requires it when called
481 * from anywhere but ep_free(). 473 * from anywhere but ep_free().
482 */ 474 */
483 down(&epsem); 475 mutex_lock(&epmutex);
484 476
485 while (!list_empty(lsthead)) { 477 while (!list_empty(lsthead)) {
486 epi = list_entry(lsthead->next, struct epitem, fllink); 478 epi = list_entry(lsthead->next, struct epitem, fllink);
@@ -492,7 +484,7 @@ void eventpoll_release_file(struct file *file)
492 up_write(&ep->sem); 484 up_write(&ep->sem);
493 } 485 }
494 486
495 up(&epsem); 487 mutex_unlock(&epmutex);
496} 488}
497 489
498 490
@@ -819,9 +811,9 @@ static void ep_free(struct eventpoll *ep)
819 * We do not need to hold "ep->sem" here because the epoll file 811 * We do not need to hold "ep->sem" here because the epoll file
820 * is on the way to be removed and no one has references to it 812 * is on the way to be removed and no one has references to it
821 * anymore. The only hit might come from eventpoll_release_file() but 813 * anymore. The only hit might come from eventpoll_release_file() but
822 * holding "epsem" is sufficient here. 814 * holding "epmutex" is sufficient here.
823 */ 815 */
824 down(&epsem); 816 mutex_lock(&epmutex);
825 817
826 /* 818 /*
827 * Walks through the whole tree by unregistering poll callbacks. 819 * Walks through the whole tree by unregistering poll callbacks.
@@ -843,7 +835,7 @@ static void ep_free(struct eventpoll *ep)
843 ep_remove(ep, epi); 835 ep_remove(ep, epi);
844 } 836 }
845 837
846 up(&epsem); 838 mutex_unlock(&epmutex);
847} 839}
848 840
849 841
@@ -1615,7 +1607,7 @@ static int __init eventpoll_init(void)
1615{ 1607{
1616 int error; 1608 int error;
1617 1609
1618 init_MUTEX(&epsem); 1610 mutex_init(&epmutex);
1619 1611
1620 /* Initialize the structure used to perform safe poll wait head wake ups */ 1612 /* Initialize the structure used to perform safe poll wait head wake ups */
1621 ep_poll_safewake_init(&psw); 1613 ep_poll_safewake_init(&psw);
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index ad1432a2a62e..4ca824985321 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -36,22 +36,6 @@
36#include "acl.h" 36#include "acl.h"
37#include "xip.h" 37#include "xip.h"
38 38
39/*
40 * Couple of helper functions - make the code slightly cleaner.
41 */
42
43static inline void ext2_inc_count(struct inode *inode)
44{
45 inode->i_nlink++;
46 mark_inode_dirty(inode);
47}
48
49static inline void ext2_dec_count(struct inode *inode)
50{
51 inode->i_nlink--;
52 mark_inode_dirty(inode);
53}
54
55static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) 39static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
56{ 40{
57 int err = ext2_add_link(dentry, inode); 41 int err = ext2_add_link(dentry, inode);
@@ -59,7 +43,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
59 d_instantiate(dentry, inode); 43 d_instantiate(dentry, inode);
60 return 0; 44 return 0;
61 } 45 }
62 ext2_dec_count(inode); 46 inode_dec_link_count(inode);
63 iput(inode); 47 iput(inode);
64 return err; 48 return err;
65} 49}
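
The deleted ext2_inc_count()/ext2_dec_count() helpers are not lost functionality: the generic inode_inc_link_count()/inode_dec_link_count() helpers from <linux/fs.h> that this hunk switches to have the equivalent shape, shown here for reference (definitions as of this kernel generation):

	static inline void inode_inc_link_count(struct inode *inode)
	{
		inode->i_nlink++;
		mark_inode_dirty(inode);
	}

	static inline void inode_dec_link_count(struct inode *inode)
	{
		inode->i_nlink--;
		mark_inode_dirty(inode);
	}
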
@@ -201,7 +185,7 @@ out:
201 return err; 185 return err;
202 186
203out_fail: 187out_fail:
204 ext2_dec_count(inode); 188 inode_dec_link_count(inode);
205 iput (inode); 189 iput (inode);
206 goto out; 190 goto out;
207} 191}
@@ -215,7 +199,7 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir,
215 return -EMLINK; 199 return -EMLINK;
216 200
217 inode->i_ctime = CURRENT_TIME_SEC; 201 inode->i_ctime = CURRENT_TIME_SEC;
218 ext2_inc_count(inode); 202 inode_inc_link_count(inode);
219 atomic_inc(&inode->i_count); 203 atomic_inc(&inode->i_count);
220 204
221 return ext2_add_nondir(dentry, inode); 205 return ext2_add_nondir(dentry, inode);
@@ -229,7 +213,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode)
229 if (dir->i_nlink >= EXT2_LINK_MAX) 213 if (dir->i_nlink >= EXT2_LINK_MAX)
230 goto out; 214 goto out;
231 215
232 ext2_inc_count(dir); 216 inode_inc_link_count(dir);
233 217
234 inode = ext2_new_inode (dir, S_IFDIR | mode); 218 inode = ext2_new_inode (dir, S_IFDIR | mode);
235 err = PTR_ERR(inode); 219 err = PTR_ERR(inode);
@@ -243,7 +227,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode)
243 else 227 else
244 inode->i_mapping->a_ops = &ext2_aops; 228 inode->i_mapping->a_ops = &ext2_aops;
245 229
246 ext2_inc_count(inode); 230 inode_inc_link_count(inode);
247 231
248 err = ext2_make_empty(inode, dir); 232 err = ext2_make_empty(inode, dir);
249 if (err) 233 if (err)
@@ -258,11 +242,11 @@ out:
258 return err; 242 return err;
259 243
260out_fail: 244out_fail:
261 ext2_dec_count(inode); 245 inode_dec_link_count(inode);
262 ext2_dec_count(inode); 246 inode_dec_link_count(inode);
263 iput(inode); 247 iput(inode);
264out_dir: 248out_dir:
265 ext2_dec_count(dir); 249 inode_dec_link_count(dir);
266 goto out; 250 goto out;
267} 251}
268 252
@@ -282,7 +266,7 @@ static int ext2_unlink(struct inode * dir, struct dentry *dentry)
282 goto out; 266 goto out;
283 267
284 inode->i_ctime = dir->i_ctime; 268 inode->i_ctime = dir->i_ctime;
285 ext2_dec_count(inode); 269 inode_dec_link_count(inode);
286 err = 0; 270 err = 0;
287out: 271out:
288 return err; 272 return err;
@@ -297,8 +281,8 @@ static int ext2_rmdir (struct inode * dir, struct dentry *dentry)
297 err = ext2_unlink(dir, dentry); 281 err = ext2_unlink(dir, dentry);
298 if (!err) { 282 if (!err) {
299 inode->i_size = 0; 283 inode->i_size = 0;
300 ext2_dec_count(inode); 284 inode_dec_link_count(inode);
301 ext2_dec_count(dir); 285 inode_dec_link_count(dir);
302 } 286 }
303 } 287 }
304 return err; 288 return err;
@@ -338,41 +322,41 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
338 new_de = ext2_find_entry (new_dir, new_dentry, &new_page); 322 new_de = ext2_find_entry (new_dir, new_dentry, &new_page);
339 if (!new_de) 323 if (!new_de)
340 goto out_dir; 324 goto out_dir;
341 ext2_inc_count(old_inode); 325 inode_inc_link_count(old_inode);
342 ext2_set_link(new_dir, new_de, new_page, old_inode); 326 ext2_set_link(new_dir, new_de, new_page, old_inode);
343 new_inode->i_ctime = CURRENT_TIME_SEC; 327 new_inode->i_ctime = CURRENT_TIME_SEC;
344 if (dir_de) 328 if (dir_de)
345 new_inode->i_nlink--; 329 new_inode->i_nlink--;
346 ext2_dec_count(new_inode); 330 inode_dec_link_count(new_inode);
347 } else { 331 } else {
348 if (dir_de) { 332 if (dir_de) {
349 err = -EMLINK; 333 err = -EMLINK;
350 if (new_dir->i_nlink >= EXT2_LINK_MAX) 334 if (new_dir->i_nlink >= EXT2_LINK_MAX)
351 goto out_dir; 335 goto out_dir;
352 } 336 }
353 ext2_inc_count(old_inode); 337 inode_inc_link_count(old_inode);
354 err = ext2_add_link(new_dentry, old_inode); 338 err = ext2_add_link(new_dentry, old_inode);
355 if (err) { 339 if (err) {
356 ext2_dec_count(old_inode); 340 inode_dec_link_count(old_inode);
357 goto out_dir; 341 goto out_dir;
358 } 342 }
359 if (dir_de) 343 if (dir_de)
360 ext2_inc_count(new_dir); 344 inode_inc_link_count(new_dir);
361 } 345 }
362 346
363 /* 347 /*
364 * Like most other Unix systems, set the ctime for inodes on a 348 * Like most other Unix systems, set the ctime for inodes on a
365 * rename. 349 * rename.
366 * ext2_dec_count() will mark the inode dirty. 350 * inode_dec_link_count() will mark the inode dirty.
367 */ 351 */
368 old_inode->i_ctime = CURRENT_TIME_SEC; 352 old_inode->i_ctime = CURRENT_TIME_SEC;
369 353
370 ext2_delete_entry (old_de, old_page); 354 ext2_delete_entry (old_de, old_page);
371 ext2_dec_count(old_inode); 355 inode_dec_link_count(old_inode);
372 356
373 if (dir_de) { 357 if (dir_de) {
374 ext2_set_link(old_inode, dir_de, dir_page, new_dir); 358 ext2_set_link(old_inode, dir_de, dir_page, new_dir);
375 ext2_dec_count(old_dir); 359 inode_dec_link_count(old_dir);
376 } 360 }
377 return 0; 361 return 0;
378 362
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index 832867aef3dc..773459164bb2 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -95,11 +95,10 @@ static int ext3_readdir(struct file * filp,
95 void * dirent, filldir_t filldir) 95 void * dirent, filldir_t filldir)
96{ 96{
97 int error = 0; 97 int error = 0;
98 unsigned long offset, blk; 98 unsigned long offset;
99 int i, num, stored; 99 int i, stored;
100 struct buffer_head * bh, * tmp, * bha[16]; 100 struct ext3_dir_entry_2 *de;
101 struct ext3_dir_entry_2 * de; 101 struct super_block *sb;
102 struct super_block * sb;
103 int err; 102 int err;
104 struct inode *inode = filp->f_dentry->d_inode; 103 struct inode *inode = filp->f_dentry->d_inode;
105 int ret = 0; 104 int ret = 0;
@@ -124,12 +123,29 @@ static int ext3_readdir(struct file * filp,
124 } 123 }
125#endif 124#endif
126 stored = 0; 125 stored = 0;
127 bh = NULL;
128 offset = filp->f_pos & (sb->s_blocksize - 1); 126 offset = filp->f_pos & (sb->s_blocksize - 1);
129 127
130 while (!error && !stored && filp->f_pos < inode->i_size) { 128 while (!error && !stored && filp->f_pos < inode->i_size) {
131 blk = (filp->f_pos) >> EXT3_BLOCK_SIZE_BITS(sb); 129 unsigned long blk = filp->f_pos >> EXT3_BLOCK_SIZE_BITS(sb);
132 bh = ext3_bread(NULL, inode, blk, 0, &err); 130 struct buffer_head map_bh;
131 struct buffer_head *bh = NULL;
132
133 map_bh.b_state = 0;
134 err = ext3_get_block_handle(NULL, inode, blk, &map_bh, 0, 0);
135 if (!err) {
136 page_cache_readahead(sb->s_bdev->bd_inode->i_mapping,
137 &filp->f_ra,
138 filp,
139 map_bh.b_blocknr >>
140 (PAGE_CACHE_SHIFT - inode->i_blkbits),
141 1);
142 bh = ext3_bread(NULL, inode, blk, 0, &err);
143 }
144
145 /*
146 * We ignore I/O errors on directories so users have a chance
147 * of recovering data when there's a bad sector
148 */
133 if (!bh) { 149 if (!bh) {
134 ext3_error (sb, "ext3_readdir", 150 ext3_error (sb, "ext3_readdir",
135 "directory #%lu contains a hole at offset %lu", 151 "directory #%lu contains a hole at offset %lu",
@@ -138,26 +154,6 @@ static int ext3_readdir(struct file * filp,
138 continue; 154 continue;
139 } 155 }
140 156
141 /*
142 * Do the readahead
143 */
144 if (!offset) {
145 for (i = 16 >> (EXT3_BLOCK_SIZE_BITS(sb) - 9), num = 0;
146 i > 0; i--) {
147 tmp = ext3_getblk (NULL, inode, ++blk, 0, &err);
148 if (tmp && !buffer_uptodate(tmp) &&
149 !buffer_locked(tmp))
150 bha[num++] = tmp;
151 else
152 brelse (tmp);
153 }
154 if (num) {
155 ll_rw_block (READA, num, bha);
156 for (i = 0; i < num; i++)
157 brelse (bha[i]);
158 }
159 }
160
161revalidate: 157revalidate:
162 /* If the dir block has changed since the last call to 158 /* If the dir block has changed since the last call to
163 * readdir(2), then we might be pointing to an invalid 159 * readdir(2), then we might be pointing to an invalid
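
The net effect of the ext3_readdir() rework above: instead of hand-rolled ll_rw_block(READA) on up to 16 buffer heads, each directory block is first mapped with create=0 (so nothing gets allocated) and page_cache_readahead() is aimed at the block device's page cache, which is where ext3_bread() will look anyway. Condensed, using only the calls from the hunk (kernel context assumed):

	struct buffer_head map_bh;
	struct buffer_head *bh = NULL;

	map_bh.b_state = 0;
	/* Map logical directory block 'blk' to a disk block, no read yet. */
	err = ext3_get_block_handle(NULL, inode, blk, &map_bh, 0, 0);
	if (!err) {
		/* Prime the bdev page cache around the mapped disk block... */
		page_cache_readahead(sb->s_bdev->bd_inode->i_mapping,
				&filp->f_ra, filp,
				map_bh.b_blocknr >> (PAGE_CACHE_SHIFT - inode->i_blkbits),
				1);
		/* ...so the actual read below hits freshly primed pages. */
		bh = ext3_bread(NULL, inode, blk, 0, &err);
	}
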
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
index 98e78345ead9..59098ea56711 100644
--- a/fs/ext3/file.c
+++ b/fs/ext3/file.c
@@ -37,9 +37,9 @@ static int ext3_release_file (struct inode * inode, struct file * filp)
37 if ((filp->f_mode & FMODE_WRITE) && 37 if ((filp->f_mode & FMODE_WRITE) &&
38 (atomic_read(&inode->i_writecount) == 1)) 38 (atomic_read(&inode->i_writecount) == 1))
39 { 39 {
40 down(&EXT3_I(inode)->truncate_sem); 40 mutex_lock(&EXT3_I(inode)->truncate_mutex);
41 ext3_discard_reservation(inode); 41 ext3_discard_reservation(inode);
42 up(&EXT3_I(inode)->truncate_sem); 42 mutex_unlock(&EXT3_I(inode)->truncate_mutex);
43 } 43 }
44 if (is_dx(inode) && filp->private_data) 44 if (is_dx(inode) && filp->private_data)
45 ext3_htree_free_dir_info(filp->private_data); 45 ext3_htree_free_dir_info(filp->private_data);
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 0384e539b88f..2c361377e0a5 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -671,7 +671,7 @@ err_out:
671 * The BKL may not be held on entry here. Be sure to take it early. 671 * The BKL may not be held on entry here. Be sure to take it early.
672 */ 672 */
673 673
674static int 674int
675ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, 675ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
676 struct buffer_head *bh_result, int create, int extend_disksize) 676 struct buffer_head *bh_result, int create, int extend_disksize)
677{ 677{
@@ -702,7 +702,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
702 if (!create || err == -EIO) 702 if (!create || err == -EIO)
703 goto cleanup; 703 goto cleanup;
704 704
705 down(&ei->truncate_sem); 705 mutex_lock(&ei->truncate_mutex);
706 706
707 /* 707 /*
708 * If the indirect block is missing while we are reading 708 * If the indirect block is missing while we are reading
@@ -723,7 +723,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
723 } 723 }
724 partial = ext3_get_branch(inode, depth, offsets, chain, &err); 724 partial = ext3_get_branch(inode, depth, offsets, chain, &err);
725 if (!partial) { 725 if (!partial) {
726 up(&ei->truncate_sem); 726 mutex_unlock(&ei->truncate_mutex);
727 if (err) 727 if (err)
728 goto cleanup; 728 goto cleanup;
729 clear_buffer_new(bh_result); 729 clear_buffer_new(bh_result);
@@ -759,13 +759,13 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
759 err = ext3_splice_branch(handle, inode, iblock, chain, 759 err = ext3_splice_branch(handle, inode, iblock, chain,
760 partial, left); 760 partial, left);
761 /* 761 /*
762 * i_disksize growing is protected by truncate_sem. Don't forget to 762 * i_disksize growing is protected by truncate_mutex. Don't forget to
763 * protect it if you're about to implement concurrent 763 * protect it if you're about to implement concurrent
764 * ext3_get_block() -bzzz 764 * ext3_get_block() -bzzz
765 */ 765 */
766 if (!err && extend_disksize && inode->i_size > ei->i_disksize) 766 if (!err && extend_disksize && inode->i_size > ei->i_disksize)
767 ei->i_disksize = inode->i_size; 767 ei->i_disksize = inode->i_size;
768 up(&ei->truncate_sem); 768 mutex_unlock(&ei->truncate_mutex);
769 if (err) 769 if (err)
770 goto cleanup; 770 goto cleanup;
771 771
@@ -1227,7 +1227,7 @@ static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1227 * ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ... 1227 * ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
1228 * 1228 *
1229 * Same applies to ext3_get_block(). We will deadlock on various things like 1229 * Same applies to ext3_get_block(). We will deadlock on various things like
1230 * lock_journal and i_truncate_sem. 1230 * lock_journal and i_truncate_mutex.
1231 * 1231 *
1232 * Setting PF_MEMALLOC here doesn't work - too many internal memory 1232 * Setting PF_MEMALLOC here doesn't work - too many internal memory
1233 * allocations fail. 1233 * allocations fail.
@@ -2161,7 +2161,7 @@ void ext3_truncate(struct inode * inode)
2161 * From here we block out all ext3_get_block() callers who want to 2161 * From here we block out all ext3_get_block() callers who want to
2162 * modify the block allocation tree. 2162 * modify the block allocation tree.
2163 */ 2163 */
2164 down(&ei->truncate_sem); 2164 mutex_lock(&ei->truncate_mutex);
2165 2165
2166 if (n == 1) { /* direct blocks */ 2166 if (n == 1) { /* direct blocks */
2167 ext3_free_data(handle, inode, NULL, i_data+offsets[0], 2167 ext3_free_data(handle, inode, NULL, i_data+offsets[0],
@@ -2228,7 +2228,7 @@ do_indirects:
2228 2228
2229 ext3_discard_reservation(inode); 2229 ext3_discard_reservation(inode);
2230 2230
2231 up(&ei->truncate_sem); 2231 mutex_unlock(&ei->truncate_mutex);
2232 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; 2232 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
2233 ext3_mark_inode_dirty(handle, inode); 2233 ext3_mark_inode_dirty(handle, inode);
2234 2234
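
All three ext3 hunks above protect the same invariant: i_disksize and the indirect-block allocation tree may only change under truncate_mutex. The update rule, as a minimal sketch (ext3_update_disksize is a hypothetical helper name; kernel context assumed):

	/* Grow i_disksize only while holding truncate_mutex, so truncate
	 * and block allocation never observe it mid-update. */
	static void ext3_update_disksize(struct inode *inode)
	{
		struct ext3_inode_info *ei = EXT3_I(inode);

		mutex_lock(&ei->truncate_mutex);
		if (inode->i_size > ei->i_disksize)
			ei->i_disksize = inode->i_size;
		mutex_unlock(&ei->truncate_mutex);
	}
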
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index 556cd5510078..aaf1da17b6d4 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -182,7 +182,7 @@ flags_err:
182 * need to allocate reservation structure for this inode 182 * need to allocate reservation structure for this inode
183 * before set the window size 183 * before set the window size
184 */ 184 */
185 down(&ei->truncate_sem); 185 mutex_lock(&ei->truncate_mutex);
186 if (!ei->i_block_alloc_info) 186 if (!ei->i_block_alloc_info)
187 ext3_init_block_alloc_info(inode); 187 ext3_init_block_alloc_info(inode);
188 188
@@ -190,7 +190,7 @@ flags_err:
190 struct ext3_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node; 190 struct ext3_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node;
191 rsv->rsv_goal_size = rsv_window_size; 191 rsv->rsv_goal_size = rsv_window_size;
192 } 192 }
193 up(&ei->truncate_sem); 193 mutex_unlock(&ei->truncate_mutex);
194 return 0; 194 return 0;
195 } 195 }
196 case EXT3_IOC_GROUP_EXTEND: { 196 case EXT3_IOC_GROUP_EXTEND: {
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 56bf76586019..efe5b20d7a5a 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -472,7 +472,7 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
472#ifdef CONFIG_EXT3_FS_XATTR 472#ifdef CONFIG_EXT3_FS_XATTR
473 init_rwsem(&ei->xattr_sem); 473 init_rwsem(&ei->xattr_sem);
474#endif 474#endif
475 init_MUTEX(&ei->truncate_sem); 475 mutex_init(&ei->truncate_mutex);
476 inode_init_once(&ei->vfs_inode); 476 inode_init_once(&ei->vfs_inode);
477 } 477 }
478} 478}
@@ -2382,8 +2382,8 @@ static int ext3_statfs (struct super_block * sb, struct kstatfs * buf)
2382 * Process 1 Process 2 2382 * Process 1 Process 2
2383 * ext3_create() quota_sync() 2383 * ext3_create() quota_sync()
2384 * journal_start() write_dquot() 2384 * journal_start() write_dquot()
2385 * DQUOT_INIT() down(dqio_sem) 2385 * DQUOT_INIT() down(dqio_mutex)
2386 * down(dqio_sem) journal_start() 2386 * down(dqio_mutex) journal_start()
2387 * 2387 *
2388 */ 2388 */
2389 2389
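
The two-process diagram above is a classic ABBA inversion: one path takes the journal before dqio_mutex, the other the reverse. A standalone userspace analogue with pthreads makes the hazard concrete (the two mutexes are hypothetical stand-ins for journal_start() and dqio_mutex):

	#include <pthread.h>

	static pthread_mutex_t journal = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t dqio    = PTHREAD_MUTEX_INITIALIZER;

	static void *create_path(void *arg)	/* Process 1: ext3_create() */
	{
		pthread_mutex_lock(&journal);	/* journal_start() */
		pthread_mutex_lock(&dqio);	/* DQUOT_INIT() */
		pthread_mutex_unlock(&dqio);
		pthread_mutex_unlock(&journal);
		return NULL;
	}

	static void *sync_path(void *arg)	/* Process 2: quota_sync() */
	{
		pthread_mutex_lock(&dqio);	/* write_dquot() */
		pthread_mutex_lock(&journal);	/* journal_start() */
		pthread_mutex_unlock(&journal);
		pthread_mutex_unlock(&dqio);
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		/* With unlucky timing each thread takes its first lock and
		 * then blocks forever on the other's. ext3 avoids this by
		 * always starting the journal before entering quota code. */
		pthread_create(&a, NULL, create_path, NULL);
		pthread_create(&b, NULL, sync_path, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		return 0;
	}
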
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index db0de5c621c7..4095bc149eb1 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -114,7 +114,7 @@ static inline int fat_get_entry(struct inode *dir, loff_t *pos,
114} 114}
115 115
116/* 116/*
117 * Convert Unicode 16 to UTF8, translated Unicode, or ASCII. 117 * Convert Unicode 16 to UTF-8, translated Unicode, or ASCII.
118 * If uni_xlate is enabled and we can't get a 1:1 conversion, use a 118 * If uni_xlate is enabled and we can't get a 1:1 conversion, use a
119 * colon as an escape character since it is normally invalid on the vfat 119 * colon as an escape character since it is normally invalid on the vfat
120 * filesystem. The following four characters are the hexadecimal digits 120 * filesystem. The following four characters are the hexadecimal digits
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index a1a9e0451217..ab171ea8e869 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -267,19 +267,19 @@ static struct fatent_operations fat32_ops = {
267 267
268static inline void lock_fat(struct msdos_sb_info *sbi) 268static inline void lock_fat(struct msdos_sb_info *sbi)
269{ 269{
270 down(&sbi->fat_lock); 270 mutex_lock(&sbi->fat_lock);
271} 271}
272 272
273static inline void unlock_fat(struct msdos_sb_info *sbi) 273static inline void unlock_fat(struct msdos_sb_info *sbi)
274{ 274{
275 up(&sbi->fat_lock); 275 mutex_unlock(&sbi->fat_lock);
276} 276}
277 277
278void fat_ent_access_init(struct super_block *sb) 278void fat_ent_access_init(struct super_block *sb)
279{ 279{
280 struct msdos_sb_info *sbi = MSDOS_SB(sb); 280 struct msdos_sb_info *sbi = MSDOS_SB(sb);
281 281
282 init_MUTEX(&sbi->fat_lock); 282 mutex_init(&sbi->fat_lock);
283 283
284 switch (sbi->fat_bits) { 284 switch (sbi->fat_bits) {
285 case 32: 285 case 32:
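
The lock_fat()/unlock_fat() change is the same mechanical sem2mutex conversion applied throughout this merge. The mapping, for reference (kernel context; the wrapper names are illustrative):

	/* Before: counting semaphore initialized to 1, used as a mutex */
	static struct semaphore the_lock_old;
	static void init_old(void) { init_MUTEX(&the_lock_old); }
	static void take_old(void) { down(&the_lock_old); }
	static void drop_old(void) { up(&the_lock_old); }

	/* After: a real mutex - strict owner semantics plus debug checks */
	static DEFINE_MUTEX(the_lock_new);
	static void take_new(void) { mutex_lock(&the_lock_new); }
	static void drop_new(void) { mutex_unlock(&the_lock_new); }
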
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index e7f4aa7fc686..e78d7b4842cc 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -1101,7 +1101,7 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug,
1101 return -EINVAL; 1101 return -EINVAL;
1102 } 1102 }
1103 } 1103 }
1104 /* UTF8 doesn't provide FAT semantics */ 1104 /* UTF-8 doesn't provide FAT semantics */
1105 if (!strcmp(opts->iocharset, "utf8")) { 1105 if (!strcmp(opts->iocharset, "utf8")) {
1106 printk(KERN_ERR "FAT: utf8 is not a recommended IO charset" 1106 printk(KERN_ERR "FAT: utf8 is not a recommended IO charset"
1107 " for FAT filesystems, filesystem will be case sensitive!\n"); 1107 " for FAT filesystems, filesystem will be case sensitive!\n");
diff --git a/fs/fcntl.c b/fs/fcntl.c
index dc4a7007f4e7..03c789560fb8 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -73,8 +73,8 @@ repeat:
73 * orig_start..fdt->next_fd 73 * orig_start..fdt->next_fd
74 */ 74 */
75 start = orig_start; 75 start = orig_start;
76 if (start < fdt->next_fd) 76 if (start < files->next_fd)
77 start = fdt->next_fd; 77 start = files->next_fd;
78 78
79 newfd = start; 79 newfd = start;
80 if (start < fdt->max_fdset) { 80 if (start < fdt->max_fdset) {
@@ -102,9 +102,8 @@ repeat:
102 * we reacquire the fdtable pointer and use it while holding 102 * we reacquire the fdtable pointer and use it while holding
103 * the lock; no one can free it during that time. 103 * the lock; no one can free it during that time.
104 */ 104 */
105 fdt = files_fdtable(files); 105 if (start <= files->next_fd)
106 if (start <= fdt->next_fd) 106 files->next_fd = newfd + 1;
107 fdt->next_fd = newfd + 1;
108 107
109 error = newfd; 108 error = newfd;
110 109
diff --git a/fs/file.c b/fs/file.c
index cea7cbea11d0..bbc743314730 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -125,7 +125,8 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
125 kmem_cache_free(files_cachep, fdt->free_files); 125 kmem_cache_free(files_cachep, fdt->free_files);
126 return; 126 return;
127 } 127 }
128 if (fdt->max_fdset <= __FD_SETSIZE && fdt->max_fds <= NR_OPEN_DEFAULT) { 128 if (fdt->max_fdset <= EMBEDDED_FD_SET_SIZE &&
129 fdt->max_fds <= NR_OPEN_DEFAULT) {
129 /* 130 /*
130 * The fdtable was embedded 131 * The fdtable was embedded
131 */ 132 */
@@ -155,8 +156,9 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
155 156
156void free_fdtable(struct fdtable *fdt) 157void free_fdtable(struct fdtable *fdt)
157{ 158{
158 if (fdt->free_files || fdt->max_fdset > __FD_SETSIZE || 159 if (fdt->free_files ||
159 fdt->max_fds > NR_OPEN_DEFAULT) 160 fdt->max_fdset > EMBEDDED_FD_SET_SIZE ||
161 fdt->max_fds > NR_OPEN_DEFAULT)
160 call_rcu(&fdt->rcu, free_fdtable_rcu); 162 call_rcu(&fdt->rcu, free_fdtable_rcu);
161} 163}
162 164
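
free_fdtable() cannot simply kfree() the table: lockless readers may still be dereferencing it, so the free is deferred with call_rcu() until every pre-existing reader has left its read-side section. The pattern as a kernel-style sketch (my_table and its helpers are hypothetical names):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct my_table {		/* stand-in for struct fdtable */
		struct file **fd;
		struct rcu_head rcu;
	};

	static void my_table_free_rcu(struct rcu_head *rcu)
	{
		struct my_table *t = container_of(rcu, struct my_table, rcu);

		/* Runs after a grace period: no reader can still see t. */
		kfree(t->fd);
		kfree(t);
	}

	static void my_table_free(struct my_table *t)
	{
		/* Readers found t via rcu_dereference() under
		 * rcu_read_lock(); defer the real free past them. */
		call_rcu(&t->rcu, my_table_free_rcu);
	}
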
@@ -199,7 +201,6 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *fdt)
199 (nfdt->max_fds - fdt->max_fds) * 201 (nfdt->max_fds - fdt->max_fds) *
200 sizeof(struct file *)); 202 sizeof(struct file *));
201 } 203 }
202 nfdt->next_fd = fdt->next_fd;
203} 204}
204 205
205/* 206/*
@@ -220,11 +221,9 @@ fd_set * alloc_fdset(int num)
220 221
221void free_fdset(fd_set *array, int num) 222void free_fdset(fd_set *array, int num)
222{ 223{
223 int size = num / 8; 224 if (num <= EMBEDDED_FD_SET_SIZE) /* Don't free an embedded fdset */
224
225 if (num <= __FD_SETSIZE) /* Don't free an embedded fdset */
226 return; 225 return;
227 else if (size <= PAGE_SIZE) 226 else if (num <= 8 * PAGE_SIZE)
228 kfree(array); 227 kfree(array);
229 else 228 else
230 vfree(array); 229 vfree(array);
@@ -237,22 +236,17 @@ static struct fdtable *alloc_fdtable(int nr)
237 fd_set *new_openset = NULL, *new_execset = NULL; 236 fd_set *new_openset = NULL, *new_execset = NULL;
238 struct file **new_fds; 237 struct file **new_fds;
239 238
240 fdt = kmalloc(sizeof(*fdt), GFP_KERNEL); 239 fdt = kzalloc(sizeof(*fdt), GFP_KERNEL);
241 if (!fdt) 240 if (!fdt)
242 goto out; 241 goto out;
243 memset(fdt, 0, sizeof(*fdt));
244 242
245 nfds = __FD_SETSIZE; 243 nfds = 8 * L1_CACHE_BYTES;
246 /* Expand to the max in easy steps */ 244 /* Expand to the max in easy steps */
247 do { 245 while (nfds <= nr) {
248 if (nfds < (PAGE_SIZE * 8)) 246 nfds = nfds * 2;
249 nfds = PAGE_SIZE * 8; 247 if (nfds > NR_OPEN)
250 else { 248 nfds = NR_OPEN;
251 nfds = nfds * 2; 249 }
252 if (nfds > NR_OPEN)
253 nfds = NR_OPEN;
254 }
255 } while (nfds <= nr);
256 250
257 new_openset = alloc_fdset(nfds); 251 new_openset = alloc_fdset(nfds);
258 new_execset = alloc_fdset(nfds); 252 new_execset = alloc_fdset(nfds);
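
The rewritten sizing loop starts at 8 * L1_CACHE_BYTES and doubles until the request fits, capping at NR_OPEN. A standalone C sketch of the policy (the constants are stand-ins; callers must guarantee nr < NR_OPEN or the capped loop could not terminate):

	#include <stdio.h>

	#define START_FDS 512			/* stands in for 8 * L1_CACHE_BYTES */
	#define NR_OPEN   (1024 * 1024)		/* hard upper bound */

	static int round_up_fds(int nr)		/* requires nr < NR_OPEN */
	{
		int nfds = START_FDS;

		while (nfds <= nr) {
			nfds *= 2;		/* grow in powers of two */
			if (nfds > NR_OPEN)
				nfds = NR_OPEN;	/* never exceed the cap */
		}
		return nfds;
	}

	int main(void)
	{
		printf("%d -> %d\n", 1000, round_up_fds(1000));	/* 1000 -> 1024 */
		printf("%d -> %d\n", 5000, round_up_fds(5000));	/* 5000 -> 8192 */
		return 0;
	}
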
diff --git a/fs/file_table.c b/fs/file_table.c
index 44fabeaa9415..bcea1998b4de 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -88,6 +88,7 @@ int proc_nr_files(ctl_table *table, int write, struct file *filp,
88 */ 88 */
89struct file *get_empty_filp(void) 89struct file *get_empty_filp(void)
90{ 90{
91 struct task_struct *tsk;
91 static int old_max; 92 static int old_max;
92 struct file * f; 93 struct file * f;
93 94
@@ -112,13 +113,14 @@ struct file *get_empty_filp(void)
112 if (security_file_alloc(f)) 113 if (security_file_alloc(f))
113 goto fail_sec; 114 goto fail_sec;
114 115
115 eventpoll_init_file(f); 116 tsk = current;
117 INIT_LIST_HEAD(&f->f_u.fu_list);
116 atomic_set(&f->f_count, 1); 118 atomic_set(&f->f_count, 1);
117 f->f_uid = current->fsuid;
118 f->f_gid = current->fsgid;
119 rwlock_init(&f->f_owner.lock); 119 rwlock_init(&f->f_owner.lock);
120 f->f_uid = tsk->fsuid;
121 f->f_gid = tsk->fsgid;
122 eventpoll_init_file(f);
120 /* f->f_version: 0 */ 123 /* f->f_version: 0 */
121 INIT_LIST_HEAD(&f->f_u.fu_list);
122 return f; 124 return f;
123 125
124over: 126over:
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 6628c3b352cb..4c6473ab3b34 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -9,6 +9,7 @@
9//#define DBG 9//#define DBG
10//#define DEBUG_LOCKS 10//#define DEBUG_LOCKS
11 11
12#include <linux/mutex.h>
12#include <linux/pagemap.h> 13#include <linux/pagemap.h>
13#include <linux/buffer_head.h> 14#include <linux/buffer_head.h>
14#include <linux/hpfs_fs.h> 15#include <linux/hpfs_fs.h>
@@ -57,8 +58,8 @@ struct hpfs_inode_info {
57 unsigned i_ea_uid : 1; /* file's uid is stored in ea */ 58 unsigned i_ea_uid : 1; /* file's uid is stored in ea */
58 unsigned i_ea_gid : 1; /* file's gid is stored in ea */ 59 unsigned i_ea_gid : 1; /* file's gid is stored in ea */
59 unsigned i_dirty : 1; 60 unsigned i_dirty : 1;
60 struct semaphore i_sem; 61 struct mutex i_mutex;
61 struct semaphore i_parent; 62 struct mutex i_parent_mutex;
62 loff_t **i_rddir_off; 63 loff_t **i_rddir_off;
63 struct inode vfs_inode; 64 struct inode vfs_inode;
64}; 65};
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index e3d17e9ea6c1..56f2c338c4d9 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -186,9 +186,9 @@ void hpfs_write_inode(struct inode *i)
186 kfree(hpfs_inode->i_rddir_off); 186 kfree(hpfs_inode->i_rddir_off);
187 hpfs_inode->i_rddir_off = NULL; 187 hpfs_inode->i_rddir_off = NULL;
188 } 188 }
189 down(&hpfs_inode->i_parent); 189 mutex_lock(&hpfs_inode->i_parent_mutex);
190 if (!i->i_nlink) { 190 if (!i->i_nlink) {
191 up(&hpfs_inode->i_parent); 191 mutex_unlock(&hpfs_inode->i_parent_mutex);
192 return; 192 return;
193 } 193 }
194 parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir); 194 parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir);
@@ -199,14 +199,14 @@ void hpfs_write_inode(struct inode *i)
199 hpfs_read_inode(parent); 199 hpfs_read_inode(parent);
200 unlock_new_inode(parent); 200 unlock_new_inode(parent);
201 } 201 }
202 down(&hpfs_inode->i_sem); 202 mutex_lock(&hpfs_inode->i_mutex);
203 hpfs_write_inode_nolock(i); 203 hpfs_write_inode_nolock(i);
204 up(&hpfs_inode->i_sem); 204 mutex_unlock(&hpfs_inode->i_mutex);
205 iput(parent); 205 iput(parent);
206 } else { 206 } else {
207 mark_inode_dirty(i); 207 mark_inode_dirty(i);
208 } 208 }
209 up(&hpfs_inode->i_parent); 209 mutex_unlock(&hpfs_inode->i_parent_mutex);
210} 210}
211 211
212void hpfs_write_inode_nolock(struct inode *i) 212void hpfs_write_inode_nolock(struct inode *i)
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index 8ff8fc433fc1..a03abb12c610 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -60,7 +60,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
60 if (dee.read_only) 60 if (dee.read_only)
61 result->i_mode &= ~0222; 61 result->i_mode &= ~0222;
62 62
63 down(&hpfs_i(dir)->i_sem); 63 mutex_lock(&hpfs_i(dir)->i_mutex);
64 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); 64 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0);
65 if (r == 1) 65 if (r == 1)
66 goto bail3; 66 goto bail3;
@@ -101,11 +101,11 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
101 hpfs_write_inode_nolock(result); 101 hpfs_write_inode_nolock(result);
102 } 102 }
103 d_instantiate(dentry, result); 103 d_instantiate(dentry, result);
104 up(&hpfs_i(dir)->i_sem); 104 mutex_unlock(&hpfs_i(dir)->i_mutex);
105 unlock_kernel(); 105 unlock_kernel();
106 return 0; 106 return 0;
107bail3: 107bail3:
108 up(&hpfs_i(dir)->i_sem); 108 mutex_unlock(&hpfs_i(dir)->i_mutex);
109 iput(result); 109 iput(result);
110bail2: 110bail2:
111 hpfs_brelse4(&qbh0); 111 hpfs_brelse4(&qbh0);
@@ -168,7 +168,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
168 result->i_data.a_ops = &hpfs_aops; 168 result->i_data.a_ops = &hpfs_aops;
169 hpfs_i(result)->mmu_private = 0; 169 hpfs_i(result)->mmu_private = 0;
170 170
171 down(&hpfs_i(dir)->i_sem); 171 mutex_lock(&hpfs_i(dir)->i_mutex);
172 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); 172 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0);
173 if (r == 1) 173 if (r == 1)
174 goto bail2; 174 goto bail2;
@@ -193,12 +193,12 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
193 hpfs_write_inode_nolock(result); 193 hpfs_write_inode_nolock(result);
194 } 194 }
195 d_instantiate(dentry, result); 195 d_instantiate(dentry, result);
196 up(&hpfs_i(dir)->i_sem); 196 mutex_unlock(&hpfs_i(dir)->i_mutex);
197 unlock_kernel(); 197 unlock_kernel();
198 return 0; 198 return 0;
199 199
200bail2: 200bail2:
201 up(&hpfs_i(dir)->i_sem); 201 mutex_unlock(&hpfs_i(dir)->i_mutex);
202 iput(result); 202 iput(result);
203bail1: 203bail1:
204 brelse(bh); 204 brelse(bh);
@@ -254,7 +254,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t
254 result->i_blocks = 1; 254 result->i_blocks = 1;
255 init_special_inode(result, mode, rdev); 255 init_special_inode(result, mode, rdev);
256 256
257 down(&hpfs_i(dir)->i_sem); 257 mutex_lock(&hpfs_i(dir)->i_mutex);
258 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); 258 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0);
259 if (r == 1) 259 if (r == 1)
260 goto bail2; 260 goto bail2;
@@ -271,12 +271,12 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t
271 271
272 hpfs_write_inode_nolock(result); 272 hpfs_write_inode_nolock(result);
273 d_instantiate(dentry, result); 273 d_instantiate(dentry, result);
274 up(&hpfs_i(dir)->i_sem); 274 mutex_unlock(&hpfs_i(dir)->i_mutex);
275 brelse(bh); 275 brelse(bh);
276 unlock_kernel(); 276 unlock_kernel();
277 return 0; 277 return 0;
278bail2: 278bail2:
279 up(&hpfs_i(dir)->i_sem); 279 mutex_unlock(&hpfs_i(dir)->i_mutex);
280 iput(result); 280 iput(result);
281bail1: 281bail1:
282 brelse(bh); 282 brelse(bh);
@@ -333,7 +333,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
333 result->i_op = &page_symlink_inode_operations; 333 result->i_op = &page_symlink_inode_operations;
334 result->i_data.a_ops = &hpfs_symlink_aops; 334 result->i_data.a_ops = &hpfs_symlink_aops;
335 335
336 down(&hpfs_i(dir)->i_sem); 336 mutex_lock(&hpfs_i(dir)->i_mutex);
337 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); 337 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0);
338 if (r == 1) 338 if (r == 1)
339 goto bail2; 339 goto bail2;
@@ -352,11 +352,11 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
352 352
353 hpfs_write_inode_nolock(result); 353 hpfs_write_inode_nolock(result);
354 d_instantiate(dentry, result); 354 d_instantiate(dentry, result);
355 up(&hpfs_i(dir)->i_sem); 355 mutex_unlock(&hpfs_i(dir)->i_mutex);
356 unlock_kernel(); 356 unlock_kernel();
357 return 0; 357 return 0;
358bail2: 358bail2:
359 up(&hpfs_i(dir)->i_sem); 359 mutex_unlock(&hpfs_i(dir)->i_mutex);
360 iput(result); 360 iput(result);
361bail1: 361bail1:
362 brelse(bh); 362 brelse(bh);
@@ -382,8 +382,8 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
382 lock_kernel(); 382 lock_kernel();
383 hpfs_adjust_length((char *)name, &len); 383 hpfs_adjust_length((char *)name, &len);
384again: 384again:
385 down(&hpfs_i(inode)->i_parent); 385 mutex_lock(&hpfs_i(inode)->i_parent_mutex);
386 down(&hpfs_i(dir)->i_sem); 386 mutex_lock(&hpfs_i(dir)->i_mutex);
387 err = -ENOENT; 387 err = -ENOENT;
388 de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); 388 de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh);
389 if (!de) 389 if (!de)
@@ -410,8 +410,8 @@ again:
410 if (rep++) 410 if (rep++)
411 break; 411 break;
412 412
413 up(&hpfs_i(dir)->i_sem); 413 mutex_unlock(&hpfs_i(dir)->i_mutex);
414 up(&hpfs_i(inode)->i_parent); 414 mutex_unlock(&hpfs_i(inode)->i_parent_mutex);
415 d_drop(dentry); 415 d_drop(dentry);
416 spin_lock(&dentry->d_lock); 416 spin_lock(&dentry->d_lock);
417 if (atomic_read(&dentry->d_count) > 1 || 417 if (atomic_read(&dentry->d_count) > 1 ||
@@ -442,8 +442,8 @@ again:
442out1: 442out1:
443 hpfs_brelse4(&qbh); 443 hpfs_brelse4(&qbh);
444out: 444out:
445 up(&hpfs_i(dir)->i_sem); 445 mutex_unlock(&hpfs_i(dir)->i_mutex);
446 up(&hpfs_i(inode)->i_parent); 446 mutex_unlock(&hpfs_i(inode)->i_parent_mutex);
447 unlock_kernel(); 447 unlock_kernel();
448 return err; 448 return err;
449} 449}
@@ -463,8 +463,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
463 463
464 hpfs_adjust_length((char *)name, &len); 464 hpfs_adjust_length((char *)name, &len);
465 lock_kernel(); 465 lock_kernel();
466 down(&hpfs_i(inode)->i_parent); 466 mutex_lock(&hpfs_i(inode)->i_parent_mutex);
467 down(&hpfs_i(dir)->i_sem); 467 mutex_lock(&hpfs_i(dir)->i_mutex);
468 err = -ENOENT; 468 err = -ENOENT;
469 de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); 469 de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh);
470 if (!de) 470 if (!de)
@@ -502,8 +502,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
502out1: 502out1:
503 hpfs_brelse4(&qbh); 503 hpfs_brelse4(&qbh);
504out: 504out:
505 up(&hpfs_i(dir)->i_sem); 505 mutex_unlock(&hpfs_i(dir)->i_mutex);
506 up(&hpfs_i(inode)->i_parent); 506 mutex_unlock(&hpfs_i(inode)->i_parent_mutex);
507 unlock_kernel(); 507 unlock_kernel();
508 return err; 508 return err;
509} 509}
@@ -565,12 +565,12 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
565 565
566 lock_kernel(); 566 lock_kernel();
567 /* order doesn't matter, due to VFS exclusion */ 567 /* order doesn't matter, due to VFS exclusion */
568 down(&hpfs_i(i)->i_parent); 568 mutex_lock(&hpfs_i(i)->i_parent_mutex);
569 if (new_inode) 569 if (new_inode)
570 down(&hpfs_i(new_inode)->i_parent); 570 mutex_lock(&hpfs_i(new_inode)->i_parent_mutex);
571 down(&hpfs_i(old_dir)->i_sem); 571 mutex_lock(&hpfs_i(old_dir)->i_mutex);
572 if (new_dir != old_dir) 572 if (new_dir != old_dir)
573 down(&hpfs_i(new_dir)->i_sem); 573 mutex_lock(&hpfs_i(new_dir)->i_mutex);
574 574
575 /* Erm? Moving over the empty non-busy directory is perfectly legal */ 575 /* Erm? Moving over the empty non-busy directory is perfectly legal */
576 if (new_inode && S_ISDIR(new_inode->i_mode)) { 576 if (new_inode && S_ISDIR(new_inode->i_mode)) {
@@ -650,11 +650,11 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
650 hpfs_decide_conv(i, (char *)new_name, new_len); 650 hpfs_decide_conv(i, (char *)new_name, new_len);
651end1: 651end1:
652 if (old_dir != new_dir) 652 if (old_dir != new_dir)
653 up(&hpfs_i(new_dir)->i_sem); 653 mutex_unlock(&hpfs_i(new_dir)->i_mutex);
654 up(&hpfs_i(old_dir)->i_sem); 654 mutex_unlock(&hpfs_i(old_dir)->i_mutex);
655 up(&hpfs_i(i)->i_parent); 655 mutex_unlock(&hpfs_i(i)->i_parent_mutex);
656 if (new_inode) 656 if (new_inode)
657 up(&hpfs_i(new_inode)->i_parent); 657 mutex_unlock(&hpfs_i(new_inode)->i_parent_mutex);
658 unlock_kernel(); 658 unlock_kernel();
659 return err; 659 return err;
660} 660}
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 63e88d7e2c3b..9488a794076e 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -181,8 +181,8 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
181 181
182 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == 182 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
183 SLAB_CTOR_CONSTRUCTOR) { 183 SLAB_CTOR_CONSTRUCTOR) {
184 init_MUTEX(&ei->i_sem); 184 mutex_init(&ei->i_mutex);
185 init_MUTEX(&ei->i_parent); 185 mutex_init(&ei->i_parent_mutex);
186 inode_init_once(&ei->vfs_inode); 186 inode_init_once(&ei->vfs_inode);
187 } 187 }
188} 188}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index b35195289945..25fa8bba8cb5 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -56,48 +56,10 @@ static void huge_pagevec_release(struct pagevec *pvec)
56 pagevec_reinit(pvec); 56 pagevec_reinit(pvec);
57} 57}
58 58
59/*
60 * huge_pages_needed tries to determine the number of new huge pages that
61 * will be required to fully populate this VMA. This will be equal to
62 * the size of the VMA in huge pages minus the number of huge pages
63 * (covered by this VMA) that are found in the page cache.
64 *
65 * Result is in bytes to be compatible with is_hugepage_mem_enough()
66 */
67static unsigned long
68huge_pages_needed(struct address_space *mapping, struct vm_area_struct *vma)
69{
70 int i;
71 struct pagevec pvec;
72 unsigned long start = vma->vm_start;
73 unsigned long end = vma->vm_end;
74 unsigned long hugepages = (end - start) >> HPAGE_SHIFT;
75 pgoff_t next = vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT);
76 pgoff_t endpg = next + hugepages;
77
78 pagevec_init(&pvec, 0);
79 while (next < endpg) {
80 if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
81 break;
82 for (i = 0; i < pagevec_count(&pvec); i++) {
83 struct page *page = pvec.pages[i];
84 if (page->index > next)
85 next = page->index;
86 if (page->index >= endpg)
87 break;
88 next++;
89 hugepages--;
90 }
91 huge_pagevec_release(&pvec);
92 }
93 return hugepages << HPAGE_SHIFT;
94}
95
96static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) 59static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
97{ 60{
98 struct inode *inode = file->f_dentry->d_inode; 61 struct inode *inode = file->f_dentry->d_inode;
99 struct address_space *mapping = inode->i_mapping; 62 struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
100 unsigned long bytes;
101 loff_t len, vma_len; 63 loff_t len, vma_len;
102 int ret; 64 int ret;
103 65
@@ -113,10 +75,6 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
113 if (vma->vm_end - vma->vm_start < HPAGE_SIZE) 75 if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
114 return -EINVAL; 76 return -EINVAL;
115 77
116 bytes = huge_pages_needed(mapping, vma);
117 if (!is_hugepage_mem_enough(bytes))
118 return -ENOMEM;
119
120 vma_len = (loff_t)(vma->vm_end - vma->vm_start); 78 vma_len = (loff_t)(vma->vm_end - vma->vm_start);
121 79
122 mutex_lock(&inode->i_mutex); 80 mutex_lock(&inode->i_mutex);
@@ -129,6 +87,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
129 if (!(vma->vm_flags & VM_WRITE) && len > inode->i_size) 87 if (!(vma->vm_flags & VM_WRITE) && len > inode->i_size)
130 goto out; 88 goto out;
131 89
90 if (vma->vm_flags & VM_MAYSHARE)
91 if (hugetlb_extend_reservation(info, len >> HPAGE_SHIFT) != 0)
92 goto out;
93
132 ret = 0; 94 ret = 0;
133 hugetlb_prefault_arch_hook(vma->vm_mm); 95 hugetlb_prefault_arch_hook(vma->vm_mm);
134 if (inode->i_size < len) 96 if (inode->i_size < len)
@@ -227,13 +189,18 @@ static void truncate_huge_page(struct page *page)
227 put_page(page); 189 put_page(page);
228} 190}
229 191
230static void truncate_hugepages(struct address_space *mapping, loff_t lstart) 192static void truncate_hugepages(struct inode *inode, loff_t lstart)
231{ 193{
194 struct address_space *mapping = &inode->i_data;
232 const pgoff_t start = lstart >> HPAGE_SHIFT; 195 const pgoff_t start = lstart >> HPAGE_SHIFT;
233 struct pagevec pvec; 196 struct pagevec pvec;
234 pgoff_t next; 197 pgoff_t next;
235 int i; 198 int i;
236 199
200 hugetlb_truncate_reservation(HUGETLBFS_I(inode),
201 lstart >> HPAGE_SHIFT);
202 if (!mapping->nrpages)
203 return;
237 pagevec_init(&pvec, 0); 204 pagevec_init(&pvec, 0);
238 next = start; 205 next = start;
239 while (1) { 206 while (1) {
@@ -262,8 +229,7 @@ static void truncate_hugepages(struct address_space *mapping, loff_t lstart)
262 229
263static void hugetlbfs_delete_inode(struct inode *inode) 230static void hugetlbfs_delete_inode(struct inode *inode)
264{ 231{
265 if (inode->i_data.nrpages) 232 truncate_hugepages(inode, 0);
266 truncate_hugepages(&inode->i_data, 0);
267 clear_inode(inode); 233 clear_inode(inode);
268} 234}
269 235
@@ -296,8 +262,7 @@ static void hugetlbfs_forget_inode(struct inode *inode)
296 inode->i_state |= I_FREEING; 262 inode->i_state |= I_FREEING;
297 inodes_stat.nr_inodes--; 263 inodes_stat.nr_inodes--;
298 spin_unlock(&inode_lock); 264 spin_unlock(&inode_lock);
299 if (inode->i_data.nrpages) 265 truncate_hugepages(inode, 0);
300 truncate_hugepages(&inode->i_data, 0);
301 clear_inode(inode); 266 clear_inode(inode);
302 destroy_inode(inode); 267 destroy_inode(inode);
303} 268}
@@ -356,7 +321,7 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
356 if (!prio_tree_empty(&mapping->i_mmap)) 321 if (!prio_tree_empty(&mapping->i_mmap))
357 hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff); 322 hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
358 spin_unlock(&mapping->i_mmap_lock); 323 spin_unlock(&mapping->i_mmap_lock);
359 truncate_hugepages(mapping, offset); 324 truncate_hugepages(inode, offset);
360 return 0; 325 return 0;
361} 326}
362 327
@@ -573,6 +538,7 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
573 hugetlbfs_inc_free_inodes(sbinfo); 538 hugetlbfs_inc_free_inodes(sbinfo);
574 return NULL; 539 return NULL;
575 } 540 }
541 p->prereserved_hpages = 0;
576 return &p->vfs_inode; 542 return &p->vfs_inode;
577} 543}
578 544
@@ -771,21 +737,6 @@ static struct file_system_type hugetlbfs_fs_type = {
771 737
772static struct vfsmount *hugetlbfs_vfsmount; 738static struct vfsmount *hugetlbfs_vfsmount;
773 739
774/*
775 * Return the next identifier for a shm file
776 */
777static unsigned long hugetlbfs_counter(void)
778{
779 static DEFINE_SPINLOCK(lock);
780 static unsigned long counter;
781 unsigned long ret;
782
783 spin_lock(&lock);
784 ret = ++counter;
785 spin_unlock(&lock);
786 return ret;
787}
788
789static int can_do_hugetlb_shm(void) 740static int can_do_hugetlb_shm(void)
790{ 741{
791 return likely(capable(CAP_IPC_LOCK) || 742 return likely(capable(CAP_IPC_LOCK) ||
@@ -801,18 +752,16 @@ struct file *hugetlb_zero_setup(size_t size)
801 struct dentry *dentry, *root; 752 struct dentry *dentry, *root;
802 struct qstr quick_string; 753 struct qstr quick_string;
803 char buf[16]; 754 char buf[16];
755 static atomic_t counter;
804 756
805 if (!can_do_hugetlb_shm()) 757 if (!can_do_hugetlb_shm())
806 return ERR_PTR(-EPERM); 758 return ERR_PTR(-EPERM);
807 759
808 if (!is_hugepage_mem_enough(size))
809 return ERR_PTR(-ENOMEM);
810
811 if (!user_shm_lock(size, current->user)) 760 if (!user_shm_lock(size, current->user))
812 return ERR_PTR(-ENOMEM); 761 return ERR_PTR(-ENOMEM);
813 762
814 root = hugetlbfs_vfsmount->mnt_root; 763 root = hugetlbfs_vfsmount->mnt_root;
815 snprintf(buf, 16, "%lu", hugetlbfs_counter()); 764 snprintf(buf, 16, "%u", atomic_inc_return(&counter));
816 quick_string.name = buf; 765 quick_string.name = buf;
817 quick_string.len = strlen(quick_string.name); 766 quick_string.len = strlen(quick_string.name);
818 quick_string.hash = 0; 767 quick_string.hash = 0;
@@ -831,6 +780,11 @@ struct file *hugetlb_zero_setup(size_t size)
831 if (!inode) 780 if (!inode)
832 goto out_file; 781 goto out_file;
833 782
783 error = -ENOMEM;
784 if (hugetlb_extend_reservation(HUGETLBFS_I(inode),
785 size >> HPAGE_SHIFT) != 0)
786 goto out_inode;
787
834 d_instantiate(dentry, inode); 788 d_instantiate(dentry, inode);
835 inode->i_size = size; 789 inode->i_size = size;
836 inode->i_nlink = 0; 790 inode->i_nlink = 0;
@@ -841,6 +795,8 @@ struct file *hugetlb_zero_setup(size_t size)
841 file->f_mode = FMODE_WRITE | FMODE_READ; 795 file->f_mode = FMODE_WRITE | FMODE_READ;
842 return file; 796 return file;
843 797
798out_inode:
799 iput(inode);
844out_file: 800out_file:
845 put_filp(file); 801 put_filp(file);
846out_dentry: 802out_dentry:
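
Two independent simplifications land in hugetlbfs here. First, the up-front guess via huge_pages_needed()/is_hugepage_mem_enough() is replaced by real accounting: hugetlb_extend_reservation() at mmap/setup time and hugetlb_truncate_reservation() at truncate time, tracked in the new prereserved_hpages field. Second, hugetlbfs_counter(), a spinlock around a plain counter, collapses into one atomic operation, since all the caller needs is a unique name for the shm file. A sketch of that second change:

#include <linux/spinlock.h>
#include <asm/atomic.h>

/* before: a lock whose only job is to make ++counter atomic */
static DEFINE_SPINLOCK(ctr_lock);
static unsigned long ctr;

static unsigned long next_id_locked(void)
{
	unsigned long ret;

	spin_lock(&ctr_lock);
	ret = ++ctr;
	spin_unlock(&ctr_lock);
	return ret;
}

/* after: atomic_inc_return() gives the incremented value directly */
static atomic_t id = ATOMIC_INIT(0);

static int next_id(void)
{
	return atomic_inc_return(&id);
}

The format string changes from %lu to %u to match, since atomic_inc_return() returns int.
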
diff --git a/fs/inode.c b/fs/inode.c
index d0be6159eb7f..25967b67903d 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -84,14 +84,14 @@ static struct hlist_head *inode_hashtable;
84DEFINE_SPINLOCK(inode_lock); 84DEFINE_SPINLOCK(inode_lock);
85 85
86/* 86/*
87 * iprune_sem provides exclusion between the kswapd or try_to_free_pages 87 * iprune_mutex provides exclusion between the kswapd or try_to_free_pages
88 * icache shrinking path, and the umount path. Without this exclusion, 88 * icache shrinking path, and the umount path. Without this exclusion,
89 * by the time prune_icache calls iput for the inode whose pages it has 89 * by the time prune_icache calls iput for the inode whose pages it has
90 * been invalidating, or by the time it calls clear_inode & destroy_inode 90 * been invalidating, or by the time it calls clear_inode & destroy_inode
91 * from its final dispose_list, the struct super_block they refer to 91 * from its final dispose_list, the struct super_block they refer to
92 * (for inode->i_sb->s_op) may already have been freed and reused. 92 * (for inode->i_sb->s_op) may already have been freed and reused.
93 */ 93 */
94DECLARE_MUTEX(iprune_sem); 94DEFINE_MUTEX(iprune_mutex);
95 95
96/* 96/*
97 * Statistics gathering.. 97 * Statistics gathering..
@@ -206,7 +206,7 @@ void inode_init_once(struct inode *inode)
206 i_size_ordered_init(inode); 206 i_size_ordered_init(inode);
207#ifdef CONFIG_INOTIFY 207#ifdef CONFIG_INOTIFY
208 INIT_LIST_HEAD(&inode->inotify_watches); 208 INIT_LIST_HEAD(&inode->inotify_watches);
209 sema_init(&inode->inotify_sem, 1); 209 mutex_init(&inode->inotify_mutex);
210#endif 210#endif
211} 211}
212 212
@@ -319,7 +319,7 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
319 /* 319 /*
320 * We can reschedule here without worrying about the list's 320 * We can reschedule here without worrying about the list's
321 * consistency because the per-sb list of inodes must not 321 * consistency because the per-sb list of inodes must not
322 * change during umount anymore, and because iprune_sem keeps 322 * change during umount anymore, and because iprune_mutex keeps
323 * shrink_icache_memory() away. 323 * shrink_icache_memory() away.
324 */ 324 */
325 cond_resched_lock(&inode_lock); 325 cond_resched_lock(&inode_lock);
@@ -355,14 +355,14 @@ int invalidate_inodes(struct super_block * sb)
355 int busy; 355 int busy;
356 LIST_HEAD(throw_away); 356 LIST_HEAD(throw_away);
357 357
358 down(&iprune_sem); 358 mutex_lock(&iprune_mutex);
359 spin_lock(&inode_lock); 359 spin_lock(&inode_lock);
360 inotify_unmount_inodes(&sb->s_inodes); 360 inotify_unmount_inodes(&sb->s_inodes);
361 busy = invalidate_list(&sb->s_inodes, &throw_away); 361 busy = invalidate_list(&sb->s_inodes, &throw_away);
362 spin_unlock(&inode_lock); 362 spin_unlock(&inode_lock);
363 363
364 dispose_list(&throw_away); 364 dispose_list(&throw_away);
365 up(&iprune_sem); 365 mutex_unlock(&iprune_mutex);
366 366
367 return busy; 367 return busy;
368} 368}
@@ -377,7 +377,7 @@ int __invalidate_device(struct block_device *bdev)
377 if (sb) { 377 if (sb) {
378 /* 378 /*
379 * no need to lock the super, get_super holds the 379 * no need to lock the super, get_super holds the
380 * read semaphore so the filesystem cannot go away 380 * read mutex so the filesystem cannot go away
381 * under us (->put_super runs with the write lock 381 * under us (->put_super runs with the write lock
382 * hold). 382 * hold).
383 */ 383 */
@@ -423,7 +423,7 @@ static void prune_icache(int nr_to_scan)
423 int nr_scanned; 423 int nr_scanned;
424 unsigned long reap = 0; 424 unsigned long reap = 0;
425 425
426 down(&iprune_sem); 426 mutex_lock(&iprune_mutex);
427 spin_lock(&inode_lock); 427 spin_lock(&inode_lock);
428 for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) { 428 for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
429 struct inode *inode; 429 struct inode *inode;
@@ -459,7 +459,7 @@ static void prune_icache(int nr_to_scan)
459 spin_unlock(&inode_lock); 459 spin_unlock(&inode_lock);
460 460
461 dispose_list(&freeable); 461 dispose_list(&freeable);
462 up(&iprune_sem); 462 mutex_unlock(&iprune_mutex);
463 463
464 if (current_is_kswapd()) 464 if (current_is_kswapd())
465 mod_page_state(kswapd_inodesteal, reap); 465 mod_page_state(kswapd_inodesteal, reap);
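
For a global lock the conversion swaps DECLARE_MUTEX(), which despite its name declared a semaphore initialised to one, for DEFINE_MUTEX(). A minimal sketch:

#include <linux/mutex.h>

static DEFINE_MUTEX(prune_mutex);	/* was: DECLARE_MUTEX(prune_sem) */

static void prune_cache(void)
{
	mutex_lock(&prune_mutex);	/* may sleep, exactly as down() did */
	/* ... shrink the cache; excludes the umount path ... */
	mutex_unlock(&prune_mutex);
}
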
diff --git a/fs/inotify.c b/fs/inotify.c
index 3041503bde02..0ee39ef591c6 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -54,10 +54,10 @@ int inotify_max_queued_events;
54 * Lock ordering: 54 * Lock ordering:
55 * 55 *
56 * dentry->d_lock (used to keep d_move() away from dentry->d_parent) 56 * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
57 * iprune_sem (synchronize shrink_icache_memory()) 57 * iprune_mutex (synchronize shrink_icache_memory())
58 * inode_lock (protects the super_block->s_inodes list) 58 * inode_lock (protects the super_block->s_inodes list)
59 * inode->inotify_sem (protects inode->inotify_watches and watches->i_list) 59 * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
60 * inotify_dev->sem (protects inotify_device and watches->d_list) 60 * inotify_dev->mutex (protects inotify_device and watches->d_list)
61 */ 61 */
62 62
63/* 63/*
@@ -79,12 +79,12 @@ int inotify_max_queued_events;
79/* 79/*
80 * struct inotify_device - represents an inotify instance 80 * struct inotify_device - represents an inotify instance
81 * 81 *
82 * This structure is protected by the semaphore 'sem'. 82 * This structure is protected by the mutex 'mutex'.
83 */ 83 */
84struct inotify_device { 84struct inotify_device {
85 wait_queue_head_t wq; /* wait queue for i/o */ 85 wait_queue_head_t wq; /* wait queue for i/o */
86 struct idr idr; /* idr mapping wd -> watch */ 86 struct idr idr; /* idr mapping wd -> watch */
87 struct semaphore sem; /* protects this bad boy */ 87 struct mutex mutex; /* protects this bad boy */
88 struct list_head events; /* list of queued events */ 88 struct list_head events; /* list of queued events */
89 struct list_head watches; /* list of watches */ 89 struct list_head watches; /* list of watches */
90 atomic_t count; /* reference count */ 90 atomic_t count; /* reference count */
@@ -101,7 +101,7 @@ struct inotify_device {
101 * device. In read(), this list is walked and all events that can fit in the 101 * device. In read(), this list is walked and all events that can fit in the
102 * buffer are returned. 102 * buffer are returned.
103 * 103 *
104 * Protected by dev->sem of the device in which we are queued. 104 * Protected by dev->mutex of the device in which we are queued.
105 */ 105 */
106struct inotify_kernel_event { 106struct inotify_kernel_event {
107 struct inotify_event event; /* the user-space event */ 107 struct inotify_event event; /* the user-space event */
@@ -112,8 +112,8 @@ struct inotify_kernel_event {
112/* 112/*
113 * struct inotify_watch - represents a watch request on a specific inode 113 * struct inotify_watch - represents a watch request on a specific inode
114 * 114 *
115 * d_list is protected by dev->sem of the associated watch->dev. 115 * d_list is protected by dev->mutex of the associated watch->dev.
116 * i_list and mask are protected by inode->inotify_sem of the associated inode. 116 * i_list and mask are protected by inode->inotify_mutex of the associated inode.
117 * dev, inode, and wd are never written to once the watch is created. 117 * dev, inode, and wd are never written to once the watch is created.
118 */ 118 */
119struct inotify_watch { 119struct inotify_watch {
@@ -261,7 +261,7 @@ static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
261/* 261/*
262 * inotify_dev_get_event - return the next event in the given dev's queue 262 * inotify_dev_get_event - return the next event in the given dev's queue
263 * 263 *
264 * Caller must hold dev->sem. 264 * Caller must hold dev->mutex.
265 */ 265 */
266static inline struct inotify_kernel_event * 266static inline struct inotify_kernel_event *
267inotify_dev_get_event(struct inotify_device *dev) 267inotify_dev_get_event(struct inotify_device *dev)
@@ -272,7 +272,7 @@ inotify_dev_get_event(struct inotify_device *dev)
272/* 272/*
273 * inotify_dev_queue_event - add a new event to the given device 273 * inotify_dev_queue_event - add a new event to the given device
274 * 274 *
275 * Caller must hold dev->sem. Can sleep (calls kernel_event()). 275 * Caller must hold dev->mutex. Can sleep (calls kernel_event()).
276 */ 276 */
277static void inotify_dev_queue_event(struct inotify_device *dev, 277static void inotify_dev_queue_event(struct inotify_device *dev,
278 struct inotify_watch *watch, u32 mask, 278 struct inotify_watch *watch, u32 mask,
@@ -315,7 +315,7 @@ static void inotify_dev_queue_event(struct inotify_device *dev,
315/* 315/*
316 * remove_kevent - cleans up and ultimately frees the given kevent 316 * remove_kevent - cleans up and ultimately frees the given kevent
317 * 317 *
318 * Caller must hold dev->sem. 318 * Caller must hold dev->mutex.
319 */ 319 */
320static void remove_kevent(struct inotify_device *dev, 320static void remove_kevent(struct inotify_device *dev,
321 struct inotify_kernel_event *kevent) 321 struct inotify_kernel_event *kevent)
@@ -332,7 +332,7 @@ static void remove_kevent(struct inotify_device *dev,
332/* 332/*
333 * inotify_dev_event_dequeue - destroy an event on the given device 333 * inotify_dev_event_dequeue - destroy an event on the given device
334 * 334 *
335 * Caller must hold dev->sem. 335 * Caller must hold dev->mutex.
336 */ 336 */
337static void inotify_dev_event_dequeue(struct inotify_device *dev) 337static void inotify_dev_event_dequeue(struct inotify_device *dev)
338{ 338{
@@ -346,7 +346,7 @@ static void inotify_dev_event_dequeue(struct inotify_device *dev)
346/* 346/*
347 * inotify_dev_get_wd - returns the next WD for use by the given dev 347 * inotify_dev_get_wd - returns the next WD for use by the given dev
348 * 348 *
349 * Callers must hold dev->sem. This function can sleep. 349 * Callers must hold dev->mutex. This function can sleep.
350 */ 350 */
351static int inotify_dev_get_wd(struct inotify_device *dev, 351static int inotify_dev_get_wd(struct inotify_device *dev,
352 struct inotify_watch *watch) 352 struct inotify_watch *watch)
@@ -383,7 +383,7 @@ static int find_inode(const char __user *dirname, struct nameidata *nd,
383/* 383/*
384 * create_watch - creates a watch on the given device. 384 * create_watch - creates a watch on the given device.
385 * 385 *
386 * Callers must hold dev->sem. Calls inotify_dev_get_wd() so may sleep. 386 * Callers must hold dev->mutex. Calls inotify_dev_get_wd() so may sleep.
387 * Both 'dev' and 'inode' (by way of nameidata) need to be pinned. 387 * Both 'dev' and 'inode' (by way of nameidata) need to be pinned.
388 */ 388 */
389static struct inotify_watch *create_watch(struct inotify_device *dev, 389static struct inotify_watch *create_watch(struct inotify_device *dev,
@@ -434,7 +434,7 @@ static struct inotify_watch *create_watch(struct inotify_device *dev,
434/* 434/*
435 * inotify_find_dev - find the watch associated with the given inode and dev 435 * inotify_find_dev - find the watch associated with the given inode and dev
436 * 436 *
437 * Callers must hold inode->inotify_sem. 437 * Callers must hold inode->inotify_mutex.
438 */ 438 */
439static struct inotify_watch *inode_find_dev(struct inode *inode, 439static struct inotify_watch *inode_find_dev(struct inode *inode,
440 struct inotify_device *dev) 440 struct inotify_device *dev)
@@ -469,7 +469,7 @@ static void remove_watch_no_event(struct inotify_watch *watch,
469 * the IN_IGNORED event to the given device signifying that the inode is no 469 * the IN_IGNORED event to the given device signifying that the inode is no
470 * longer watched. 470 * longer watched.
471 * 471 *
472 * Callers must hold both inode->inotify_sem and dev->sem. We drop a 472 * Callers must hold both inode->inotify_mutex and dev->mutex. We drop a
473 * reference to the inode before returning. 473 * reference to the inode before returning.
474 * 474 *
475 * The inode is not iput() so as to remain atomic. If the inode needs to be 475 * The inode is not iput() so as to remain atomic. If the inode needs to be
@@ -507,21 +507,21 @@ void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
507 if (!inotify_inode_watched(inode)) 507 if (!inotify_inode_watched(inode))
508 return; 508 return;
509 509
510 down(&inode->inotify_sem); 510 mutex_lock(&inode->inotify_mutex);
511 list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { 511 list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
512 u32 watch_mask = watch->mask; 512 u32 watch_mask = watch->mask;
513 if (watch_mask & mask) { 513 if (watch_mask & mask) {
514 struct inotify_device *dev = watch->dev; 514 struct inotify_device *dev = watch->dev;
515 get_inotify_watch(watch); 515 get_inotify_watch(watch);
516 down(&dev->sem); 516 mutex_lock(&dev->mutex);
517 inotify_dev_queue_event(dev, watch, mask, cookie, name); 517 inotify_dev_queue_event(dev, watch, mask, cookie, name);
518 if (watch_mask & IN_ONESHOT) 518 if (watch_mask & IN_ONESHOT)
519 remove_watch_no_event(watch, dev); 519 remove_watch_no_event(watch, dev);
520 up(&dev->sem); 520 mutex_unlock(&dev->mutex);
521 put_inotify_watch(watch); 521 put_inotify_watch(watch);
522 } 522 }
523 } 523 }
524 up(&inode->inotify_sem); 524 mutex_unlock(&inode->inotify_mutex);
525} 525}
526EXPORT_SYMBOL_GPL(inotify_inode_queue_event); 526EXPORT_SYMBOL_GPL(inotify_inode_queue_event);
527 527
@@ -569,7 +569,7 @@ EXPORT_SYMBOL_GPL(inotify_get_cookie);
569 * @list: list of inodes being unmounted (sb->s_inodes) 569 * @list: list of inodes being unmounted (sb->s_inodes)
570 * 570 *
571 * Called with inode_lock held, protecting the unmounting super block's list 571 * Called with inode_lock held, protecting the unmounting super block's list
572 * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay. 572 * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
573 * We temporarily drop inode_lock, however, and CAN block. 573 * We temporarily drop inode_lock, however, and CAN block.
574 */ 574 */
575void inotify_unmount_inodes(struct list_head *list) 575void inotify_unmount_inodes(struct list_head *list)
@@ -618,7 +618,7 @@ void inotify_unmount_inodes(struct list_head *list)
618 * We can safely drop inode_lock here because we hold 618 * We can safely drop inode_lock here because we hold
619 * references on both inode and next_i. Also no new inodes 619 * references on both inode and next_i. Also no new inodes
620 * will be added since the umount has begun. Finally, 620 * will be added since the umount has begun. Finally,
621 * iprune_sem keeps shrink_icache_memory() away. 621 * iprune_mutex keeps shrink_icache_memory() away.
622 */ 622 */
623 spin_unlock(&inode_lock); 623 spin_unlock(&inode_lock);
624 624
@@ -626,16 +626,16 @@ void inotify_unmount_inodes(struct list_head *list)
626 iput(need_iput_tmp); 626 iput(need_iput_tmp);
627 627
628 /* for each watch, send IN_UNMOUNT and then remove it */ 628 /* for each watch, send IN_UNMOUNT and then remove it */
629 down(&inode->inotify_sem); 629 mutex_lock(&inode->inotify_mutex);
630 watches = &inode->inotify_watches; 630 watches = &inode->inotify_watches;
631 list_for_each_entry_safe(watch, next_w, watches, i_list) { 631 list_for_each_entry_safe(watch, next_w, watches, i_list) {
632 struct inotify_device *dev = watch->dev; 632 struct inotify_device *dev = watch->dev;
633 down(&dev->sem); 633 mutex_lock(&dev->mutex);
634 inotify_dev_queue_event(dev, watch, IN_UNMOUNT,0,NULL); 634 inotify_dev_queue_event(dev, watch, IN_UNMOUNT,0,NULL);
635 remove_watch(watch, dev); 635 remove_watch(watch, dev);
636 up(&dev->sem); 636 mutex_unlock(&dev->mutex);
637 } 637 }
638 up(&inode->inotify_sem); 638 mutex_unlock(&inode->inotify_mutex);
639 iput(inode); 639 iput(inode);
640 640
641 spin_lock(&inode_lock); 641 spin_lock(&inode_lock);
@@ -651,14 +651,14 @@ void inotify_inode_is_dead(struct inode *inode)
651{ 651{
652 struct inotify_watch *watch, *next; 652 struct inotify_watch *watch, *next;
653 653
654 down(&inode->inotify_sem); 654 mutex_lock(&inode->inotify_mutex);
655 list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { 655 list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
656 struct inotify_device *dev = watch->dev; 656 struct inotify_device *dev = watch->dev;
657 down(&dev->sem); 657 mutex_lock(&dev->mutex);
658 remove_watch(watch, dev); 658 remove_watch(watch, dev);
659 up(&dev->sem); 659 mutex_unlock(&dev->mutex);
660 } 660 }
661 up(&inode->inotify_sem); 661 mutex_unlock(&inode->inotify_mutex);
662} 662}
663EXPORT_SYMBOL_GPL(inotify_inode_is_dead); 663EXPORT_SYMBOL_GPL(inotify_inode_is_dead);
664 664
@@ -670,10 +670,10 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait)
670 int ret = 0; 670 int ret = 0;
671 671
672 poll_wait(file, &dev->wq, wait); 672 poll_wait(file, &dev->wq, wait);
673 down(&dev->sem); 673 mutex_lock(&dev->mutex);
674 if (!list_empty(&dev->events)) 674 if (!list_empty(&dev->events))
675 ret = POLLIN | POLLRDNORM; 675 ret = POLLIN | POLLRDNORM;
676 up(&dev->sem); 676 mutex_unlock(&dev->mutex);
677 677
678 return ret; 678 return ret;
679} 679}
@@ -695,9 +695,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
695 695
696 prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); 696 prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
697 697
698 down(&dev->sem); 698 mutex_lock(&dev->mutex);
699 events = !list_empty(&dev->events); 699 events = !list_empty(&dev->events);
700 up(&dev->sem); 700 mutex_unlock(&dev->mutex);
701 if (events) { 701 if (events) {
702 ret = 0; 702 ret = 0;
703 break; 703 break;
@@ -720,7 +720,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
720 if (ret) 720 if (ret)
721 return ret; 721 return ret;
722 722
723 down(&dev->sem); 723 mutex_lock(&dev->mutex);
724 while (1) { 724 while (1) {
725 struct inotify_kernel_event *kevent; 725 struct inotify_kernel_event *kevent;
726 726
@@ -750,7 +750,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
750 750
751 remove_kevent(dev, kevent); 751 remove_kevent(dev, kevent);
752 } 752 }
753 up(&dev->sem); 753 mutex_unlock(&dev->mutex);
754 754
755 return ret; 755 return ret;
756} 756}
@@ -763,37 +763,37 @@ static int inotify_release(struct inode *ignored, struct file *file)
763 * Destroy all of the watches on this device. Unfortunately, not very 763 * Destroy all of the watches on this device. Unfortunately, not very
764 * pretty. We cannot do a simple iteration over the list, because we 764 * pretty. We cannot do a simple iteration over the list, because we
765 * do not know the inode until we iterate to the watch. But we need to 765 * do not know the inode until we iterate to the watch. But we need to
766 * hold inode->inotify_sem before dev->sem. The following works. 766 * hold inode->inotify_mutex before dev->mutex. The following works.
767 */ 767 */
768 while (1) { 768 while (1) {
769 struct inotify_watch *watch; 769 struct inotify_watch *watch;
770 struct list_head *watches; 770 struct list_head *watches;
771 struct inode *inode; 771 struct inode *inode;
772 772
773 down(&dev->sem); 773 mutex_lock(&dev->mutex);
774 watches = &dev->watches; 774 watches = &dev->watches;
775 if (list_empty(watches)) { 775 if (list_empty(watches)) {
776 up(&dev->sem); 776 mutex_unlock(&dev->mutex);
777 break; 777 break;
778 } 778 }
779 watch = list_entry(watches->next, struct inotify_watch, d_list); 779 watch = list_entry(watches->next, struct inotify_watch, d_list);
780 get_inotify_watch(watch); 780 get_inotify_watch(watch);
781 up(&dev->sem); 781 mutex_unlock(&dev->mutex);
782 782
783 inode = watch->inode; 783 inode = watch->inode;
784 down(&inode->inotify_sem); 784 mutex_lock(&inode->inotify_mutex);
785 down(&dev->sem); 785 mutex_lock(&dev->mutex);
786 remove_watch_no_event(watch, dev); 786 remove_watch_no_event(watch, dev);
787 up(&dev->sem); 787 mutex_unlock(&dev->mutex);
788 up(&inode->inotify_sem); 788 mutex_unlock(&inode->inotify_mutex);
789 put_inotify_watch(watch); 789 put_inotify_watch(watch);
790 } 790 }
791 791
792 /* destroy all of the events on this device */ 792 /* destroy all of the events on this device */
793 down(&dev->sem); 793 mutex_lock(&dev->mutex);
794 while (!list_empty(&dev->events)) 794 while (!list_empty(&dev->events))
795 inotify_dev_event_dequeue(dev); 795 inotify_dev_event_dequeue(dev);
796 up(&dev->sem); 796 mutex_unlock(&dev->mutex);
797 797
798 /* free this device: the put matching the get in inotify_init() */ 798 /* free this device: the put matching the get in inotify_init() */
799 put_inotify_dev(dev); 799 put_inotify_dev(dev);
@@ -811,26 +811,26 @@ static int inotify_ignore(struct inotify_device *dev, s32 wd)
811 struct inotify_watch *watch; 811 struct inotify_watch *watch;
812 struct inode *inode; 812 struct inode *inode;
813 813
814 down(&dev->sem); 814 mutex_lock(&dev->mutex);
815 watch = idr_find(&dev->idr, wd); 815 watch = idr_find(&dev->idr, wd);
816 if (unlikely(!watch)) { 816 if (unlikely(!watch)) {
817 up(&dev->sem); 817 mutex_unlock(&dev->mutex);
818 return -EINVAL; 818 return -EINVAL;
819 } 819 }
820 get_inotify_watch(watch); 820 get_inotify_watch(watch);
821 inode = watch->inode; 821 inode = watch->inode;
822 up(&dev->sem); 822 mutex_unlock(&dev->mutex);
823 823
824 down(&inode->inotify_sem); 824 mutex_lock(&inode->inotify_mutex);
825 down(&dev->sem); 825 mutex_lock(&dev->mutex);
826 826
827 /* make sure that we did not race */ 827 /* make sure that we did not race */
828 watch = idr_find(&dev->idr, wd); 828 watch = idr_find(&dev->idr, wd);
829 if (likely(watch)) 829 if (likely(watch))
830 remove_watch(watch, dev); 830 remove_watch(watch, dev);
831 831
832 up(&dev->sem); 832 mutex_unlock(&dev->mutex);
833 up(&inode->inotify_sem); 833 mutex_unlock(&inode->inotify_mutex);
834 put_inotify_watch(watch); 834 put_inotify_watch(watch);
835 835
836 return 0; 836 return 0;
@@ -905,7 +905,7 @@ asmlinkage long sys_inotify_init(void)
905 INIT_LIST_HEAD(&dev->events); 905 INIT_LIST_HEAD(&dev->events);
906 INIT_LIST_HEAD(&dev->watches); 906 INIT_LIST_HEAD(&dev->watches);
907 init_waitqueue_head(&dev->wq); 907 init_waitqueue_head(&dev->wq);
908 sema_init(&dev->sem, 1); 908 mutex_init(&dev->mutex);
909 dev->event_count = 0; 909 dev->event_count = 0;
910 dev->queue_size = 0; 910 dev->queue_size = 0;
911 dev->max_events = inotify_max_queued_events; 911 dev->max_events = inotify_max_queued_events;
@@ -960,8 +960,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
960 inode = nd.dentry->d_inode; 960 inode = nd.dentry->d_inode;
961 dev = filp->private_data; 961 dev = filp->private_data;
962 962
963 down(&inode->inotify_sem); 963 mutex_lock(&inode->inotify_mutex);
964 down(&dev->sem); 964 mutex_lock(&dev->mutex);
965 965
966 if (mask & IN_MASK_ADD) 966 if (mask & IN_MASK_ADD)
967 mask_add = 1; 967 mask_add = 1;
@@ -998,8 +998,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
998 list_add(&watch->i_list, &inode->inotify_watches); 998 list_add(&watch->i_list, &inode->inotify_watches);
999 ret = watch->wd; 999 ret = watch->wd;
1000out: 1000out:
1001 up(&dev->sem); 1001 mutex_unlock(&dev->mutex);
1002 up(&inode->inotify_sem); 1002 mutex_unlock(&inode->inotify_mutex);
1003 path_release(&nd); 1003 path_release(&nd);
1004fput_and_out: 1004fput_and_out:
1005 fput_light(filp, fput_needed); 1005 fput_light(filp, fput_needed);
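
The documented inotify ordering is inode->inotify_mutex before dev->mutex, which is why inotify_ignore() and inotify_release() cannot simply keep dev->mutex held: each looks the watch up under dev->mutex, drops it, takes both locks in the correct order, then re-checks that nothing was removed in the window where no lock was held. A generic sketch of that drop, relock, revalidate pattern (obj, lookup and remove_entry are hypothetical stand-ins, not inotify API):

#include <linux/mutex.h>

struct obj {
	struct mutex outer;	/* ordering rule: take before 'inner' */
	struct mutex inner;
};

static int lookup(struct obj *o, int key);		/* hypothetical */
static void remove_entry(struct obj *o, int key);	/* hypothetical */

static int drop_relock_remove(struct obj *o, int key)
{
	int found;

	mutex_lock(&o->inner);
	found = lookup(o, key);
	mutex_unlock(&o->inner);
	if (!found)
		return -EINVAL;

	mutex_lock(&o->outer);
	mutex_lock(&o->inner);
	/* make sure that we did not race, as the inotify code puts it */
	if (lookup(o, key))
		remove_entry(o, key);
	mutex_unlock(&o->inner);
	mutex_unlock(&o->outer);
	return 0;
}
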
diff --git a/fs/isofs/joliet.c b/fs/isofs/joliet.c
index 2931de7f1a6a..81a90e170ac3 100644
--- a/fs/isofs/joliet.c
+++ b/fs/isofs/joliet.c
@@ -11,7 +11,7 @@
11#include "isofs.h" 11#include "isofs.h"
12 12
13/* 13/*
14 * Convert Unicode 16 to UTF8 or ASCII. 14 * Convert Unicode 16 to UTF-8 or ASCII.
15 */ 15 */
16static int 16static int
17uni16_to_x8(unsigned char *ascii, u16 *uni, int len, struct nls_table *nls) 17uni16_to_x8(unsigned char *ascii, u16 *uni, int len, struct nls_table *nls)
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 543ed543d1e5..3f5102b069db 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -85,7 +85,7 @@ void __log_wait_for_space(journal_t *journal)
85 if (journal->j_flags & JFS_ABORT) 85 if (journal->j_flags & JFS_ABORT)
86 return; 86 return;
87 spin_unlock(&journal->j_state_lock); 87 spin_unlock(&journal->j_state_lock);
88 down(&journal->j_checkpoint_sem); 88 mutex_lock(&journal->j_checkpoint_mutex);
89 89
90 /* 90 /*
91 * Test again, another process may have checkpointed while we 91 * Test again, another process may have checkpointed while we
@@ -98,7 +98,7 @@ void __log_wait_for_space(journal_t *journal)
98 log_do_checkpoint(journal); 98 log_do_checkpoint(journal);
99 spin_lock(&journal->j_state_lock); 99 spin_lock(&journal->j_state_lock);
100 } 100 }
101 up(&journal->j_checkpoint_sem); 101 mutex_unlock(&journal->j_checkpoint_mutex);
102 } 102 }
103} 103}
104 104
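
__log_wait_for_space() is the classic test/lock/retest idiom: the free-space check runs before taking j_checkpoint_mutex and must run again after acquiring it, because whoever held the mutex may already have checkpointed enough on our behalf. A sketch under hypothetical space_available()/do_checkpoint() helpers:

#include <linux/mutex.h>

static DEFINE_MUTEX(checkpoint_mutex);

static int space_available(void);	/* hypothetical predicate */
static void do_checkpoint(void);	/* hypothetical worker */

static void wait_for_space(void)
{
	while (!space_available()) {
		mutex_lock(&checkpoint_mutex);
		/* re-test under the lock: another task may have run */
		if (!space_available())
			do_checkpoint();
		mutex_unlock(&checkpoint_mutex);
	}
}
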
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index e4b516ac4989..95a628d8cac8 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -659,8 +659,8 @@ static journal_t * journal_init_common (void)
659 init_waitqueue_head(&journal->j_wait_checkpoint); 659 init_waitqueue_head(&journal->j_wait_checkpoint);
660 init_waitqueue_head(&journal->j_wait_commit); 660 init_waitqueue_head(&journal->j_wait_commit);
661 init_waitqueue_head(&journal->j_wait_updates); 661 init_waitqueue_head(&journal->j_wait_updates);
662 init_MUTEX(&journal->j_barrier); 662 mutex_init(&journal->j_barrier);
663 init_MUTEX(&journal->j_checkpoint_sem); 663 mutex_init(&journal->j_checkpoint_mutex);
664 spin_lock_init(&journal->j_revoke_lock); 664 spin_lock_init(&journal->j_revoke_lock);
665 spin_lock_init(&journal->j_list_lock); 665 spin_lock_init(&journal->j_list_lock);
666 spin_lock_init(&journal->j_state_lock); 666 spin_lock_init(&journal->j_state_lock);
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index ca917973c2c0..5fc40888f4cf 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -455,7 +455,7 @@ void journal_lock_updates(journal_t *journal)
455 * to make sure that we serialise special journal-locked operations 455 * to make sure that we serialise special journal-locked operations
456 * too. 456 * too.
457 */ 457 */
458 down(&journal->j_barrier); 458 mutex_lock(&journal->j_barrier);
459} 459}
460 460
461/** 461/**
@@ -470,7 +470,7 @@ void journal_unlock_updates (journal_t *journal)
470{ 470{
471 J_ASSERT(journal->j_barrier_count != 0); 471 J_ASSERT(journal->j_barrier_count != 0);
472 472
473 up(&journal->j_barrier); 473 mutex_unlock(&journal->j_barrier);
474 spin_lock(&journal->j_state_lock); 474 spin_lock(&journal->j_state_lock);
475 --journal->j_barrier_count; 475 --journal->j_barrier_count;
476 spin_unlock(&journal->j_state_lock); 476 spin_unlock(&journal->j_state_lock);
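
journal_lock_updates()/journal_unlock_updates() mix two lock types: a spinlock guards the j_barrier_count bookkeeping that fast paths read, while the sleeping j_barrier mutex parks whoever wants exclusive, journal-locked access. Only the sleeping half is converted, since down()/up() on a binary semaphore and mutex_lock()/mutex_unlock() block at the same points. A condensed sketch of the pairing:

#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_MUTEX(barrier);
static DEFINE_SPINLOCK(state_lock);
static int barrier_count;

static void lock_updates(void)
{
	spin_lock(&state_lock);
	barrier_count++;	/* visible to spinlock-only fast paths */
	spin_unlock(&state_lock);
	mutex_lock(&barrier);	/* sleep until we own the barrier */
}

static void unlock_updates(void)
{
	mutex_unlock(&barrier);
	spin_lock(&state_lock);
	barrier_count--;
	spin_unlock(&state_lock);
}
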
diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c
index fc3855a1aef3..890d7ff7456d 100644
--- a/fs/jffs/inode-v23.c
+++ b/fs/jffs/inode-v23.c
@@ -42,7 +42,7 @@
42#include <linux/quotaops.h> 42#include <linux/quotaops.h>
43#include <linux/highmem.h> 43#include <linux/highmem.h>
44#include <linux/vfs.h> 44#include <linux/vfs.h>
45#include <asm/semaphore.h> 45#include <linux/mutex.h>
46#include <asm/byteorder.h> 46#include <asm/byteorder.h>
47#include <asm/uaccess.h> 47#include <asm/uaccess.h>
48 48
@@ -203,7 +203,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr)
203 fmc = c->fmc; 203 fmc = c->fmc;
204 204
205 D3(printk (KERN_NOTICE "notify_change(): down biglock\n")); 205 D3(printk (KERN_NOTICE "notify_change(): down biglock\n"));
206 down(&fmc->biglock); 206 mutex_lock(&fmc->biglock);
207 207
208 f = jffs_find_file(c, inode->i_ino); 208 f = jffs_find_file(c, inode->i_ino);
209 209
@@ -211,7 +211,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr)
211 printk("jffs_setattr(): Invalid inode number: %lu\n", 211 printk("jffs_setattr(): Invalid inode number: %lu\n",
212 inode->i_ino); 212 inode->i_ino);
213 D3(printk (KERN_NOTICE "notify_change(): up biglock\n")); 213 D3(printk (KERN_NOTICE "notify_change(): up biglock\n"));
214 up(&fmc->biglock); 214 mutex_unlock(&fmc->biglock);
215 res = -EINVAL; 215 res = -EINVAL;
216 goto out; 216 goto out;
217 }); 217 });
@@ -232,7 +232,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr)
232 if (!(new_node = jffs_alloc_node())) { 232 if (!(new_node = jffs_alloc_node())) {
233 D(printk("jffs_setattr(): Allocation failed!\n")); 233 D(printk("jffs_setattr(): Allocation failed!\n"));
234 D3(printk (KERN_NOTICE "notify_change(): up biglock\n")); 234 D3(printk (KERN_NOTICE "notify_change(): up biglock\n"));
235 up(&fmc->biglock); 235 mutex_unlock(&fmc->biglock);
236 res = -ENOMEM; 236 res = -ENOMEM;
237 goto out; 237 goto out;
238 } 238 }
@@ -319,7 +319,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr)
319 D(printk("jffs_notify_change(): The write failed!\n")); 319 D(printk("jffs_notify_change(): The write failed!\n"));
320 jffs_free_node(new_node); 320 jffs_free_node(new_node);
321 D3(printk (KERN_NOTICE "n_c(): up biglock\n")); 321 D3(printk (KERN_NOTICE "n_c(): up biglock\n"));
322 up(&c->fmc->biglock); 322 mutex_unlock(&c->fmc->biglock);
323 goto out; 323 goto out;
324 } 324 }
325 325
@@ -327,7 +327,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr)
327 327
328 mark_inode_dirty(inode); 328 mark_inode_dirty(inode);
329 D3(printk (KERN_NOTICE "n_c(): up biglock\n")); 329 D3(printk (KERN_NOTICE "n_c(): up biglock\n"));
330 up(&c->fmc->biglock); 330 mutex_unlock(&c->fmc->biglock);
331out: 331out:
332 unlock_kernel(); 332 unlock_kernel();
333 return res; 333 return res;
@@ -461,7 +461,7 @@ jffs_rename(struct inode *old_dir, struct dentry *old_dentry,
461 goto jffs_rename_end; 461 goto jffs_rename_end;
462 } 462 }
463 D3(printk (KERN_NOTICE "rename(): down biglock\n")); 463 D3(printk (KERN_NOTICE "rename(): down biglock\n"));
464 down(&c->fmc->biglock); 464 mutex_lock(&c->fmc->biglock);
465 /* Create a node and initialize as much as needed. */ 465 /* Create a node and initialize as much as needed. */
466 result = -ENOMEM; 466 result = -ENOMEM;
467 if (!(node = jffs_alloc_node())) { 467 if (!(node = jffs_alloc_node())) {
@@ -555,7 +555,7 @@ jffs_rename(struct inode *old_dir, struct dentry *old_dentry,
555 555
556jffs_rename_end: 556jffs_rename_end:
557 D3(printk (KERN_NOTICE "rename(): up biglock\n")); 557 D3(printk (KERN_NOTICE "rename(): up biglock\n"));
558 up(&c->fmc->biglock); 558 mutex_unlock(&c->fmc->biglock);
559 unlock_kernel(); 559 unlock_kernel();
560 return result; 560 return result;
561} /* jffs_rename() */ 561} /* jffs_rename() */
@@ -574,14 +574,14 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir)
574 int ddino; 574 int ddino;
575 lock_kernel(); 575 lock_kernel();
576 D3(printk (KERN_NOTICE "readdir(): down biglock\n")); 576 D3(printk (KERN_NOTICE "readdir(): down biglock\n"));
577 down(&c->fmc->biglock); 577 mutex_lock(&c->fmc->biglock);
578 578
579 D2(printk("jffs_readdir(): inode: 0x%p, filp: 0x%p\n", inode, filp)); 579 D2(printk("jffs_readdir(): inode: 0x%p, filp: 0x%p\n", inode, filp));
580 if (filp->f_pos == 0) { 580 if (filp->f_pos == 0) {
581 D3(printk("jffs_readdir(): \".\" %lu\n", inode->i_ino)); 581 D3(printk("jffs_readdir(): \".\" %lu\n", inode->i_ino));
582 if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0) { 582 if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0) {
583 D3(printk (KERN_NOTICE "readdir(): up biglock\n")); 583 D3(printk (KERN_NOTICE "readdir(): up biglock\n"));
584 up(&c->fmc->biglock); 584 mutex_unlock(&c->fmc->biglock);
585 unlock_kernel(); 585 unlock_kernel();
586 return 0; 586 return 0;
587 } 587 }
@@ -598,7 +598,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir)
598 D3(printk("jffs_readdir(): \"..\" %u\n", ddino)); 598 D3(printk("jffs_readdir(): \"..\" %u\n", ddino));
599 if (filldir(dirent, "..", 2, filp->f_pos, ddino, DT_DIR) < 0) { 599 if (filldir(dirent, "..", 2, filp->f_pos, ddino, DT_DIR) < 0) {
600 D3(printk (KERN_NOTICE "readdir(): up biglock\n")); 600 D3(printk (KERN_NOTICE "readdir(): up biglock\n"));
601 up(&c->fmc->biglock); 601 mutex_unlock(&c->fmc->biglock);
602 unlock_kernel(); 602 unlock_kernel();
603 return 0; 603 return 0;
604 } 604 }
@@ -617,7 +617,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir)
617 if (filldir(dirent, f->name, f->nsize, 617 if (filldir(dirent, f->name, f->nsize,
618 filp->f_pos , f->ino, DT_UNKNOWN) < 0) { 618 filp->f_pos , f->ino, DT_UNKNOWN) < 0) {
619 D3(printk (KERN_NOTICE "readdir(): up biglock\n")); 619 D3(printk (KERN_NOTICE "readdir(): up biglock\n"));
620 up(&c->fmc->biglock); 620 mutex_unlock(&c->fmc->biglock);
621 unlock_kernel(); 621 unlock_kernel();
622 return 0; 622 return 0;
623 } 623 }
@@ -627,7 +627,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir)
627 } while(f && f->deleted); 627 } while(f && f->deleted);
628 } 628 }
629 D3(printk (KERN_NOTICE "readdir(): up biglock\n")); 629 D3(printk (KERN_NOTICE "readdir(): up biglock\n"));
630 up(&c->fmc->biglock); 630 mutex_unlock(&c->fmc->biglock);
631 unlock_kernel(); 631 unlock_kernel();
632 return filp->f_pos; 632 return filp->f_pos;
633} /* jffs_readdir() */ 633} /* jffs_readdir() */
@@ -660,7 +660,7 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
660 }); 660 });
661 661
662 D3(printk (KERN_NOTICE "lookup(): down biglock\n")); 662 D3(printk (KERN_NOTICE "lookup(): down biglock\n"));
663 down(&c->fmc->biglock); 663 mutex_lock(&c->fmc->biglock);
664 664
665 r = -ENAMETOOLONG; 665 r = -ENAMETOOLONG;
666 if (len > JFFS_MAX_NAME_LEN) { 666 if (len > JFFS_MAX_NAME_LEN) {
@@ -683,31 +683,31 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
683 683
684 if ((len == 1) && (name[0] == '.')) { 684 if ((len == 1) && (name[0] == '.')) {
685 D3(printk (KERN_NOTICE "lookup(): up biglock\n")); 685 D3(printk (KERN_NOTICE "lookup(): up biglock\n"));
686 up(&c->fmc->biglock); 686 mutex_unlock(&c->fmc->biglock);
687 if (!(inode = iget(dir->i_sb, d->ino))) { 687 if (!(inode = iget(dir->i_sb, d->ino))) {
688 D(printk("jffs_lookup(): . iget() ==> NULL\n")); 688 D(printk("jffs_lookup(): . iget() ==> NULL\n"));
689 goto jffs_lookup_end_no_biglock; 689 goto jffs_lookup_end_no_biglock;
690 } 690 }
691 D3(printk (KERN_NOTICE "lookup(): down biglock\n")); 691 D3(printk (KERN_NOTICE "lookup(): down biglock\n"));
692 down(&c->fmc->biglock); 692 mutex_lock(&c->fmc->biglock);
693 } else if ((len == 2) && (name[0] == '.') && (name[1] == '.')) { 693 } else if ((len == 2) && (name[0] == '.') && (name[1] == '.')) {
694 D3(printk (KERN_NOTICE "lookup(): up biglock\n")); 694 D3(printk (KERN_NOTICE "lookup(): up biglock\n"));
695 up(&c->fmc->biglock); 695 mutex_unlock(&c->fmc->biglock);
696 if (!(inode = iget(dir->i_sb, d->pino))) { 696 if (!(inode = iget(dir->i_sb, d->pino))) {
697 D(printk("jffs_lookup(): .. iget() ==> NULL\n")); 697 D(printk("jffs_lookup(): .. iget() ==> NULL\n"));
698 goto jffs_lookup_end_no_biglock; 698 goto jffs_lookup_end_no_biglock;
699 } 699 }
700 D3(printk (KERN_NOTICE "lookup(): down biglock\n")); 700 D3(printk (KERN_NOTICE "lookup(): down biglock\n"));
701 down(&c->fmc->biglock); 701 mutex_lock(&c->fmc->biglock);
702 } else if ((f = jffs_find_child(d, name, len))) { 702 } else if ((f = jffs_find_child(d, name, len))) {
703 D3(printk (KERN_NOTICE "lookup(): up biglock\n")); 703 D3(printk (KERN_NOTICE "lookup(): up biglock\n"));
704 up(&c->fmc->biglock); 704 mutex_unlock(&c->fmc->biglock);
705 if (!(inode = iget(dir->i_sb, f->ino))) { 705 if (!(inode = iget(dir->i_sb, f->ino))) {
706 D(printk("jffs_lookup(): iget() ==> NULL\n")); 706 D(printk("jffs_lookup(): iget() ==> NULL\n"));
707 goto jffs_lookup_end_no_biglock; 707 goto jffs_lookup_end_no_biglock;
708 } 708 }
709 D3(printk (KERN_NOTICE "lookup(): down biglock\n")); 709 D3(printk (KERN_NOTICE "lookup(): down biglock\n"));
710 down(&c->fmc->biglock); 710 mutex_lock(&c->fmc->biglock);
711 } else { 711 } else {
712 D3(printk("jffs_lookup(): Couldn't find the file. " 712 D3(printk("jffs_lookup(): Couldn't find the file. "
713 "f = 0x%p, name = \"%s\", d = 0x%p, d->ino = %u\n", 713 "f = 0x%p, name = \"%s\", d = 0x%p, d->ino = %u\n",
@@ -717,13 +717,13 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
717 717
718 d_add(dentry, inode); 718 d_add(dentry, inode);
719 D3(printk (KERN_NOTICE "lookup(): up biglock\n")); 719 D3(printk (KERN_NOTICE "lookup(): up biglock\n"));
720 up(&c->fmc->biglock); 720 mutex_unlock(&c->fmc->biglock);
721 unlock_kernel(); 721 unlock_kernel();
722 return NULL; 722 return NULL;
723 723
724jffs_lookup_end: 724jffs_lookup_end:
725 D3(printk (KERN_NOTICE "lookup(): up biglock\n")); 725 D3(printk (KERN_NOTICE "lookup(): up biglock\n"));
726 up(&c->fmc->biglock); 726 mutex_unlock(&c->fmc->biglock);
727 727
728jffs_lookup_end_no_biglock: 728jffs_lookup_end_no_biglock:
729 unlock_kernel(); 729 unlock_kernel();
@@ -753,7 +753,7 @@ jffs_do_readpage_nolock(struct file *file, struct page *page)
753 ClearPageError(page); 753 ClearPageError(page);
754 754
755 D3(printk (KERN_NOTICE "readpage(): down biglock\n")); 755 D3(printk (KERN_NOTICE "readpage(): down biglock\n"));
756 down(&c->fmc->biglock); 756 mutex_lock(&c->fmc->biglock);
757 757
758 read_len = 0; 758 read_len = 0;
759 result = 0; 759 result = 0;
@@ -782,7 +782,7 @@ jffs_do_readpage_nolock(struct file *file, struct page *page)
782 kunmap(page); 782 kunmap(page);
783 783
784 D3(printk (KERN_NOTICE "readpage(): up biglock\n")); 784 D3(printk (KERN_NOTICE "readpage(): up biglock\n"));
785 up(&c->fmc->biglock); 785 mutex_unlock(&c->fmc->biglock);
786 786
787 if (result) { 787 if (result) {
788 SetPageError(page); 788 SetPageError(page);
@@ -839,7 +839,7 @@ jffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
839 839
840 c = dir_f->c; 840 c = dir_f->c;
841 D3(printk (KERN_NOTICE "mkdir(): down biglock\n")); 841 D3(printk (KERN_NOTICE "mkdir(): down biglock\n"));
842 down(&c->fmc->biglock); 842 mutex_lock(&c->fmc->biglock);
843 843
844 dir_mode = S_IFDIR | (mode & (S_IRWXUGO|S_ISVTX) 844 dir_mode = S_IFDIR | (mode & (S_IRWXUGO|S_ISVTX)
845 & ~current->fs->umask); 845 & ~current->fs->umask);
@@ -906,7 +906,7 @@ jffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
906 result = 0; 906 result = 0;
907jffs_mkdir_end: 907jffs_mkdir_end:
908 D3(printk (KERN_NOTICE "mkdir(): up biglock\n")); 908 D3(printk (KERN_NOTICE "mkdir(): up biglock\n"));
909 up(&c->fmc->biglock); 909 mutex_unlock(&c->fmc->biglock);
910 unlock_kernel(); 910 unlock_kernel();
911 return result; 911 return result;
912} /* jffs_mkdir() */ 912} /* jffs_mkdir() */
@@ -921,10 +921,10 @@ jffs_rmdir(struct inode *dir, struct dentry *dentry)
921 D3(printk("***jffs_rmdir()\n")); 921 D3(printk("***jffs_rmdir()\n"));
922 D3(printk (KERN_NOTICE "rmdir(): down biglock\n")); 922 D3(printk (KERN_NOTICE "rmdir(): down biglock\n"));
923 lock_kernel(); 923 lock_kernel();
924 down(&c->fmc->biglock); 924 mutex_lock(&c->fmc->biglock);
925 ret = jffs_remove(dir, dentry, S_IFDIR); 925 ret = jffs_remove(dir, dentry, S_IFDIR);
926 D3(printk (KERN_NOTICE "rmdir(): up biglock\n")); 926 D3(printk (KERN_NOTICE "rmdir(): up biglock\n"));
927 up(&c->fmc->biglock); 927 mutex_unlock(&c->fmc->biglock);
928 unlock_kernel(); 928 unlock_kernel();
929 return ret; 929 return ret;
930} 930}
@@ -940,10 +940,10 @@ jffs_unlink(struct inode *dir, struct dentry *dentry)
940 lock_kernel(); 940 lock_kernel();
941 D3(printk("***jffs_unlink()\n")); 941 D3(printk("***jffs_unlink()\n"));
942 D3(printk (KERN_NOTICE "unlink(): down biglock\n")); 942 D3(printk (KERN_NOTICE "unlink(): down biglock\n"));
943 down(&c->fmc->biglock); 943 mutex_lock(&c->fmc->biglock);
944 ret = jffs_remove(dir, dentry, 0); 944 ret = jffs_remove(dir, dentry, 0);
945 D3(printk (KERN_NOTICE "unlink(): up biglock\n")); 945 D3(printk (KERN_NOTICE "unlink(): up biglock\n"));
946 up(&c->fmc->biglock); 946 mutex_unlock(&c->fmc->biglock);
947 unlock_kernel(); 947 unlock_kernel();
948 return ret; 948 return ret;
949} 949}
@@ -1086,7 +1086,7 @@ jffs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
1086 c = dir_f->c; 1086 c = dir_f->c;
1087 1087
1088 D3(printk (KERN_NOTICE "mknod(): down biglock\n")); 1088 D3(printk (KERN_NOTICE "mknod(): down biglock\n"));
1089 down(&c->fmc->biglock); 1089 mutex_lock(&c->fmc->biglock);
1090 1090
1091 /* Create and initialize a new node. */ 1091 /* Create and initialize a new node. */
1092 if (!(node = jffs_alloc_node())) { 1092 if (!(node = jffs_alloc_node())) {
@@ -1152,7 +1152,7 @@ jffs_mknod_err:
1152 1152
1153jffs_mknod_end: 1153jffs_mknod_end:
1154 D3(printk (KERN_NOTICE "mknod(): up biglock\n")); 1154 D3(printk (KERN_NOTICE "mknod(): up biglock\n"));
1155 up(&c->fmc->biglock); 1155 mutex_unlock(&c->fmc->biglock);
1156 unlock_kernel(); 1156 unlock_kernel();
1157 return result; 1157 return result;
1158} /* jffs_mknod() */ 1158} /* jffs_mknod() */
@@ -1203,7 +1203,7 @@ jffs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1203 return -ENOMEM; 1203 return -ENOMEM;
1204 } 1204 }
1205 D3(printk (KERN_NOTICE "symlink(): down biglock\n")); 1205 D3(printk (KERN_NOTICE "symlink(): down biglock\n"));
1206 down(&c->fmc->biglock); 1206 mutex_lock(&c->fmc->biglock);
1207 1207
1208 node->data_offset = 0; 1208 node->data_offset = 0;
1209 node->removed_size = 0; 1209 node->removed_size = 0;
@@ -1253,7 +1253,7 @@ jffs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1253 d_instantiate(dentry, inode); 1253 d_instantiate(dentry, inode);
1254 jffs_symlink_end: 1254 jffs_symlink_end:
1255 D3(printk (KERN_NOTICE "symlink(): up biglock\n")); 1255 D3(printk (KERN_NOTICE "symlink(): up biglock\n"));
1256 up(&c->fmc->biglock); 1256 mutex_unlock(&c->fmc->biglock);
1257 unlock_kernel(); 1257 unlock_kernel();
1258 return err; 1258 return err;
1259} /* jffs_symlink() */ 1259} /* jffs_symlink() */
@@ -1306,7 +1306,7 @@ jffs_create(struct inode *dir, struct dentry *dentry, int mode,
1306 return -ENOMEM; 1306 return -ENOMEM;
1307 } 1307 }
1308 D3(printk (KERN_NOTICE "create(): down biglock\n")); 1308 D3(printk (KERN_NOTICE "create(): down biglock\n"));
1309 down(&c->fmc->biglock); 1309 mutex_lock(&c->fmc->biglock);
1310 1310
1311 node->data_offset = 0; 1311 node->data_offset = 0;
1312 node->removed_size = 0; 1312 node->removed_size = 0;
@@ -1359,7 +1359,7 @@ jffs_create(struct inode *dir, struct dentry *dentry, int mode,
1359 d_instantiate(dentry, inode); 1359 d_instantiate(dentry, inode);
1360 jffs_create_end: 1360 jffs_create_end:
1361 D3(printk (KERN_NOTICE "create(): up biglock\n")); 1361 D3(printk (KERN_NOTICE "create(): up biglock\n"));
1362 up(&c->fmc->biglock); 1362 mutex_unlock(&c->fmc->biglock);
1363 unlock_kernel(); 1363 unlock_kernel();
1364 return err; 1364 return err;
1365} /* jffs_create() */ 1365} /* jffs_create() */
@@ -1423,7 +1423,7 @@ jffs_file_write(struct file *filp, const char *buf, size_t count,
1423 thiscount = min(c->fmc->max_chunk_size - sizeof(struct jffs_raw_inode), count); 1423 thiscount = min(c->fmc->max_chunk_size - sizeof(struct jffs_raw_inode), count);
1424 1424
1425 D3(printk (KERN_NOTICE "file_write(): down biglock\n")); 1425 D3(printk (KERN_NOTICE "file_write(): down biglock\n"));
1426 down(&c->fmc->biglock); 1426 mutex_lock(&c->fmc->biglock);
1427 1427
1428 /* Urgh. POSIX says we can do short writes if we feel like it. 1428 /* Urgh. POSIX says we can do short writes if we feel like it.
1429 * In practice, we can't. Nothing will cope. So we loop until 1429 * In practice, we can't. Nothing will cope. So we loop until
@@ -1511,7 +1511,7 @@ jffs_file_write(struct file *filp, const char *buf, size_t count,
1511 } 1511 }
1512 out: 1512 out:
1513 D3(printk (KERN_NOTICE "file_write(): up biglock\n")); 1513 D3(printk (KERN_NOTICE "file_write(): up biglock\n"));
1514 up(&c->fmc->biglock); 1514 mutex_unlock(&c->fmc->biglock);
1515 1515
1516 /* Fix things in the real inode. */ 1516 /* Fix things in the real inode. */
1517 if (pos > inode->i_size) { 1517 if (pos > inode->i_size) {
@@ -1567,7 +1567,7 @@ jffs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1567 return -EIO; 1567 return -EIO;
1568 } 1568 }
1569 D3(printk (KERN_NOTICE "ioctl(): down biglock\n")); 1569 D3(printk (KERN_NOTICE "ioctl(): down biglock\n"));
1570 down(&c->fmc->biglock); 1570 mutex_lock(&c->fmc->biglock);
1571 1571
1572 switch (cmd) { 1572 switch (cmd) {
1573 case JFFS_PRINT_HASH: 1573 case JFFS_PRINT_HASH:
@@ -1609,7 +1609,7 @@ jffs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1609 ret = -ENOTTY; 1609 ret = -ENOTTY;
1610 } 1610 }
1611 D3(printk (KERN_NOTICE "ioctl(): up biglock\n")); 1611 D3(printk (KERN_NOTICE "ioctl(): up biglock\n"));
1612 up(&c->fmc->biglock); 1612 mutex_unlock(&c->fmc->biglock);
1613 return ret; 1613 return ret;
1614} /* jffs_ioctl() */ 1614} /* jffs_ioctl() */
1615 1615
@@ -1685,12 +1685,12 @@ jffs_read_inode(struct inode *inode)
1685 } 1685 }
1686 c = (struct jffs_control *)inode->i_sb->s_fs_info; 1686 c = (struct jffs_control *)inode->i_sb->s_fs_info;
1687 D3(printk (KERN_NOTICE "read_inode(): down biglock\n")); 1687 D3(printk (KERN_NOTICE "read_inode(): down biglock\n"));
1688 down(&c->fmc->biglock); 1688 mutex_lock(&c->fmc->biglock);
1689 if (!(f = jffs_find_file(c, inode->i_ino))) { 1689 if (!(f = jffs_find_file(c, inode->i_ino))) {
1690 D(printk("jffs_read_inode(): No such inode (%lu).\n", 1690 D(printk("jffs_read_inode(): No such inode (%lu).\n",
1691 inode->i_ino)); 1691 inode->i_ino));
1692 D3(printk (KERN_NOTICE "read_inode(): up biglock\n")); 1692 D3(printk (KERN_NOTICE "read_inode(): up biglock\n"));
1693 up(&c->fmc->biglock); 1693 mutex_unlock(&c->fmc->biglock);
1694 return; 1694 return;
1695 } 1695 }
1696 inode->u.generic_ip = (void *)f; 1696 inode->u.generic_ip = (void *)f;
@@ -1732,7 +1732,7 @@ jffs_read_inode(struct inode *inode)
1732 } 1732 }
1733 1733
1734 D3(printk (KERN_NOTICE "read_inode(): up biglock\n")); 1734 D3(printk (KERN_NOTICE "read_inode(): up biglock\n"));
1735 up(&c->fmc->biglock); 1735 mutex_unlock(&c->fmc->biglock);
1736} 1736}
1737 1737
1738 1738
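
Beyond the include swap from <asm/semaphore.h> to <linux/mutex.h>, every lock site in the jffs code repeats the same D3(printk(...)) chatter around the biglock transition. If one were refactoring rather than just converting, a small wrapper could keep the call sites terse; this is purely a hypothetical sketch, not part of the patch, and it assumes a structure with a 'biglock' mutex member:

#include <linux/kernel.h>
#include <linux/mutex.h>

#define BIGLOCK_LOCK(fmc, who)						\
do {									\
	printk(KERN_NOTICE "%s(): down biglock\n", who);		\
	mutex_lock(&(fmc)->biglock);					\
} while (0)

#define BIGLOCK_UNLOCK(fmc, who)					\
do {									\
	printk(KERN_NOTICE "%s(): up biglock\n", who);			\
	mutex_unlock(&(fmc)->biglock);					\
} while (0)
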
diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c
index ce7b54b0b2b7..0ef207dfaf6f 100644
--- a/fs/jffs/intrep.c
+++ b/fs/jffs/intrep.c
@@ -62,7 +62,7 @@
62#include <linux/fs.h> 62#include <linux/fs.h>
63#include <linux/stat.h> 63#include <linux/stat.h>
64#include <linux/pagemap.h> 64#include <linux/pagemap.h>
65#include <asm/semaphore.h> 65#include <linux/mutex.h>
66#include <asm/byteorder.h> 66#include <asm/byteorder.h>
67#include <linux/smp_lock.h> 67#include <linux/smp_lock.h>
68#include <linux/time.h> 68#include <linux/time.h>
@@ -3416,7 +3416,7 @@ jffs_garbage_collect_thread(void *ptr)
3416 D1(printk (KERN_NOTICE "jffs_garbage_collect_thread(): collecting.\n")); 3416 D1(printk (KERN_NOTICE "jffs_garbage_collect_thread(): collecting.\n"));
3417 3417
3418 D3(printk (KERN_NOTICE "g_c_thread(): down biglock\n")); 3418 D3(printk (KERN_NOTICE "g_c_thread(): down biglock\n"));
3419 down(&fmc->biglock); 3419 mutex_lock(&fmc->biglock);
3420 3420
3421 D1(printk("***jffs_garbage_collect_thread(): round #%u, " 3421 D1(printk("***jffs_garbage_collect_thread(): round #%u, "
3422 "fmc->dirty_size = %u\n", i++, fmc->dirty_size)); 3422 "fmc->dirty_size = %u\n", i++, fmc->dirty_size));
@@ -3447,6 +3447,6 @@ jffs_garbage_collect_thread(void *ptr)
3447 3447
3448 gc_end: 3448 gc_end:
3449 D3(printk (KERN_NOTICE "g_c_thread(): up biglock\n")); 3449 D3(printk (KERN_NOTICE "g_c_thread(): up biglock\n"));
3450 up(&fmc->biglock); 3450 mutex_unlock(&fmc->biglock);
3451 } /* for (;;) */ 3451 } /* for (;;) */
3452} /* jffs_garbage_collect_thread() */ 3452} /* jffs_garbage_collect_thread() */
diff --git a/fs/jffs/jffs_fm.c b/fs/jffs/jffs_fm.c
index 6da13b309bd1..7d8ca1aeace2 100644
--- a/fs/jffs/jffs_fm.c
+++ b/fs/jffs/jffs_fm.c
@@ -139,7 +139,7 @@ jffs_build_begin(struct jffs_control *c, int unit)
139 fmc->tail = NULL; 139 fmc->tail = NULL;
140 fmc->head_extra = NULL; 140 fmc->head_extra = NULL;
141 fmc->tail_extra = NULL; 141 fmc->tail_extra = NULL;
142 init_MUTEX(&fmc->biglock); 142 mutex_init(&fmc->biglock);
143 return fmc; 143 return fmc;
144} 144}
145 145
diff --git a/fs/jffs/jffs_fm.h b/fs/jffs/jffs_fm.h
index f64151e74122..c794d923df2a 100644
--- a/fs/jffs/jffs_fm.h
+++ b/fs/jffs/jffs_fm.h
@@ -20,10 +20,11 @@
 #ifndef __LINUX_JFFS_FM_H__
 #define __LINUX_JFFS_FM_H__
 
+#include <linux/config.h>
 #include <linux/types.h>
 #include <linux/jffs.h>
 #include <linux/mtd/mtd.h>
-#include <linux/config.h>
+#include <linux/mutex.h>
 
 /* The alignment between two nodes in the flash memory. */
 #define JFFS_ALIGN_SIZE 4
@@ -97,7 +98,7 @@ struct jffs_fmcontrol
 	struct jffs_fm *tail;
 	struct jffs_fm *head_extra;
 	struct jffs_fm *tail_extra;
-	struct semaphore biglock;
+	struct mutex biglock;
 };
 
 /* Notice the two members head_extra and tail_extra in the jffs_control
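The jffs hunks above follow the standard semaphore-to-mutex conversion recipe: a struct semaphore used as a binary sleeping lock becomes a struct mutex, init_MUTEX() becomes mutex_init(), and each down()/up() pair becomes mutex_lock()/mutex_unlock(). A minimal sketch of the recipe, using hypothetical names rather than code from this patch:

	#include <linux/mutex.h>

	struct flash_ctrl {
		struct mutex biglock;		/* was: struct semaphore biglock; */
	};

	static void flash_ctrl_init(struct flash_ctrl *c)
	{
		mutex_init(&c->biglock);	/* was: init_MUTEX(&c->biglock); */
	}

	static void flash_ctrl_work(struct flash_ctrl *c)
	{
		mutex_lock(&c->biglock);	/* was: down(&c->biglock); */
		/* critical section: one task at a time */
		mutex_unlock(&c->biglock);	/* was: up(&c->biglock); */
	}

The mutex also enforces what the semaphore only implied: the task that takes biglock must be the one to release it, which matches how all of the jffs call sites already behaved.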
diff --git a/fs/libfs.c b/fs/libfs.c
index 71fd08fa4103..4fdeaceb892c 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -7,6 +7,8 @@
 #include <linux/pagemap.h>
 #include <linux/mount.h>
 #include <linux/vfs.h>
+#include <linux/mutex.h>
+
 #include <asm/uaccess.h>
 
 int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
@@ -530,7 +532,7 @@ struct simple_attr {
 	char set_buf[24];
 	void *data;
 	const char *fmt;	/* format for read operation */
-	struct semaphore sem;	/* protects access to these buffers */
+	struct mutex mutex;	/* protects access to these buffers */
 };
 
 /* simple_attr_open is called by an actual attribute open file operation
@@ -549,7 +551,7 @@ int simple_attr_open(struct inode *inode, struct file *file,
 	attr->set = set;
 	attr->data = inode->u.generic_ip;
 	attr->fmt = fmt;
-	init_MUTEX(&attr->sem);
+	mutex_init(&attr->mutex);
 
 	file->private_data = attr;
 
@@ -575,7 +577,7 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
 	if (!attr->get)
 		return -EACCES;
 
-	down(&attr->sem);
+	mutex_lock(&attr->mutex);
 	if (*ppos)	/* continued read */
 		size = strlen(attr->get_buf);
 	else		/* first read */
@@ -584,7 +586,7 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
 				 (unsigned long long)attr->get(attr->data));
 
 	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
-	up(&attr->sem);
+	mutex_unlock(&attr->mutex);
 	return ret;
 }
 
@@ -602,7 +604,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
 	if (!attr->set)
 		return -EACCES;
 
-	down(&attr->sem);
+	mutex_lock(&attr->mutex);
 	ret = -EFAULT;
 	size = min(sizeof(attr->set_buf) - 1, len);
 	if (copy_from_user(attr->set_buf, buf, size))
@@ -613,7 +615,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
 	val = simple_strtol(attr->set_buf, NULL, 0);
 	attr->set(attr->data, val);
 out:
-	up(&attr->sem);
+	mutex_unlock(&attr->mutex);
 	return ret;
 }
 
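The renamed lock guards the same state as before: get_buf and set_buf are scratch buffers shared by every read and write on one open attribute file, so two concurrent readers would otherwise race while formatting into get_buf. For context, a hedged sketch of how these helpers are typically consumed, assuming the era's DEFINE_SIMPLE_ATTRIBUTE macro from <linux/fs.h> with its u64-based get/set signatures (all names here are hypothetical):

	#include <linux/debugfs.h>
	#include <linux/fs.h>
	#include <linux/types.h>

	static u64 my_counter;

	static u64 my_counter_get(void *data)
	{
		return *(u64 *)data;	/* formatted via attr->fmt */
	}

	static void my_counter_set(void *data, u64 val)
	{
		*(u64 *)data = val;
	}

	/* Generates my_counter_fops wired to simple_attr_open() and friends. */
	DEFINE_SIMPLE_ATTRIBUTE(my_counter_fops, my_counter_get, my_counter_set,
				"%llu\n");

	static int __init my_attr_init(void)
	{
		/* The data pointer lands in inode->u.generic_ip and is handed
		 * to the get/set callbacks by simple_attr_open(). */
		debugfs_create_file("my_counter", 0644, NULL, &my_counter,
				    &my_counter_fops);
		return 0;
	}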
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index b25bca5bdb57..5b6a4540a05b 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -6,18 +6,6 @@
 
 #include "minix.h"
 
-static inline void inc_count(struct inode *inode)
-{
-	inode->i_nlink++;
-	mark_inode_dirty(inode);
-}
-
-static inline void dec_count(struct inode *inode)
-{
-	inode->i_nlink--;
-	mark_inode_dirty(inode);
-}
-
 static int add_nondir(struct dentry *dentry, struct inode *inode)
 {
 	int err = minix_add_link(dentry, inode);
@@ -25,7 +13,7 @@ static int add_nondir(struct dentry *dentry, struct inode *inode)
 		d_instantiate(dentry, inode);
 		return 0;
 	}
-	dec_count(inode);
+	inode_dec_link_count(inode);
 	iput(inode);
 	return err;
 }
@@ -125,7 +113,7 @@ out:
 	return err;
 
 out_fail:
-	dec_count(inode);
+	inode_dec_link_count(inode);
 	iput(inode);
 	goto out;
 }
@@ -139,7 +127,7 @@ static int minix_link(struct dentry * old_dentry, struct inode * dir,
 		return -EMLINK;
 
 	inode->i_ctime = CURRENT_TIME_SEC;
-	inc_count(inode);
+	inode_inc_link_count(inode);
 	atomic_inc(&inode->i_count);
 	return add_nondir(dentry, inode);
 }
@@ -152,7 +140,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode)
 	if (dir->i_nlink >= minix_sb(dir->i_sb)->s_link_max)
 		goto out;
 
-	inc_count(dir);
+	inode_inc_link_count(dir);
 
 	inode = minix_new_inode(dir, &err);
 	if (!inode)
@@ -163,7 +151,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode)
 		inode->i_mode |= S_ISGID;
 	minix_set_inode(inode, 0);
 
-	inc_count(inode);
+	inode_inc_link_count(inode);
 
 	err = minix_make_empty(inode, dir);
 	if (err)
@@ -178,11 +166,11 @@ out:
 	return err;
 
 out_fail:
-	dec_count(inode);
-	dec_count(inode);
+	inode_dec_link_count(inode);
+	inode_dec_link_count(inode);
 	iput(inode);
 out_dir:
-	dec_count(dir);
+	inode_dec_link_count(dir);
 	goto out;
 }
 
@@ -202,7 +190,7 @@ static int minix_unlink(struct inode * dir, struct dentry *dentry)
 		goto end_unlink;
 
 	inode->i_ctime = dir->i_ctime;
-	dec_count(inode);
+	inode_dec_link_count(inode);
 end_unlink:
 	return err;
 }
@@ -215,8 +203,8 @@ static int minix_rmdir(struct inode * dir, struct dentry *dentry)
 	if (minix_empty_dir(inode)) {
 		err = minix_unlink(dir, dentry);
 		if (!err) {
-			dec_count(dir);
-			dec_count(inode);
+			inode_dec_link_count(dir);
+			inode_dec_link_count(inode);
 		}
 	}
 	return err;
@@ -257,34 +245,34 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
 		new_de = minix_find_entry(new_dentry, &new_page);
 		if (!new_de)
 			goto out_dir;
-		inc_count(old_inode);
+		inode_inc_link_count(old_inode);
 		minix_set_link(new_de, new_page, old_inode);
 		new_inode->i_ctime = CURRENT_TIME_SEC;
 		if (dir_de)
 			new_inode->i_nlink--;
-		dec_count(new_inode);
+		inode_dec_link_count(new_inode);
 	} else {
 		if (dir_de) {
 			err = -EMLINK;
 			if (new_dir->i_nlink >= info->s_link_max)
 				goto out_dir;
 		}
-		inc_count(old_inode);
+		inode_inc_link_count(old_inode);
 		err = minix_add_link(new_dentry, old_inode);
 		if (err) {
-			dec_count(old_inode);
+			inode_dec_link_count(old_inode);
 			goto out_dir;
 		}
 		if (dir_de)
-			inc_count(new_dir);
+			inode_inc_link_count(new_dir);
 	}
 
 	minix_delete_entry(old_de, old_page);
-	dec_count(old_inode);
+	inode_dec_link_count(old_inode);
 
 	if (dir_de) {
 		minix_set_link(dir_de, dir_page, new_dir);
-		dec_count(old_dir);
+		inode_dec_link_count(old_dir);
 	}
 	return 0;
 
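The private inc_count()/dec_count() helpers are dropped in favour of the generic inode_inc_link_count()/inode_dec_link_count() helpers from <linux/fs.h>. Presumably these fold the same two steps, the link-count update and the dirty marking, into one call; a sketch of what the generic replacements are expected to look like:

	static inline void inode_inc_link_count(struct inode *inode)
	{
		inode->i_nlink++;
		mark_inode_dirty(inode);
	}

	static inline void inode_dec_link_count(struct inode *inode)
	{
		inode->i_nlink--;
		mark_inode_dirty(inode);
	}

With the helpers centralized, every filesystem adjusting i_nlink marks the inode dirty the same way instead of open-coding the pair.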
diff --git a/fs/namei.c b/fs/namei.c
index 8dc2b038d5d9..c72b940797fc 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -104,7 +104,7 @@
  */
 /*
  * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland)
- * implemented.  Let's see if raised priority of ->s_vfs_rename_sem gives
+ * implemented.  Let's see if raised priority of ->s_vfs_rename_mutex gives
  * any extra contention...
  */
 
@@ -1422,7 +1422,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
 		return NULL;
 	}
 
-	down(&p1->d_inode->i_sb->s_vfs_rename_sem);
+	mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
 
 	for (p = p1; p->d_parent != p; p = p->d_parent) {
 		if (p->d_parent == p2) {
@@ -1450,7 +1450,7 @@ void unlock_rename(struct dentry *p1, struct dentry *p2)
 	mutex_unlock(&p1->d_inode->i_mutex);
 	if (p1 != p2) {
 		mutex_unlock(&p2->d_inode->i_mutex);
-		up(&p1->d_inode->i_sb->s_vfs_rename_sem);
+		mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
 	}
 }
 
@@ -2277,17 +2277,17 @@ asmlinkage long sys_link(const char __user *oldname, const char __user *newname)
  *	a) we can get into loop creation. Check is done in is_subdir().
  *	b) race potential - two innocent renames can create a loop together.
  *	   That's where 4.4 screws up. Current fix: serialization on
- *	   sb->s_vfs_rename_sem. We might be more accurate, but that's another
+ *	   sb->s_vfs_rename_mutex. We might be more accurate, but that's another
  *	   story.
  *	c) we have to lock _three_ objects - parents and victim (if it exists).
  *	   And that - after we got ->i_mutex on parents (until then we don't know
  *	   whether the target exists).  Solution: try to be smart with locking
  *	   order for inodes.  We rely on the fact that tree topology may change
- *	   only under ->s_vfs_rename_sem _and_ that parent of the object we
+ *	   only under ->s_vfs_rename_mutex _and_ that parent of the object we
  *	   move will be locked.  Thus we can rank directories by the tree
  *	   (ancestors first) and rank all non-directories after them.
  *	   That works since everybody except rename does "lock parent, lookup,
- *	   lock child" and rename is under ->s_vfs_rename_sem.
+ *	   lock child" and rename is under ->s_vfs_rename_mutex.
  *	   HOWEVER, it relies on the assumption that any object with ->lookup()
  *	   has no more than 1 dentry.  If "hybrid" objects will ever appear,
  *	   we'd better make sure that there's no link(2) for them.
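lock_rename() is the only place s_vfs_rename_mutex is taken, and it is taken before either parent's i_mutex, which is what makes the ancestor-first ranking argument in the comment above hold. A hedged sketch of how a rename-style caller pairs lock_rename()/unlock_rename(), with the returned "trap" dentry used to reject the loop-creating cases (names hypothetical, error handling trimmed):

	static int do_cross_dir_rename(struct dentry *old_parent,
				       struct dentry *old_dentry,
				       struct dentry *new_parent,
				       struct dentry *new_dentry)
	{
		struct dentry *trap;
		int err = -EINVAL;

		/* Takes s_vfs_rename_mutex, then both parents' i_mutex. */
		trap = lock_rename(new_parent, old_parent);
		if (old_dentry == trap || new_dentry == trap)
			/* Source or target is the common ancestor: would loop. */
			goto out;
		/* ... lock the victim, call the filesystem's ->rename() ... */
		err = 0;
	out:
		unlock_rename(new_parent, old_parent);
		return err;
	}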
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c
index 973b444d6914..ebdad8f6398f 100644
--- a/fs/ncpfs/file.c
+++ b/fs/ncpfs/file.c
@@ -46,7 +46,7 @@ int ncp_make_open(struct inode *inode, int right)
 		NCP_FINFO(inode)->volNumber,
 		NCP_FINFO(inode)->dirEntNum);
 	error = -EACCES;
-	down(&NCP_FINFO(inode)->open_sem);
+	mutex_lock(&NCP_FINFO(inode)->open_mutex);
 	if (!atomic_read(&NCP_FINFO(inode)->opened)) {
 		struct ncp_entry_info finfo;
 		int result;
@@ -93,7 +93,7 @@ int ncp_make_open(struct inode *inode, int right)
 	}
 
 out_unlock:
-	up(&NCP_FINFO(inode)->open_sem);
+	mutex_unlock(&NCP_FINFO(inode)->open_mutex);
 out:
 	return error;
 }
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index d277a58bd128..0b521d3d97ce 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -63,7 +63,7 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
 
 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
 	    SLAB_CTOR_CONSTRUCTOR) {
-		init_MUTEX(&ei->open_sem);
+		mutex_init(&ei->open_mutex);
 		inode_init_once(&ei->vfs_inode);
 	}
 }
@@ -520,7 +520,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
 	}
 
 /*	server->lock = 0;	*/
-	init_MUTEX(&server->sem);
+	mutex_init(&server->mutex);
 	server->packet = NULL;
 /*	server->buffer_size = 0;	*/
 /*	server->conn_status = 0;	*/
@@ -557,7 +557,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
 	server->dentry_ttl = 0;	/* no caching */
 
 	INIT_LIST_HEAD(&server->tx.requests);
-	init_MUTEX(&server->rcv.creq_sem);
+	mutex_init(&server->rcv.creq_mutex);
 	server->tx.creq		= NULL;
 	server->rcv.creq	= NULL;
 	server->data_ready	= sock->sk->sk_data_ready;
diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c
index c755e1848a42..d9ebf6439f59 100644
--- a/fs/ncpfs/ncplib_kernel.c
+++ b/fs/ncpfs/ncplib_kernel.c
@@ -291,7 +291,7 @@ ncp_make_closed(struct inode *inode)
 	int err;
 
 	err = 0;
-	down(&NCP_FINFO(inode)->open_sem);
+	mutex_lock(&NCP_FINFO(inode)->open_mutex);
 	if (atomic_read(&NCP_FINFO(inode)->opened) == 1) {
 		atomic_set(&NCP_FINFO(inode)->opened, 0);
 		err = ncp_close_file(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle);
@@ -301,7 +301,7 @@ ncp_make_closed(struct inode *inode)
 			NCP_FINFO(inode)->volNumber,
 			NCP_FINFO(inode)->dirEntNum, err);
 	}
-	up(&NCP_FINFO(inode)->open_sem);
+	mutex_unlock(&NCP_FINFO(inode)->open_mutex);
 	return err;
 }
 
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index 6593a5ca88ba..8783eb7ec641 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -171,9 +171,9 @@ static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_req
 
 static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
 {
-	down(&server->rcv.creq_sem);
+	mutex_lock(&server->rcv.creq_mutex);
 	__ncp_abort_request(server, req, err);
-	up(&server->rcv.creq_sem);
+	mutex_unlock(&server->rcv.creq_mutex);
 }
 
 static inline void __ncptcp_abort(struct ncp_server *server)
@@ -303,20 +303,20 @@ static inline void __ncp_start_request(struct ncp_server *server, struct ncp_req
 
 static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req)
 {
-	down(&server->rcv.creq_sem);
+	mutex_lock(&server->rcv.creq_mutex);
 	if (!ncp_conn_valid(server)) {
-		up(&server->rcv.creq_sem);
+		mutex_unlock(&server->rcv.creq_mutex);
 		printk(KERN_ERR "ncpfs: tcp: Server died\n");
 		return -EIO;
 	}
 	if (server->tx.creq || server->rcv.creq) {
 		req->status = RQ_QUEUED;
 		list_add_tail(&req->req, &server->tx.requests);
-		up(&server->rcv.creq_sem);
+		mutex_unlock(&server->rcv.creq_mutex);
 		return 0;
 	}
 	__ncp_start_request(server, req);
-	up(&server->rcv.creq_sem);
+	mutex_unlock(&server->rcv.creq_mutex);
 	return 0;
 }
 
@@ -400,7 +400,7 @@ void ncpdgram_rcv_proc(void *s)
 			info_server(server, 0, server->unexpected_packet.data, result);
 			continue;
 		}
-		down(&server->rcv.creq_sem);
+		mutex_lock(&server->rcv.creq_mutex);
 		req = server->rcv.creq;
 		if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence &&
 				server->connection == get_conn_number(&reply)))) {
@@ -430,11 +430,11 @@ void ncpdgram_rcv_proc(void *s)
 				server->rcv.creq = NULL;
 				ncp_finish_request(req, result);
 				__ncp_next_request(server);
-				up(&server->rcv.creq_sem);
+				mutex_unlock(&server->rcv.creq_mutex);
 				continue;
 			}
 		}
-		up(&server->rcv.creq_sem);
+		mutex_unlock(&server->rcv.creq_mutex);
 	}
 drop:;
 		_recv(sock, &reply, sizeof(reply), MSG_DONTWAIT);
@@ -472,9 +472,9 @@ static void __ncpdgram_timeout_proc(struct ncp_server *server)
 void ncpdgram_timeout_proc(void *s)
 {
 	struct ncp_server *server = s;
-	down(&server->rcv.creq_sem);
+	mutex_lock(&server->rcv.creq_mutex);
 	__ncpdgram_timeout_proc(server);
-	up(&server->rcv.creq_sem);
+	mutex_unlock(&server->rcv.creq_mutex);
 }
 
 static inline void ncp_init_req(struct ncp_request_reply* req)
@@ -657,18 +657,18 @@ void ncp_tcp_rcv_proc(void *s)
 {
 	struct ncp_server *server = s;
 
-	down(&server->rcv.creq_sem);
+	mutex_lock(&server->rcv.creq_mutex);
 	__ncptcp_rcv_proc(server);
-	up(&server->rcv.creq_sem);
+	mutex_unlock(&server->rcv.creq_mutex);
 }
 
 void ncp_tcp_tx_proc(void *s)
 {
 	struct ncp_server *server = s;
 
-	down(&server->rcv.creq_sem);
+	mutex_lock(&server->rcv.creq_mutex);
 	__ncptcp_try_send(server);
-	up(&server->rcv.creq_sem);
+	mutex_unlock(&server->rcv.creq_mutex);
 }
 
 static int do_ncp_rpc_call(struct ncp_server *server, int size,
@@ -833,7 +833,7 @@ int ncp_disconnect(struct ncp_server *server)
 
 void ncp_lock_server(struct ncp_server *server)
 {
-	down(&server->sem);
+	mutex_lock(&server->mutex);
 	if (server->lock)
 		printk(KERN_WARNING "ncp_lock_server: was locked!\n");
 	server->lock = 1;
@@ -846,5 +846,5 @@ void ncp_unlock_server(struct ncp_server *server)
 		return;
 	}
 	server->lock = 0;
-	up(&server->sem);
+	mutex_unlock(&server->mutex);
 }
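All of the creq_mutex sections above implement a single-outstanding-request channel: one request is on the wire at a time, and latecomers wait on tx.requests until __ncp_next_request() starts them. The core of ncp_add_request() restated as a simplified, hedged sketch (the connection-validity check is omitted):

	static int submit_request(struct ncp_server *server,
				  struct ncp_request_reply *req)
	{
		mutex_lock(&server->rcv.creq_mutex);
		if (server->tx.creq || server->rcv.creq) {
			/* A request is already in flight: queue this one. */
			req->status = RQ_QUEUED;
			list_add_tail(&req->req, &server->tx.requests);
		} else {
			/* Channel idle: start transmitting immediately. */
			__ncp_start_request(server, req);
		}
		mutex_unlock(&server->rcv.creq_mutex);
		return 0;
	}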
diff --git a/fs/nls/Kconfig b/fs/nls/Kconfig
index 0ab8f00bdbb2..976ecccd6f56 100644
--- a/fs/nls/Kconfig
+++ b/fs/nls/Kconfig
@@ -491,7 +491,7 @@ config NLS_KOI8_U
 	  (koi8-u) and Belarusian (koi8-ru) character sets.
 
 config NLS_UTF8
-	tristate "NLS UTF8"
+	tristate "NLS UTF-8"
 	depends on NLS
 	help
 	  If you want to display filenames with native language characters
diff --git a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog
index 9d8ffa89e2c2..35cc4b1d60f7 100644
--- a/fs/ntfs/ChangeLog
+++ b/fs/ntfs/ChangeLog
@@ -16,8 +16,34 @@ ToDo/Notes:
 	  inode having been discarded already.  Whether this can actually ever
 	  happen is unclear however so it is worth waiting until someone hits
 	  the problem.
-	- Enable the code for setting the NT4 compatibility flag when we start
-	  making NTFS 1.2 specific modifications.
+
+2.1.27 - Various bug fixes and cleanups.
+
+	- Fix two compiler warnings on Alpha.  Thanks to Andrew Morton for
+	  reporting them.
+	- Fix an (innocent) off-by-one error in the runlist code.
+	- Fix a buggette in an "should be impossible" case handling where we
+	  continued the attribute lookup loop instead of aborting it.
+	- Use buffer_migrate_page() for the ->migratepage function of all ntfs
+	  address space operations.
+	- Fix comparison of $MFT and $MFTMirr to not bail out when there are
+	  unused, invalid mft records which are the same in both $MFT and
+	  $MFTMirr.
+	- Add support for sparse files which have a compression unit of 0.
+	- Remove all the make_bad_inode() calls.  This should only be called
+	  from read inode and new inode code paths.
+	- Limit name length in fs/ntfs/unistr.c::ntfs_nlstoucs() to maximum
+	  allowed by NTFS, i.e. 255 Unicode characters, not including the
+	  terminating NULL (which is not stored on disk).
+	- Improve comments on file attribute flags in fs/ntfs/layout.h.
+	- Fix a bug in fs/ntfs/inode.c::ntfs_read_locked_index_inode() where we
+	  forgot to update a temporary variable so loading index inodes which
+	  have an index allocation attribute failed.
+	- Add a missing call to flush_dcache_mft_record_page() in
+	  fs/ntfs/inode.c::ntfs_write_inode().
+	- Handle the recently introduced -ENAMETOOLONG return value from
+	  fs/ntfs/unistr.c::ntfs_nlstoucs() in fs/ntfs/namei.c::ntfs_lookup().
+	- Semaphore to mutex conversion.  (Ingo Molnar)
 
 2.1.26 - Minor bug fixes and updates.
 
diff --git a/fs/ntfs/Makefile b/fs/ntfs/Makefile
index d95fac7fdeb6..e27b4eacffbf 100644
--- a/fs/ntfs/Makefile
+++ b/fs/ntfs/Makefile
@@ -6,7 +6,7 @@ ntfs-objs := aops.o attrib.o collate.o compress.o debug.o dir.o file.o \
 	     index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \
 	     unistr.o upcase.o
 
-EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.26\"
+EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.27\"
 
 ifeq ($(CONFIG_NTFS_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 7e361da770b3..580412d330cb 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -22,6 +22,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
@@ -1277,18 +1278,18 @@ unm_done:
 
 		tni = locked_nis[nr_locked_nis];
 		/* Get the base inode. */
-		down(&tni->extent_lock);
+		mutex_lock(&tni->extent_lock);
 		if (tni->nr_extents >= 0)
 			base_tni = tni;
 		else {
 			base_tni = tni->ext.base_ntfs_ino;
 			BUG_ON(!base_tni);
 		}
-		up(&tni->extent_lock);
+		mutex_unlock(&tni->extent_lock);
 		ntfs_debug("Unlocking %s inode 0x%lx.",
 				tni == base_tni ? "base" : "extent",
 				tni->mft_no);
-		up(&tni->mrec_lock);
+		mutex_unlock(&tni->mrec_lock);
 		atomic_dec(&tni->count);
 		iput(VFS_I(base_tni));
 	}
@@ -1529,7 +1530,6 @@ err_out:
 				"error %i.", err);
 		SetPageError(page);
 		NVolSetErrors(ni->vol);
-		make_bad_inode(vi);
 	}
 	unlock_page(page);
 	if (ctx)
@@ -1551,6 +1551,9 @@ struct address_space_operations ntfs_aops = {
 #ifdef NTFS_RW
 	.writepage	= ntfs_writepage,	/* Write dirty page to disk. */
 #endif /* NTFS_RW */
+	.migratepage	= buffer_migrate_page,	/* Move a page cache page from
+						   one physical page to an
+						   other. */
 };
 
 /**
@@ -1567,6 +1570,9 @@ struct address_space_operations ntfs_mst_aops = {
 						   without touching the buffers
 						   belonging to the page. */
 #endif /* NTFS_RW */
+	.migratepage	= buffer_migrate_page,	/* Move a page cache page from
+						   one physical page to an
+						   other. */
 };
 
 #ifdef NTFS_RW
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 9480a0526cd3..1663f5c3c6aa 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -1,7 +1,7 @@
 /**
  * attrib.c - NTFS attribute operations.  Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2005 Anton Altaparmakov
+ * Copyright (c) 2001-2006 Anton Altaparmakov
  * Copyright (c) 2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
@@ -1048,7 +1048,7 @@ do_next_attr_loop:
 				le32_to_cpu(ctx->mrec->bytes_allocated))
 			break;
 		if (a->type == AT_END)
-			continue;
+			break;
 		if (!a->length)
 			break;
 		if (al_entry->instance != a->instance)
@@ -1695,7 +1695,9 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
 		a->data.non_resident.initialized_size =
 				cpu_to_sle64(attr_size);
 		if (NInoSparse(ni) || NInoCompressed(ni)) {
-			a->data.non_resident.compression_unit = 4;
+			a->data.non_resident.compression_unit = 0;
+			if (NInoCompressed(ni) || vol->major_ver < 3)
+				a->data.non_resident.compression_unit = 4;
 			a->data.non_resident.compressed_size =
 					a->data.non_resident.allocated_size;
 		} else
@@ -1714,13 +1716,20 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
 	ni->allocated_size = new_size;
 	if (NInoSparse(ni) || NInoCompressed(ni)) {
 		ni->itype.compressed.size = ni->allocated_size;
-		ni->itype.compressed.block_size = 1U <<
-				(a->data.non_resident.compression_unit +
-				vol->cluster_size_bits);
-		ni->itype.compressed.block_size_bits =
-				ffs(ni->itype.compressed.block_size) - 1;
-		ni->itype.compressed.block_clusters = 1U <<
-				a->data.non_resident.compression_unit;
+		if (a->data.non_resident.compression_unit) {
+			ni->itype.compressed.block_size = 1U << (a->data.
+					non_resident.compression_unit +
+					vol->cluster_size_bits);
+			ni->itype.compressed.block_size_bits =
+					ffs(ni->itype.compressed.block_size) -
+					1;
+			ni->itype.compressed.block_clusters = 1U <<
+					a->data.non_resident.compression_unit;
+		} else {
+			ni->itype.compressed.block_size = 0;
+			ni->itype.compressed.block_size_bits = 0;
+			ni->itype.compressed.block_clusters = 0;
+		}
 		vi->i_blocks = ni->itype.compressed.size >> 9;
 	} else
 		vi->i_blocks = ni->allocated_size >> 9;
@@ -2429,16 +2438,12 @@ undo_alloc:
 					"chkdsk to recover.", IS_ERR(m) ?
 					"restore attribute search context" :
 					"truncate attribute runlist");
-		make_bad_inode(vi);
-		make_bad_inode(VFS_I(base_ni));
 		NVolSetErrors(vol);
 	} else if (mp_rebuilt) {
 		if (ntfs_attr_record_resize(m, a, attr_len)) {
 			ntfs_error(vol->sb, "Failed to restore attribute "
 					"record in error code path.  Run "
 					"chkdsk to recover.");
-			make_bad_inode(vi);
-			make_bad_inode(VFS_I(base_ni));
 			NVolSetErrors(vol);
 		} else /* if (success) */ {
 			if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
@@ -2451,8 +2456,6 @@ undo_alloc:
 						"mapping pairs array in error "
 						"code path.  Run chkdsk to "
 						"recover.");
-				make_bad_inode(vi);
-				make_bad_inode(VFS_I(base_ni));
 				NVolSetErrors(vol);
 			}
 			flush_dcache_mft_record_page(ctx->ntfs_ino);
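The compression-unit handling above is worth restating numerically: block_size = 1 << (compression_unit + cluster_size_bits), so the standard compression_unit of 4 on a volume with 4KiB clusters (cluster_size_bits = 12) gives 1 << 16 = 64KiB compression blocks spanning 1 << 4 = 16 clusters, while the newly supported compression_unit of 0 (sparse-only files on NTFS 3.x volumes) zeroes all three derived fields. A standalone restatement of that logic (hypothetical helper, not part of the patch; u8/u32 come from <linux/types.h> and ffs() from <linux/bitops.h>):

	static void calc_compression_block(u8 compression_unit,
			u8 cluster_size_bits, u32 *block_size,
			u8 *block_size_bits, u8 *block_clusters)
	{
		if (compression_unit) {
			/* e.g. unit 4, 4KiB clusters: 1 << (4 + 12) = 64KiB. */
			*block_size = 1U << (compression_unit +
					cluster_size_bits);
			*block_size_bits = ffs(*block_size) - 1;
			*block_clusters = 1U << compression_unit;
		} else {
			/* Sparse file with compression unit 0: there are no
			 * compression blocks at all. */
			*block_size = 0;
			*block_size_bits = 0;
			*block_clusters = 0;
		}
	}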
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index 25d24106f893..68a607ff9fd3 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -67,7 +67,7 @@ static DEFINE_SPINLOCK(ntfs_cb_lock);
 /**
  * allocate_compression_buffers - allocate the decompression buffers
  *
- * Caller has to hold the ntfs_lock semaphore.
+ * Caller has to hold the ntfs_lock mutex.
  *
  * Return 0 on success or -ENOMEM if the allocations failed.
  */
@@ -84,7 +84,7 @@ int allocate_compression_buffers(void)
 /**
  * free_compression_buffers - free the decompression buffers
  *
- * Caller has to hold the ntfs_lock semaphore.
+ * Caller has to hold the ntfs_lock mutex.
  */
 void free_compression_buffers(void)
 {
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index b0690d4c8906..9d9ed3fe371d 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -1136,7 +1136,7 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 	if (fpos == 1) {
 		ntfs_debug("Calling filldir for .. with len 2, fpos 0x1, "
 				"inode 0x%lx, DT_DIR.",
-				parent_ino(filp->f_dentry));
+				(unsigned long)parent_ino(filp->f_dentry));
 		rc = filldir(dirent, "..", 2, fpos,
 				parent_ino(filp->f_dentry), DT_DIR);
 		if (rc)
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 5027d3d1b3fe..f5d057e4acc2 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -943,7 +943,8 @@ rl_not_mapped_enoent:
 			}
 			ni->runlist.rl = rl;
 			status.runlist_merged = 1;
-			ntfs_debug("Allocated cluster, lcn 0x%llx.", lcn);
+			ntfs_debug("Allocated cluster, lcn 0x%llx.",
+					(unsigned long long)lcn);
 			/* Map and lock the mft record and get the attribute record. */
 			if (!NInoAttr(ni))
 				base_ni = ni;
@@ -1206,8 +1207,6 @@ rl_not_mapped_enoent:
 					"attribute runlist in error code "
 					"path.  Run chkdsk to recover the "
 					"lost cluster.");
-			make_bad_inode(vi);
-			make_bad_inode(VFS_I(base_ni));
 			NVolSetErrors(vol);
 		} else /* if (success) */ {
 			status.runlist_merged = 0;
@@ -1238,8 +1237,6 @@ rl_not_mapped_enoent:
 			ntfs_error(vol->sb, "Failed to restore attribute "
 					"record in error code path.  Run "
 					"chkdsk to recover.");
-			make_bad_inode(vi);
-			make_bad_inode(VFS_I(base_ni));
 			NVolSetErrors(vol);
 		} else /* if (success) */ {
 			if (ntfs_mapping_pairs_build(vol, (u8*)a +
@@ -1252,8 +1249,6 @@ rl_not_mapped_enoent:
 						"mapping pairs array in error "
 						"code path.  Run chkdsk to "
 						"recover.");
-				make_bad_inode(vi);
-				make_bad_inode(VFS_I(base_ni));
 				NVolSetErrors(vol);
 			}
 			flush_dcache_mft_record_page(ctx->ntfs_ino);
@@ -1622,11 +1617,8 @@ err_out:
 	unmap_mft_record(base_ni);
 	ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error "
 			"code %i).", err);
-	if (err != -ENOMEM) {
+	if (err != -ENOMEM)
 		NVolSetErrors(ni->vol);
-		make_bad_inode(VFS_I(base_ni));
-		make_bad_inode(vi);
-	}
 	return err;
 }
 
@@ -1801,8 +1793,6 @@ err_out:
 		ntfs_error(vi->i_sb, "Resident attribute commit write failed "
 				"with error %i.", err);
 		NVolSetErrors(ni->vol);
-		make_bad_inode(VFS_I(base_ni));
-		make_bad_inode(vi);
 	}
 	if (ctx)
 		ntfs_attr_put_search_ctx(ctx);
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 55263b7de9c0..4c86b7e1d1eb 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -1,7 +1,7 @@
 /**
  * inode.c - NTFS kernel inode handling.  Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2005 Anton Altaparmakov
+ * Copyright (c) 2001-2006 Anton Altaparmakov
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
@@ -19,13 +19,19 @@
  * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
-#include <linux/pagemap.h>
 #include <linux/buffer_head.h>
-#include <linux/smp_lock.h>
-#include <linux/quotaops.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
 #include <linux/mount.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/quotaops.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
 
 #include "aops.h"
+#include "attrib.h"
+#include "bitmap.h"
 #include "dir.h"
 #include "debug.h"
 #include "inode.h"
@@ -382,7 +388,7 @@ void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni)
 	atomic_set(&ni->count, 1);
 	ni->vol = NTFS_SB(sb);
 	ntfs_init_runlist(&ni->runlist);
-	init_MUTEX(&ni->mrec_lock);
+	mutex_init(&ni->mrec_lock);
 	ni->page = NULL;
 	ni->page_ofs = 0;
 	ni->attr_list_size = 0;
@@ -394,7 +400,7 @@ void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni)
 	ni->itype.index.collation_rule = 0;
 	ni->itype.index.block_size_bits = 0;
 	ni->itype.index.vcn_size_bits = 0;
-	init_MUTEX(&ni->extent_lock);
+	mutex_init(&ni->extent_lock);
 	ni->nr_extents = 0;
 	ni->ext.base_ntfs_ino = NULL;
 }
@@ -1064,10 +1070,10 @@ skip_large_dir_stuff:
 			if (a->non_resident) {
 				NInoSetNonResident(ni);
 				if (NInoCompressed(ni) || NInoSparse(ni)) {
-					if (a->data.non_resident.compression_unit !=
-							4) {
+					if (NInoCompressed(ni) && a->data.non_resident.
+							compression_unit != 4) {
 						ntfs_error(vi->i_sb, "Found "
-								"nonstandard "
+								"non-standard "
 								"compression unit (%u "
 								"instead of 4).  "
 								"Cannot handle this.",
@@ -1076,16 +1082,26 @@ skip_large_dir_stuff:
 						err = -EOPNOTSUPP;
 						goto unm_err_out;
 					}
-					ni->itype.compressed.block_clusters = 1U <<
-							a->data.non_resident.
-							compression_unit;
-					ni->itype.compressed.block_size = 1U << (
-							a->data.non_resident.
-							compression_unit +
-							vol->cluster_size_bits);
-					ni->itype.compressed.block_size_bits = ffs(
-							ni->itype.compressed.
-							block_size) - 1;
+					if (a->data.non_resident.compression_unit) {
+						ni->itype.compressed.block_size = 1U <<
+								(a->data.non_resident.
+								compression_unit +
+								vol->cluster_size_bits);
+						ni->itype.compressed.block_size_bits =
+								ffs(ni->itype.
+								compressed.
+								block_size) - 1;
+						ni->itype.compressed.block_clusters =
+								1U << a->data.
+								non_resident.
+								compression_unit;
+					} else {
+						ni->itype.compressed.block_size = 0;
+						ni->itype.compressed.block_size_bits =
+								0;
+						ni->itype.compressed.block_clusters =
+								0;
+					}
 					ni->itype.compressed.size = sle64_to_cpu(
 							a->data.non_resident.
 							compressed_size);
@@ -1338,8 +1354,9 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi)
 		goto unm_err_out;
 	}
 	if (NInoCompressed(ni) || NInoSparse(ni)) {
-		if (a->data.non_resident.compression_unit != 4) {
-			ntfs_error(vi->i_sb, "Found nonstandard "
+		if (NInoCompressed(ni) && a->data.non_resident.
+				compression_unit != 4) {
+			ntfs_error(vi->i_sb, "Found non-standard "
 					"compression unit (%u instead "
 					"of 4).  Cannot handle this.",
 					a->data.non_resident.
@@ -1347,13 +1364,22 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi)
 			err = -EOPNOTSUPP;
 			goto unm_err_out;
 		}
-		ni->itype.compressed.block_clusters = 1U <<
-				a->data.non_resident.compression_unit;
-		ni->itype.compressed.block_size = 1U << (
-				a->data.non_resident.compression_unit +
-				vol->cluster_size_bits);
-		ni->itype.compressed.block_size_bits = ffs(
-				ni->itype.compressed.block_size) - 1;
+		if (a->data.non_resident.compression_unit) {
+			ni->itype.compressed.block_size = 1U <<
+					(a->data.non_resident.
+					compression_unit +
+					vol->cluster_size_bits);
+			ni->itype.compressed.block_size_bits =
+					ffs(ni->itype.compressed.
+					block_size) - 1;
+			ni->itype.compressed.block_clusters = 1U <<
+					a->data.non_resident.
+					compression_unit;
+		} else {
+			ni->itype.compressed.block_size = 0;
+			ni->itype.compressed.block_size_bits = 0;
+			ni->itype.compressed.block_clusters = 0;
+		}
 		ni->itype.compressed.size = sle64_to_cpu(
 				a->data.non_resident.compressed_size);
 	}
@@ -1406,7 +1432,6 @@ err_out:
 			"Run chkdsk.", err, vi->i_ino, ni->type, ni->name_len,
 			base_vi->i_ino);
 	make_bad_inode(vi);
-	make_bad_inode(base_vi);
 	if (err != -ENOMEM)
 		NVolSetErrors(vol);
 	return err;
@@ -1591,6 +1616,7 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
 				"$INDEX_ALLOCATION attribute.");
 		goto unm_err_out;
 	}
+	a = ctx->attr;
 	if (!a->non_resident) {
 		ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute is "
 				"resident.");
@@ -2823,11 +2849,8 @@ done:
 old_bad_out:
 	old_size = -1;
 bad_out:
-	if (err != -ENOMEM && err != -EOPNOTSUPP) {
-		make_bad_inode(vi);
-		make_bad_inode(VFS_I(base_ni));
+	if (err != -ENOMEM && err != -EOPNOTSUPP)
 		NVolSetErrors(vol);
-	}
 	if (err != -EOPNOTSUPP)
 		NInoSetTruncateFailed(ni);
 	else if (old_size >= 0)
@@ -2842,11 +2865,8 @@ out:
 	ntfs_debug("Failed.  Returning error code %i.", err);
 	return err;
conv_err_out:
-	if (err != -ENOMEM && err != -EOPNOTSUPP) {
-		make_bad_inode(vi);
-		make_bad_inode(VFS_I(base_ni));
+	if (err != -ENOMEM && err != -EOPNOTSUPP)
 		NVolSetErrors(vol);
-	}
 	if (err != -EOPNOTSUPP)
 		NInoSetTruncateFailed(ni);
 	else
@@ -3044,15 +3064,18 @@ int ntfs_write_inode(struct inode *vi, int sync)
 	 * record will be cleaned and written out to disk below, i.e. before
 	 * this function returns.
 	 */
-	if (modified && !NInoTestSetDirty(ctx->ntfs_ino))
-		mark_ntfs_record_dirty(ctx->ntfs_ino->page,
-				ctx->ntfs_ino->page_ofs);
+	if (modified) {
+		flush_dcache_mft_record_page(ctx->ntfs_ino);
+		if (!NInoTestSetDirty(ctx->ntfs_ino))
+			mark_ntfs_record_dirty(ctx->ntfs_ino->page,
+					ctx->ntfs_ino->page_ofs);
+	}
 	ntfs_attr_put_search_ctx(ctx);
 	/* Now the access times are updated, write the base mft record. */
 	if (NInoDirty(ni))
 		err = write_mft_record(ni, m, sync);
 	/* Write all attached extent mft records. */
-	down(&ni->extent_lock);
+	mutex_lock(&ni->extent_lock);
 	if (ni->nr_extents > 0) {
 		ntfs_inode **extent_nis = ni->ext.extent_ntfs_inos;
 		int i;
@@ -3079,7 +3102,7 @@ int ntfs_write_inode(struct inode *vi, int sync)
 			}
 		}
 	}
-	up(&ni->extent_lock);
+	mutex_unlock(&ni->extent_lock);
 	unmap_mft_record(ni);
 	if (unlikely(err))
 		goto err_out;
@@ -3094,9 +3117,7 @@ err_out:
 				"retries later.");
 		mark_inode_dirty(vi);
 	} else {
-		ntfs_error(vi->i_sb, "Failed (error code %i): Marking inode "
-				"as bad.  You should run chkdsk.", -err);
-		make_bad_inode(vi);
+		ntfs_error(vi->i_sb, "Failed (error %i):  Run chkdsk.", -err);
 		NVolSetErrors(ni->vol);
 	}
 	return err;
diff --git a/fs/ntfs/inode.h b/fs/ntfs/inode.h
index 3de5c0231966..f088291e017c 100644
--- a/fs/ntfs/inode.h
+++ b/fs/ntfs/inode.h
@@ -24,12 +24,13 @@
 #ifndef _LINUX_NTFS_INODE_H
 #define _LINUX_NTFS_INODE_H
 
-#include <linux/mm.h>
+#include <asm/atomic.h>
+
 #include <linux/fs.h>
-#include <linux/seq_file.h>
 #include <linux/list.h>
-#include <asm/atomic.h>
-#include <asm/semaphore.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
 
 #include "layout.h"
 #include "volume.h"
@@ -81,7 +82,7 @@ struct _ntfs_inode {
 	 * The following fields are only valid for real inodes and extent
 	 * inodes.
 	 */
-	struct semaphore mrec_lock;	/* Lock for serializing access to the
+	struct mutex mrec_lock;	/* Lock for serializing access to the
 				   mft record belonging to this inode. */
 	struct page *page;	/* The page containing the mft record of the
 				   inode.  This should only be touched by the
@@ -119,7 +120,7 @@ struct _ntfs_inode {
 			u8 block_clusters;	/* Number of clusters per cb. */
 		} compressed;
 	} itype;
-	struct semaphore extent_lock;	/* Lock for accessing/modifying the
+	struct mutex extent_lock;	/* Lock for accessing/modifying the
 					   below . */
 	s32 nr_extents;	/* For a base mft record, the number of attached extent
 			   inodes (0 if none), for extent records and for fake
diff --git a/fs/ntfs/layout.h b/fs/ntfs/layout.h
index bb408d4dcbb0..d34b93cb8b48 100644
--- a/fs/ntfs/layout.h
+++ b/fs/ntfs/layout.h
@@ -769,7 +769,7 @@ typedef struct {
 				   compressed.  (This effectively limits the
 				   compression unit size to be a power of two
 				   clusters.)  WinNT4 only uses a value of 4.
-				   Sparse files also have this set to 4. */
+				   Sparse files have this set to 0 on XPSP2. */
 /* 35*/	u8 reserved[5];		/* Align to 8-byte boundary. */
 /* The sizes below are only used when lowest_vcn is zero, as otherwise it would
    be difficult to keep them up-to-date.*/
@@ -801,13 +801,16 @@ typedef struct {
 typedef ATTR_RECORD ATTR_REC;
 
 /*
- * File attribute flags (32-bit).
+ * File attribute flags (32-bit) appearing in the file_attributes fields of the
+ * STANDARD_INFORMATION attribute of MFT_RECORDs and the FILENAME_ATTR
+ * attributes of MFT_RECORDs and directory index entries.
+ *
+ * All of the below flags appear in the directory index entries but only some
+ * appear in the STANDARD_INFORMATION attribute whilst only some others appear
+ * in the FILENAME_ATTR attribute of MFT_RECORDs.  Unless otherwise stated the
+ * flags appear in all of the above.
 */
 enum {
-	/*
-	 * The following flags are only present in the STANDARD_INFORMATION
-	 * attribute (in the field file_attributes).
-	 */
 	FILE_ATTR_READONLY		= const_cpu_to_le32(0x00000001),
 	FILE_ATTR_HIDDEN		= const_cpu_to_le32(0x00000002),
 	FILE_ATTR_SYSTEM		= const_cpu_to_le32(0x00000004),
@@ -839,18 +842,14 @@ enum {
 	   F_A_COMPRESSED, and F_A_ENCRYPTED and preserves the rest.  This mask
 	   is used to to obtain all flags that are valid for setting. */
 	/*
-	 * The following flag is only present in the FILE_NAME attribute (in
-	 * the field file_attributes).
+	 * The flag FILE_ATTR_DUP_FILENAME_INDEX_PRESENT is present in all
+	 * FILENAME_ATTR attributes but not in the STANDARD_INFORMATION
+	 * attribute of an mft record.
 	 */
 	FILE_ATTR_DUP_FILE_NAME_INDEX_PRESENT	= const_cpu_to_le32(0x10000000),
 	/* Note, this is a copy of the corresponding bit from the mft record,
 	   telling us whether this is a directory or not, i.e. whether it has
 	   an index root attribute or not. */
-	/*
-	 * The following flag is present both in the STANDARD_INFORMATION
-	 * attribute and in the FILE_NAME attribute (in the field
-	 * file_attributes).
-	 */
 	FILE_ATTR_DUP_VIEW_INDEX_PRESENT	= const_cpu_to_le32(0x20000000),
 	/* Note, this is a copy of the corresponding bit from the mft record,
 	   telling us whether this file has a view index present (eg. object id
@@ -891,7 +890,7 @@ typedef struct {
 					   Windows this is only updated when
 					   accessed if some time delta has
 					   passed since the last update.  Also,
-					   last access times updates can be
+					   last access time updates can be
 					   disabled altogether for speed. */
 /* 32*/	FILE_ATTR_FLAGS file_attributes;	/* Flags describing the file. */
 /* 36*/	union {
@@ -1076,16 +1075,21 @@ typedef struct {
 /* 20*/	sle64 last_access_time;	/* Time this mft record was last
 				   accessed. */
 /* 28*/	sle64 allocated_size;	/* Byte size of on-disk allocated space
-				   for the data attribute.  So for
-				   normal $DATA, this is the
-				   allocated_size from the unnamed
-				   $DATA attribute and for compressed
-				   and/or sparse $DATA, this is the
-				   compressed_size from the unnamed
-				   $DATA attribute.  NOTE: This is a
-				   multiple of the cluster size. */
-/* 30*/	sle64 data_size;	/* Byte size of actual data in data
-				   attribute. */
+				   for the unnamed data attribute.  So
+				   for normal $DATA, this is the
+				   allocated_size from the unnamed
+				   $DATA attribute and for compressed
+				   and/or sparse $DATA, this is the
+				   compressed_size from the unnamed
+				   $DATA attribute.  For a directory or
+				   other inode without an unnamed $DATA
+				   attribute, this is always 0.  NOTE:
+				   This is a multiple of the cluster
+				   size. */
+/* 30*/	sle64 data_size;	/* Byte size of actual data in unnamed
+				   data attribute.  For a directory or
+				   other inode without an unnamed $DATA
+				   attribute, this is always 0. */
 /* 38*/	FILE_ATTR_FLAGS file_attributes;	/* Flags describing the file. */
 /* 3c*/	union {
 /* 3c*/	struct {
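Since these flags are little-endian on-disk values, tests against them use the const_cpu_to_le32() constants directly, with no byte swapping at the call site. A hedged illustration, assuming the FILE_NAME_ATTR typedef and its file_attributes field as declared elsewhere in layout.h:

	/* Hypothetical helper: does this name record describe a directory? */
	static inline int ntfs_fn_is_directory(const FILE_NAME_ATTR *fn)
	{
		return (fn->file_attributes &
				FILE_ATTR_DUP_FILE_NAME_INDEX_PRESENT) ? 1 : 0;
	}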
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 6499aafc2258..4e72bc7afdf9 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -93,6 +93,7 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
93 "Run chkdsk.", ni->mft_no); 93 "Run chkdsk.", ni->mft_no);
94 ntfs_unmap_page(page); 94 ntfs_unmap_page(page);
95 page = ERR_PTR(-EIO); 95 page = ERR_PTR(-EIO);
96 NVolSetErrors(vol);
96 } 97 }
97err_out: 98err_out:
98 ni->page = NULL; 99 ni->page = NULL;
@@ -104,8 +105,8 @@ err_out:
104 * map_mft_record - map, pin and lock an mft record 105 * map_mft_record - map, pin and lock an mft record
105 * @ni: ntfs inode whose MFT record to map 106 * @ni: ntfs inode whose MFT record to map
106 * 107 *
107 * First, take the mrec_lock semaphore. We might now be sleeping, while waiting 108 * First, take the mrec_lock mutex. We might now be sleeping, while waiting
108 * for the semaphore if it was already locked by someone else. 109 * for the mutex if it was already locked by someone else.
109 * 110 *
110 * The page of the record is mapped using map_mft_record_page() before being 111 * The page of the record is mapped using map_mft_record_page() before being
111 * returned to the caller. 112 * returned to the caller.
@@ -135,9 +136,9 @@ err_out:
135 * So that code will end up having to own the mrec_lock of all mft 136 * So that code will end up having to own the mrec_lock of all mft
136 * records/inodes present in the page before I/O can proceed. In that case we 137 * records/inodes present in the page before I/O can proceed. In that case we
137 * wouldn't need to bother with PG_locked and PG_uptodate as nobody will be 138 * wouldn't need to bother with PG_locked and PG_uptodate as nobody will be
138 * accessing anything without owning the mrec_lock semaphore. But we do need 139 * accessing anything without owning the mrec_lock mutex. But we do need to
139 * to use them because of the read_cache_page() invocation and the code becomes 140 * use them because of the read_cache_page() invocation and the code becomes so
140 * so much simpler this way that it is well worth it. 141 * much simpler this way that it is well worth it.
141 * 142 *
142 * The mft record is now ours and we return a pointer to it. You need to check 143 * The mft record is now ours and we return a pointer to it. You need to check
143 * the returned pointer with IS_ERR() and if that is true, PTR_ERR() will return 144 * the returned pointer with IS_ERR() and if that is true, PTR_ERR() will return
@@ -160,13 +161,13 @@ MFT_RECORD *map_mft_record(ntfs_inode *ni)
160 atomic_inc(&ni->count); 161 atomic_inc(&ni->count);
161 162
162 /* Serialize access to this mft record. */ 163 /* Serialize access to this mft record. */
163 down(&ni->mrec_lock); 164 mutex_lock(&ni->mrec_lock);
164 165
165 m = map_mft_record_page(ni); 166 m = map_mft_record_page(ni);
166 if (likely(!IS_ERR(m))) 167 if (likely(!IS_ERR(m)))
167 return m; 168 return m;
168 169
169 up(&ni->mrec_lock); 170 mutex_unlock(&ni->mrec_lock);
170 atomic_dec(&ni->count); 171 atomic_dec(&ni->count);
171 ntfs_error(ni->vol->sb, "Failed with error code %lu.", -PTR_ERR(m)); 172 ntfs_error(ni->vol->sb, "Failed with error code %lu.", -PTR_ERR(m));
172 return m; 173 return m;
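
With mrec_lock now a mutex, callers pair map_mft_record() and unmap_mft_record() exactly as before; a minimal usage sketch, using only the functions shown in these hunks:

	MFT_RECORD *m = map_mft_record(ni);	/* takes ni->mrec_lock */
	if (IS_ERR(m))
		return PTR_ERR(m);
	/* ... inspect or modify the mft record ... */
	unmap_mft_record(ni);			/* releases ni->mrec_lock */
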
@@ -217,7 +218,7 @@ void unmap_mft_record(ntfs_inode *ni)
217 ntfs_debug("Entering for mft_no 0x%lx.", ni->mft_no); 218 ntfs_debug("Entering for mft_no 0x%lx.", ni->mft_no);
218 219
219 unmap_mft_record_page(ni); 220 unmap_mft_record_page(ni);
220 up(&ni->mrec_lock); 221 mutex_unlock(&ni->mrec_lock);
221 atomic_dec(&ni->count); 222 atomic_dec(&ni->count);
222 /* 223 /*
223 * If pure ntfs_inode, i.e. no vfs inode attached, we leave it to 224 * If pure ntfs_inode, i.e. no vfs inode attached, we leave it to
@@ -261,7 +262,7 @@ MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref,
261 * in which case just return it. If not found, add it to the base 262 * in which case just return it. If not found, add it to the base
262 * inode before returning it. 263 * inode before returning it.
263 */ 264 */
264 down(&base_ni->extent_lock); 265 mutex_lock(&base_ni->extent_lock);
265 if (base_ni->nr_extents > 0) { 266 if (base_ni->nr_extents > 0) {
266 extent_nis = base_ni->ext.extent_ntfs_inos; 267 extent_nis = base_ni->ext.extent_ntfs_inos;
267 for (i = 0; i < base_ni->nr_extents; i++) { 268 for (i = 0; i < base_ni->nr_extents; i++) {
@@ -274,7 +275,7 @@ MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref,
274 } 275 }
275 } 276 }
276 if (likely(ni != NULL)) { 277 if (likely(ni != NULL)) {
277 up(&base_ni->extent_lock); 278 mutex_unlock(&base_ni->extent_lock);
278 atomic_dec(&base_ni->count); 279 atomic_dec(&base_ni->count);
279 /* We found the record; just have to map and return it. */ 280 /* We found the record; just have to map and return it. */
280 m = map_mft_record(ni); 281 m = map_mft_record(ni);
@@ -301,7 +302,7 @@ map_err_out:
301 /* Record wasn't there. Get a new ntfs inode and initialize it. */ 302 /* Record wasn't there. Get a new ntfs inode and initialize it. */
302 ni = ntfs_new_extent_inode(base_ni->vol->sb, mft_no); 303 ni = ntfs_new_extent_inode(base_ni->vol->sb, mft_no);
303 if (unlikely(!ni)) { 304 if (unlikely(!ni)) {
304 up(&base_ni->extent_lock); 305 mutex_unlock(&base_ni->extent_lock);
305 atomic_dec(&base_ni->count); 306 atomic_dec(&base_ni->count);
306 return ERR_PTR(-ENOMEM); 307 return ERR_PTR(-ENOMEM);
307 } 308 }
@@ -312,7 +313,7 @@ map_err_out:
312 /* Now map the record. */ 313 /* Now map the record. */
313 m = map_mft_record(ni); 314 m = map_mft_record(ni);
314 if (IS_ERR(m)) { 315 if (IS_ERR(m)) {
315 up(&base_ni->extent_lock); 316 mutex_unlock(&base_ni->extent_lock);
316 atomic_dec(&base_ni->count); 317 atomic_dec(&base_ni->count);
317 ntfs_clear_extent_inode(ni); 318 ntfs_clear_extent_inode(ni);
318 goto map_err_out; 319 goto map_err_out;
@@ -347,14 +348,14 @@ map_err_out:
347 base_ni->ext.extent_ntfs_inos = tmp; 348 base_ni->ext.extent_ntfs_inos = tmp;
348 } 349 }
349 base_ni->ext.extent_ntfs_inos[base_ni->nr_extents++] = ni; 350 base_ni->ext.extent_ntfs_inos[base_ni->nr_extents++] = ni;
350 up(&base_ni->extent_lock); 351 mutex_unlock(&base_ni->extent_lock);
351 atomic_dec(&base_ni->count); 352 atomic_dec(&base_ni->count);
352 ntfs_debug("Done 2."); 353 ntfs_debug("Done 2.");
353 *ntfs_ino = ni; 354 *ntfs_ino = ni;
354 return m; 355 return m;
355unm_err_out: 356unm_err_out:
356 unmap_mft_record(ni); 357 unmap_mft_record(ni);
357 up(&base_ni->extent_lock); 358 mutex_unlock(&base_ni->extent_lock);
358 atomic_dec(&base_ni->count); 359 atomic_dec(&base_ni->count);
359 /* 360 /*
360 * If the extent inode was not attached to the base inode we need to 361 * If the extent inode was not attached to the base inode we need to
@@ -399,12 +400,12 @@ void __mark_mft_record_dirty(ntfs_inode *ni)
399 BUG_ON(NInoAttr(ni)); 400 BUG_ON(NInoAttr(ni));
400 mark_ntfs_record_dirty(ni->page, ni->page_ofs); 401 mark_ntfs_record_dirty(ni->page, ni->page_ofs);
401 /* Determine the base vfs inode and mark it dirty, too. */ 402 /* Determine the base vfs inode and mark it dirty, too. */
402 down(&ni->extent_lock); 403 mutex_lock(&ni->extent_lock);
403 if (likely(ni->nr_extents >= 0)) 404 if (likely(ni->nr_extents >= 0))
404 base_ni = ni; 405 base_ni = ni;
405 else 406 else
406 base_ni = ni->ext.base_ntfs_ino; 407 base_ni = ni->ext.base_ntfs_ino;
407 up(&ni->extent_lock); 408 mutex_unlock(&ni->extent_lock);
408 __mark_inode_dirty(VFS_I(base_ni), I_DIRTY_SYNC | I_DIRTY_DATASYNC); 409 __mark_inode_dirty(VFS_I(base_ni), I_DIRTY_SYNC | I_DIRTY_DATASYNC);
409} 410}
410 411
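
The base-inode resolution above works because nr_extents doubles as a discriminator: it is >= 0 for a base ntfs inode and negative for an extent inode, in which case the ext union holds the base pointer. The invariant, condensed into a sketch:

	mutex_lock(&ni->extent_lock);
	base_ni = (ni->nr_extents >= 0) ? ni	/* @ni is itself a base inode */
					: ni->ext.base_ntfs_ino;
	mutex_unlock(&ni->extent_lock);
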
@@ -650,10 +651,7 @@ err_out:
650 * fs/ntfs/aops.c::mark_ntfs_record_dirty(). 651 * fs/ntfs/aops.c::mark_ntfs_record_dirty().
651 * 652 *
652 * On success, clean the mft record and return 0. On error, leave the mft 653 * On success, clean the mft record and return 0. On error, leave the mft
653 * record dirty and return -errno. The caller should call make_bad_inode() on 654 * record dirty and return -errno.
654 * the base inode to ensure no more access happens to this inode. We do not do
655 * it here as the caller may want to finish writing other extent mft records
656 * first to minimize on-disk metadata inconsistencies.
657 * 655 *
658 * NOTE: We always perform synchronous i/o and ignore the @sync parameter. 656 * NOTE: We always perform synchronous i/o and ignore the @sync parameter.
659 * However, if the mft record has a counterpart in the mft mirror and @sync is 657 * However, if the mft record has a counterpart in the mft mirror and @sync is
@@ -983,7 +981,7 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no,
983 } 981 }
984 ntfs_debug("Inode 0x%lx is not dirty.", mft_no); 982 ntfs_debug("Inode 0x%lx is not dirty.", mft_no);
985 /* The inode is not dirty, try to take the mft record lock. */ 983 /* The inode is not dirty, try to take the mft record lock. */
986 if (unlikely(down_trylock(&ni->mrec_lock))) { 984 if (unlikely(!mutex_trylock(&ni->mrec_lock))) {
987 ntfs_debug("Mft record 0x%lx is already locked, do " 985 ntfs_debug("Mft record 0x%lx is already locked, do "
988 "not write it.", mft_no); 986 "not write it.", mft_no);
989 atomic_dec(&ni->count); 987 atomic_dec(&ni->count);
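
Note the inverted polarity in this conversion: down_trylock() returns nonzero on failure, while mutex_trylock() returns nonzero on success, hence the added negation. A minimal sketch of the non-blocking pattern, assuming <linux/mutex.h>:

	if (!mutex_trylock(&ni->mrec_lock)) {
		/* Lock held elsewhere; back off rather than sleep. */
		return FALSE;
	}
	/* ... work on the record ... */
	mutex_unlock(&ni->mrec_lock);
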
@@ -1043,13 +1041,13 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no,
1043 * corresponding to this extent mft record attached. 1041 * corresponding to this extent mft record attached.
1044 */ 1042 */
1045 ni = NTFS_I(vi); 1043 ni = NTFS_I(vi);
1046 down(&ni->extent_lock); 1044 mutex_lock(&ni->extent_lock);
1047 if (ni->nr_extents <= 0) { 1045 if (ni->nr_extents <= 0) {
1048 /* 1046 /*
1049 * The base inode has no attached extent inodes, write this 1047 * The base inode has no attached extent inodes, write this
1050 * extent mft record. 1048 * extent mft record.
1051 */ 1049 */
1052 up(&ni->extent_lock); 1050 mutex_unlock(&ni->extent_lock);
1053 iput(vi); 1051 iput(vi);
1054 ntfs_debug("Base inode 0x%lx has no attached extent inodes, " 1052 ntfs_debug("Base inode 0x%lx has no attached extent inodes, "
1055 "write the extent record.", na.mft_no); 1053 "write the extent record.", na.mft_no);
@@ -1072,7 +1070,7 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no,
1072 * extent mft record. 1070 * extent mft record.
1073 */ 1071 */
1074 if (!eni) { 1072 if (!eni) {
1075 up(&ni->extent_lock); 1073 mutex_unlock(&ni->extent_lock);
1076 iput(vi); 1074 iput(vi);
1077 ntfs_debug("Extent inode 0x%lx is not attached to its base " 1075 ntfs_debug("Extent inode 0x%lx is not attached to its base "
1078 "inode 0x%lx, write the extent record.", 1076 "inode 0x%lx, write the extent record.",
@@ -1083,12 +1081,12 @@ BOOL ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no,
1083 mft_no, na.mft_no); 1081 mft_no, na.mft_no);
1084 /* Take a reference to the extent ntfs inode. */ 1082 /* Take a reference to the extent ntfs inode. */
1085 atomic_inc(&eni->count); 1083 atomic_inc(&eni->count);
1086 up(&ni->extent_lock); 1084 mutex_unlock(&ni->extent_lock);
1087 /* 1085 /*
1088 * Found the extent inode corresponding to this extent mft record. 1086 * Found the extent inode corresponding to this extent mft record.
1089 * Try to take the mft record lock. 1087 * Try to take the mft record lock.
1090 */ 1088 */
1091 if (unlikely(down_trylock(&eni->mrec_lock))) { 1089 if (unlikely(!mutex_trylock(&eni->mrec_lock))) {
1092 atomic_dec(&eni->count); 1090 atomic_dec(&eni->count);
1093 iput(vi); 1091 iput(vi);
1094 ntfs_debug("Extent mft record 0x%lx is already locked, do " 1092 ntfs_debug("Extent mft record 0x%lx is already locked, do "
@@ -2711,7 +2709,7 @@ mft_rec_already_initialized:
2711 * have its page mapped and it is very easy to do. 2709 * have its page mapped and it is very easy to do.
2712 */ 2710 */
2713 atomic_inc(&ni->count); 2711 atomic_inc(&ni->count);
2714 down(&ni->mrec_lock); 2712 mutex_lock(&ni->mrec_lock);
2715 ni->page = page; 2713 ni->page = page;
2716 ni->page_ofs = ofs; 2714 ni->page_ofs = ofs;
2717 /* 2715 /*
@@ -2798,22 +2796,22 @@ int ntfs_extent_mft_record_free(ntfs_inode *ni, MFT_RECORD *m)
2798 BUG_ON(NInoAttr(ni)); 2796 BUG_ON(NInoAttr(ni));
2799 BUG_ON(ni->nr_extents != -1); 2797 BUG_ON(ni->nr_extents != -1);
2800 2798
2801 down(&ni->extent_lock); 2799 mutex_lock(&ni->extent_lock);
2802 base_ni = ni->ext.base_ntfs_ino; 2800 base_ni = ni->ext.base_ntfs_ino;
2803 up(&ni->extent_lock); 2801 mutex_unlock(&ni->extent_lock);
2804 2802
2805 BUG_ON(base_ni->nr_extents <= 0); 2803 BUG_ON(base_ni->nr_extents <= 0);
2806 2804
2807 ntfs_debug("Entering for extent inode 0x%lx, base inode 0x%lx.\n", 2805 ntfs_debug("Entering for extent inode 0x%lx, base inode 0x%lx.\n",
2808 mft_no, base_ni->mft_no); 2806 mft_no, base_ni->mft_no);
2809 2807
2810 down(&base_ni->extent_lock); 2808 mutex_lock(&base_ni->extent_lock);
2811 2809
2812 /* Make sure we are holding the only reference to the extent inode. */ 2810 /* Make sure we are holding the only reference to the extent inode. */
2813 if (atomic_read(&ni->count) > 2) { 2811 if (atomic_read(&ni->count) > 2) {
2814 ntfs_error(vol->sb, "Tried to free busy extent inode 0x%lx, " 2812 ntfs_error(vol->sb, "Tried to free busy extent inode 0x%lx, "
2815 "not freeing.", base_ni->mft_no); 2813 "not freeing.", base_ni->mft_no);
2816 up(&base_ni->extent_lock); 2814 mutex_unlock(&base_ni->extent_lock);
2817 return -EBUSY; 2815 return -EBUSY;
2818 } 2816 }
2819 2817
@@ -2831,7 +2829,7 @@ int ntfs_extent_mft_record_free(ntfs_inode *ni, MFT_RECORD *m)
2831 break; 2829 break;
2832 } 2830 }
2833 2831
2834 up(&base_ni->extent_lock); 2832 mutex_unlock(&base_ni->extent_lock);
2835 2833
2836 if (unlikely(err)) { 2834 if (unlikely(err)) {
2837 ntfs_error(vol->sb, "Extent inode 0x%lx is not attached to " 2835 ntfs_error(vol->sb, "Extent inode 0x%lx is not attached to "
@@ -2890,7 +2888,7 @@ rollback_error:
2890 return 0; 2888 return 0;
2891rollback: 2889rollback:
2892 /* Rollback what we did... */ 2890 /* Rollback what we did... */
2893 down(&base_ni->extent_lock); 2891 mutex_lock(&base_ni->extent_lock);
2894 extent_nis = base_ni->ext.extent_ntfs_inos; 2892 extent_nis = base_ni->ext.extent_ntfs_inos;
2895 if (!(base_ni->nr_extents & 3)) { 2893 if (!(base_ni->nr_extents & 3)) {
2896 int new_size = (base_ni->nr_extents + 4) * sizeof(ntfs_inode*); 2894 int new_size = (base_ni->nr_extents + 4) * sizeof(ntfs_inode*);
@@ -2899,7 +2897,7 @@ rollback:
2899 if (unlikely(!extent_nis)) { 2897 if (unlikely(!extent_nis)) {
2900 ntfs_error(vol->sb, "Failed to allocate internal " 2898 ntfs_error(vol->sb, "Failed to allocate internal "
2901 "buffer during rollback.%s", es); 2899 "buffer during rollback.%s", es);
2902 up(&base_ni->extent_lock); 2900 mutex_unlock(&base_ni->extent_lock);
2903 NVolSetErrors(vol); 2901 NVolSetErrors(vol);
2904 goto rollback_error; 2902 goto rollback_error;
2905 } 2903 }
@@ -2914,7 +2912,7 @@ rollback:
2914 m->flags |= MFT_RECORD_IN_USE; 2912 m->flags |= MFT_RECORD_IN_USE;
2915 m->sequence_number = old_seq_no; 2913 m->sequence_number = old_seq_no;
2916 extent_nis[base_ni->nr_extents++] = ni; 2914 extent_nis[base_ni->nr_extents++] = ni;
2917 up(&base_ni->extent_lock); 2915 mutex_unlock(&base_ni->extent_lock);
2918 mark_mft_record_dirty(ni); 2916 mark_mft_record_dirty(ni);
2919 return err; 2917 return err;
2920} 2918}
diff --git a/fs/ntfs/mft.h b/fs/ntfs/mft.h
index 407de2cef1d6..639cd1bab08b 100644
--- a/fs/ntfs/mft.h
+++ b/fs/ntfs/mft.h
@@ -97,10 +97,7 @@ extern int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync);
97 * uptodate. 97 * uptodate.
98 * 98 *
99 * On success, clean the mft record and return 0. On error, leave the mft 99 * On success, clean the mft record and return 0. On error, leave the mft
100 * record dirty and return -errno. The caller should call make_bad_inode() on 100 * record dirty and return -errno.
101 * the base inode to ensure no more access happens to this inode. We do not do
102 * it here as the caller may want to finish writing other extent mft records
103 * first to minimize on-disk metadata inconsistencies.
104 */ 101 */
105static inline int write_mft_record(ntfs_inode *ni, MFT_RECORD *m, int sync) 102static inline int write_mft_record(ntfs_inode *ni, MFT_RECORD *m, int sync)
106{ 103{
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c
index 5ea9eb93af62..eddb2247cec5 100644
--- a/fs/ntfs/namei.c
+++ b/fs/ntfs/namei.c
@@ -2,7 +2,7 @@
2 * namei.c - NTFS kernel directory inode operations. Part of the Linux-NTFS 2 * namei.c - NTFS kernel directory inode operations. Part of the Linux-NTFS
3 * project. 3 * project.
4 * 4 *
5 * Copyright (c) 2001-2004 Anton Altaparmakov 5 * Copyright (c) 2001-2006 Anton Altaparmakov
6 * 6 *
7 * This program/include file is free software; you can redistribute it and/or 7 * This program/include file is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as published 8 * modify it under the terms of the GNU General Public License as published
@@ -115,7 +115,9 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
115 uname_len = ntfs_nlstoucs(vol, dent->d_name.name, dent->d_name.len, 115 uname_len = ntfs_nlstoucs(vol, dent->d_name.name, dent->d_name.len,
116 &uname); 116 &uname);
117 if (uname_len < 0) { 117 if (uname_len < 0) {
118 ntfs_error(vol->sb, "Failed to convert name to Unicode."); 118 if (uname_len != -ENAMETOOLONG)
119 ntfs_error(vol->sb, "Failed to convert name to "
120 "Unicode.");
119 return ERR_PTR(uname_len); 121 return ERR_PTR(uname_len);
120 } 122 }
121 mref = ntfs_lookup_inode_by_name(NTFS_I(dir_ino), uname, uname_len, 123 mref = ntfs_lookup_inode_by_name(NTFS_I(dir_ino), uname, uname_len,
@@ -157,7 +159,7 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
157 /* Return the error code. */ 159 /* Return the error code. */
158 return (struct dentry *)dent_inode; 160 return (struct dentry *)dent_inode;
159 } 161 }
160 /* It is guaranteed that name is no longer allocated at this point. */ 162 /* It is guaranteed that @name is no longer allocated at this point. */
161 if (MREF_ERR(mref) == -ENOENT) { 163 if (MREF_ERR(mref) == -ENOENT) {
162 ntfs_debug("Entry was not found, adding negative dentry."); 164 ntfs_debug("Entry was not found, adding negative dentry.");
163 /* The dcache will handle negative entries. */ 165 /* The dcache will handle negative entries. */
@@ -168,7 +170,6 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
168 ntfs_error(vol->sb, "ntfs_lookup_inode_by_name() failed with error " 170 ntfs_error(vol->sb, "ntfs_lookup_inode_by_name() failed with error "
169 "code %i.", -MREF_ERR(mref)); 171 "code %i.", -MREF_ERR(mref));
170 return ERR_PTR(MREF_ERR(mref)); 172 return ERR_PTR(MREF_ERR(mref));
171
172 // TODO: Consider moving this lot to a separate function! (AIA) 173 // TODO: Consider moving this lot to a separate function! (AIA)
173handle_name: 174handle_name:
174 { 175 {
diff --git a/fs/ntfs/ntfs.h b/fs/ntfs/ntfs.h
index 653d2a5c4899..0624c8ef4d9c 100644
--- a/fs/ntfs/ntfs.h
+++ b/fs/ntfs/ntfs.h
@@ -91,7 +91,7 @@ extern void free_compression_buffers(void);
91 91
92/* From fs/ntfs/super.c */ 92/* From fs/ntfs/super.c */
93#define default_upcase_len 0x10000 93#define default_upcase_len 0x10000
94extern struct semaphore ntfs_lock; 94extern struct mutex ntfs_lock;
95 95
96typedef struct { 96typedef struct {
97 int val; 97 int val;
diff --git a/fs/ntfs/runlist.c b/fs/ntfs/runlist.c
index 061b5ff6b73c..eb52b801512b 100644
--- a/fs/ntfs/runlist.c
+++ b/fs/ntfs/runlist.c
@@ -381,6 +381,7 @@ static inline runlist_element *ntfs_rl_insert(runlist_element *dst,
381static inline runlist_element *ntfs_rl_replace(runlist_element *dst, 381static inline runlist_element *ntfs_rl_replace(runlist_element *dst,
382 int dsize, runlist_element *src, int ssize, int loc) 382 int dsize, runlist_element *src, int ssize, int loc)
383{ 383{
384 signed delta;
384 BOOL left = FALSE; /* Left end of @src needs merging. */ 385 BOOL left = FALSE; /* Left end of @src needs merging. */
385 BOOL right = FALSE; /* Right end of @src needs merging. */ 386 BOOL right = FALSE; /* Right end of @src needs merging. */
386 int tail; /* Start of tail of @dst. */ 387 int tail; /* Start of tail of @dst. */
@@ -396,11 +397,14 @@ static inline runlist_element *ntfs_rl_replace(runlist_element *dst,
396 left = ntfs_are_rl_mergeable(dst + loc - 1, src); 397 left = ntfs_are_rl_mergeable(dst + loc - 1, src);
397 /* 398 /*
398 * Allocate some space. We will need less if the left, right, or both 399 * Allocate some space. We will need less if the left, right, or both
399 * ends get merged. 400 * ends get merged. The -1 accounts for the run being replaced.
400 */ 401 */
401 dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left - right); 402 delta = ssize - 1 - left - right;
402 if (IS_ERR(dst)) 403 if (delta > 0) {
403 return dst; 404 dst = ntfs_rl_realloc(dst, dsize, dsize + delta);
405 if (IS_ERR(dst))
406 return dst;
407 }
404 /* 408 /*
405 * We are guaranteed to succeed from here so can start modifying the 409 * We are guaranteed to succeed from here so can start modifying the
406 * original runlists. 410 * original runlists.
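
A short worked example of the new delta computation (run counts are hypothetical):

	/*
	 * delta = ssize - 1 - left - right, the -1 being the replaced run:
	 *   ssize = 3, left = 1, right = 0  ->  delta = 1, realloc and grow;
	 *   ssize = 2, left = 1, right = 1  ->  delta = -1, the list shrinks
	 *                                       and the realloc is skipped.
	 */
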
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 368a8ec10668..7646b5059389 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -1099,26 +1099,38 @@ static BOOL check_mft_mirror(ntfs_volume *vol)
1099 kmirr = page_address(mirr_page); 1099 kmirr = page_address(mirr_page);
1100 ++index; 1100 ++index;
1101 } 1101 }
1102 /* Make sure the record is ok. */ 1102 /* Do not check the record if it is not in use. */
1103 if (ntfs_is_baad_recordp((le32*)kmft)) { 1103 if (((MFT_RECORD*)kmft)->flags & MFT_RECORD_IN_USE) {
1104 ntfs_error(sb, "Incomplete multi sector transfer " 1104 /* Make sure the record is ok. */
1105 "detected in mft record %i.", i); 1105 if (ntfs_is_baad_recordp((le32*)kmft)) {
1106 ntfs_error(sb, "Incomplete multi sector "
1107 "transfer detected in mft "
1108 "record %i.", i);
1106mm_unmap_out: 1109mm_unmap_out:
1107 ntfs_unmap_page(mirr_page); 1110 ntfs_unmap_page(mirr_page);
1108mft_unmap_out: 1111mft_unmap_out:
1109 ntfs_unmap_page(mft_page); 1112 ntfs_unmap_page(mft_page);
1110 return FALSE; 1113 return FALSE;
1114 }
1111 } 1115 }
1112 if (ntfs_is_baad_recordp((le32*)kmirr)) { 1116 /* Do not check the mirror record if it is not in use. */
1113 ntfs_error(sb, "Incomplete multi sector transfer " 1117 if (((MFT_RECORD*)kmirr)->flags & MFT_RECORD_IN_USE) {
1114 "detected in mft mirror record %i.", i); 1118 if (ntfs_is_baad_recordp((le32*)kmirr)) {
1115 goto mm_unmap_out; 1119 ntfs_error(sb, "Incomplete multi sector "
1120 "transfer detected in mft "
1121 "mirror record %i.", i);
1122 goto mm_unmap_out;
1123 }
1116 } 1124 }
1117 /* Get the amount of data in the current record. */ 1125 /* Get the amount of data in the current record. */
1118 bytes = le32_to_cpu(((MFT_RECORD*)kmft)->bytes_in_use); 1126 bytes = le32_to_cpu(((MFT_RECORD*)kmft)->bytes_in_use);
1119 if (!bytes || bytes > vol->mft_record_size) { 1127 if (bytes < sizeof(MFT_RECORD_OLD) ||
1128 bytes > vol->mft_record_size ||
1129 ntfs_is_baad_recordp((le32*)kmft)) {
1120 bytes = le32_to_cpu(((MFT_RECORD*)kmirr)->bytes_in_use); 1130 bytes = le32_to_cpu(((MFT_RECORD*)kmirr)->bytes_in_use);
1121 if (!bytes || bytes > vol->mft_record_size) 1131 if (bytes < sizeof(MFT_RECORD_OLD) ||
1132 bytes > vol->mft_record_size ||
1133 ntfs_is_baad_recordp((le32*)kmirr))
1122 bytes = vol->mft_record_size; 1134 bytes = vol->mft_record_size;
1123 } 1135 }
1124 /* Compare the two records. */ 1136 /* Compare the two records. */
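
The bytes_in_use sanity check now rejects three cases instead of one. A hedged sketch of the predicate factored into a helper (the helper name is hypothetical; the calls are the ones used in the hunk):

	static inline BOOL mft_record_bytes_ok(MFT_RECORD *m,
			const ntfs_volume *vol)
	{
		u32 bytes = le32_to_cpu(m->bytes_in_use);

		return bytes >= sizeof(MFT_RECORD_OLD) &&
				bytes <= vol->mft_record_size &&
				!ntfs_is_baad_recordp((le32*)m);
	}
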
@@ -1665,11 +1677,11 @@ read_partial_upcase_page:
1665 ntfs_debug("Read %llu bytes from $UpCase (expected %zu bytes).", 1677 ntfs_debug("Read %llu bytes from $UpCase (expected %zu bytes).",
1666 i_size, 64 * 1024 * sizeof(ntfschar)); 1678 i_size, 64 * 1024 * sizeof(ntfschar));
1667 iput(ino); 1679 iput(ino);
1668 down(&ntfs_lock); 1680 mutex_lock(&ntfs_lock);
1669 if (!default_upcase) { 1681 if (!default_upcase) {
1670 ntfs_debug("Using volume specified $UpCase since default is " 1682 ntfs_debug("Using volume specified $UpCase since default is "
1671 "not present."); 1683 "not present.");
1672 up(&ntfs_lock); 1684 mutex_unlock(&ntfs_lock);
1673 return TRUE; 1685 return TRUE;
1674 } 1686 }
1675 max = default_upcase_len; 1687 max = default_upcase_len;
@@ -1683,12 +1695,12 @@ read_partial_upcase_page:
1683 vol->upcase = default_upcase; 1695 vol->upcase = default_upcase;
1684 vol->upcase_len = max; 1696 vol->upcase_len = max;
1685 ntfs_nr_upcase_users++; 1697 ntfs_nr_upcase_users++;
1686 up(&ntfs_lock); 1698 mutex_unlock(&ntfs_lock);
1687 ntfs_debug("Volume specified $UpCase matches default. Using " 1699 ntfs_debug("Volume specified $UpCase matches default. Using "
1688 "default."); 1700 "default.");
1689 return TRUE; 1701 return TRUE;
1690 } 1702 }
1691 up(&ntfs_lock); 1703 mutex_unlock(&ntfs_lock);
1692 ntfs_debug("Using volume specified $UpCase since it does not match " 1704 ntfs_debug("Using volume specified $UpCase since it does not match "
1693 "the default."); 1705 "the default.");
1694 return TRUE; 1706 return TRUE;
@@ -1697,17 +1709,17 @@ iput_upcase_failed:
1697 ntfs_free(vol->upcase); 1709 ntfs_free(vol->upcase);
1698 vol->upcase = NULL; 1710 vol->upcase = NULL;
1699upcase_failed: 1711upcase_failed:
1700 down(&ntfs_lock); 1712 mutex_lock(&ntfs_lock);
1701 if (default_upcase) { 1713 if (default_upcase) {
1702 vol->upcase = default_upcase; 1714 vol->upcase = default_upcase;
1703 vol->upcase_len = default_upcase_len; 1715 vol->upcase_len = default_upcase_len;
1704 ntfs_nr_upcase_users++; 1716 ntfs_nr_upcase_users++;
1705 up(&ntfs_lock); 1717 mutex_unlock(&ntfs_lock);
1706 ntfs_error(sb, "Failed to load $UpCase from the volume. Using " 1718 ntfs_error(sb, "Failed to load $UpCase from the volume. Using "
1707 "default."); 1719 "default.");
1708 return TRUE; 1720 return TRUE;
1709 } 1721 }
1710 up(&ntfs_lock); 1722 mutex_unlock(&ntfs_lock);
1711 ntfs_error(sb, "Failed to initialize upcase table."); 1723 ntfs_error(sb, "Failed to initialize upcase table.");
1712 return FALSE; 1724 return FALSE;
1713} 1725}
@@ -2183,12 +2195,12 @@ iput_attrdef_err_out:
2183iput_upcase_err_out: 2195iput_upcase_err_out:
2184#endif /* NTFS_RW */ 2196#endif /* NTFS_RW */
2185 vol->upcase_len = 0; 2197 vol->upcase_len = 0;
2186 down(&ntfs_lock); 2198 mutex_lock(&ntfs_lock);
2187 if (vol->upcase == default_upcase) { 2199 if (vol->upcase == default_upcase) {
2188 ntfs_nr_upcase_users--; 2200 ntfs_nr_upcase_users--;
2189 vol->upcase = NULL; 2201 vol->upcase = NULL;
2190 } 2202 }
2191 up(&ntfs_lock); 2203 mutex_unlock(&ntfs_lock);
2192 if (vol->upcase) { 2204 if (vol->upcase) {
2193 ntfs_free(vol->upcase); 2205 ntfs_free(vol->upcase);
2194 vol->upcase = NULL; 2206 vol->upcase = NULL;
@@ -2393,7 +2405,7 @@ static void ntfs_put_super(struct super_block *sb)
2393 * Destroy the global default upcase table if necessary. Also decrease 2405 * Destroy the global default upcase table if necessary. Also decrease
2394 * the number of upcase users if we are a user. 2406 * the number of upcase users if we are a user.
2395 */ 2407 */
2396 down(&ntfs_lock); 2408 mutex_lock(&ntfs_lock);
2397 if (vol->upcase == default_upcase) { 2409 if (vol->upcase == default_upcase) {
2398 ntfs_nr_upcase_users--; 2410 ntfs_nr_upcase_users--;
2399 vol->upcase = NULL; 2411 vol->upcase = NULL;
@@ -2404,7 +2416,7 @@ static void ntfs_put_super(struct super_block *sb)
2404 } 2416 }
2405 if (vol->cluster_size <= 4096 && !--ntfs_nr_compression_users) 2417 if (vol->cluster_size <= 4096 && !--ntfs_nr_compression_users)
2406 free_compression_buffers(); 2418 free_compression_buffers();
2407 up(&ntfs_lock); 2419 mutex_unlock(&ntfs_lock);
2408 if (vol->upcase) { 2420 if (vol->upcase) {
2409 ntfs_free(vol->upcase); 2421 ntfs_free(vol->upcase);
2410 vol->upcase = NULL; 2422 vol->upcase = NULL;
@@ -2878,7 +2890,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
2878 ntfs_error(sb, "Failed to load essential metadata."); 2890 ntfs_error(sb, "Failed to load essential metadata.");
2879 goto iput_tmp_ino_err_out_now; 2891 goto iput_tmp_ino_err_out_now;
2880 } 2892 }
2881 down(&ntfs_lock); 2893 mutex_lock(&ntfs_lock);
2882 /* 2894 /*
2883 * The current mount is a compression user if the cluster size is 2895 * The current mount is a compression user if the cluster size is
2884 * less than or equal 4kiB. 2896 * less than or equal 4kiB.
@@ -2889,7 +2901,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
2889 ntfs_error(NULL, "Failed to allocate buffers " 2901 ntfs_error(NULL, "Failed to allocate buffers "
2890 "for compression engine."); 2902 "for compression engine.");
2891 ntfs_nr_compression_users--; 2903 ntfs_nr_compression_users--;
2892 up(&ntfs_lock); 2904 mutex_unlock(&ntfs_lock);
2893 goto iput_tmp_ino_err_out_now; 2905 goto iput_tmp_ino_err_out_now;
2894 } 2906 }
2895 } 2907 }
@@ -2901,7 +2913,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
2901 if (!default_upcase) 2913 if (!default_upcase)
2902 default_upcase = generate_default_upcase(); 2914 default_upcase = generate_default_upcase();
2903 ntfs_nr_upcase_users++; 2915 ntfs_nr_upcase_users++;
2904 up(&ntfs_lock); 2916 mutex_unlock(&ntfs_lock);
2905 /* 2917 /*
2906 * From now on, ignore @silent parameter. If we fail below this line, 2918 * From now on, ignore @silent parameter. If we fail below this line,
2907 * it will be due to a corrupt fs or a system error, so we report it. 2919 * it will be due to a corrupt fs or a system error, so we report it.
@@ -2919,12 +2931,12 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
2919 atomic_inc(&vol->root_ino->i_count); 2931 atomic_inc(&vol->root_ino->i_count);
2920 ntfs_debug("Exiting, status successful."); 2932 ntfs_debug("Exiting, status successful.");
2921 /* Release the default upcase if it has no users. */ 2933 /* Release the default upcase if it has no users. */
2922 down(&ntfs_lock); 2934 mutex_lock(&ntfs_lock);
2923 if (!--ntfs_nr_upcase_users && default_upcase) { 2935 if (!--ntfs_nr_upcase_users && default_upcase) {
2924 ntfs_free(default_upcase); 2936 ntfs_free(default_upcase);
2925 default_upcase = NULL; 2937 default_upcase = NULL;
2926 } 2938 }
2927 up(&ntfs_lock); 2939 mutex_unlock(&ntfs_lock);
2928 sb->s_export_op = &ntfs_export_ops; 2940 sb->s_export_op = &ntfs_export_ops;
2929 lock_kernel(); 2941 lock_kernel();
2930 return 0; 2942 return 0;
@@ -2992,12 +3004,12 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
2992 vol->attrdef = NULL; 3004 vol->attrdef = NULL;
2993 } 3005 }
2994 vol->upcase_len = 0; 3006 vol->upcase_len = 0;
2995 down(&ntfs_lock); 3007 mutex_lock(&ntfs_lock);
2996 if (vol->upcase == default_upcase) { 3008 if (vol->upcase == default_upcase) {
2997 ntfs_nr_upcase_users--; 3009 ntfs_nr_upcase_users--;
2998 vol->upcase = NULL; 3010 vol->upcase = NULL;
2999 } 3011 }
3000 up(&ntfs_lock); 3012 mutex_unlock(&ntfs_lock);
3001 if (vol->upcase) { 3013 if (vol->upcase) {
3002 ntfs_free(vol->upcase); 3014 ntfs_free(vol->upcase);
3003 vol->upcase = NULL; 3015 vol->upcase = NULL;
@@ -3012,14 +3024,14 @@ unl_upcase_iput_tmp_ino_err_out_now:
3012 * Decrease the number of upcase users and destroy the global default 3024 * Decrease the number of upcase users and destroy the global default
3013 * upcase table if necessary. 3025 * upcase table if necessary.
3014 */ 3026 */
3015 down(&ntfs_lock); 3027 mutex_lock(&ntfs_lock);
3016 if (!--ntfs_nr_upcase_users && default_upcase) { 3028 if (!--ntfs_nr_upcase_users && default_upcase) {
3017 ntfs_free(default_upcase); 3029 ntfs_free(default_upcase);
3018 default_upcase = NULL; 3030 default_upcase = NULL;
3019 } 3031 }
3020 if (vol->cluster_size <= 4096 && !--ntfs_nr_compression_users) 3032 if (vol->cluster_size <= 4096 && !--ntfs_nr_compression_users)
3021 free_compression_buffers(); 3033 free_compression_buffers();
3022 up(&ntfs_lock); 3034 mutex_unlock(&ntfs_lock);
3023iput_tmp_ino_err_out_now: 3035iput_tmp_ino_err_out_now:
3024 iput(tmp_ino); 3036 iput(tmp_ino);
3025 if (vol->mft_ino && vol->mft_ino != tmp_ino) 3037 if (vol->mft_ino && vol->mft_ino != tmp_ino)
@@ -3078,8 +3090,8 @@ static void ntfs_big_inode_init_once(void *foo, struct kmem_cache *cachep,
3078struct kmem_cache *ntfs_attr_ctx_cache; 3090struct kmem_cache *ntfs_attr_ctx_cache;
3079struct kmem_cache *ntfs_index_ctx_cache; 3091struct kmem_cache *ntfs_index_ctx_cache;
3080 3092
3081/* Driver wide semaphore. */ 3093/* Driver wide mutex. */
3082DECLARE_MUTEX(ntfs_lock); 3094DEFINE_MUTEX(ntfs_lock);
3083 3095
3084static struct super_block *ntfs_get_sb(struct file_system_type *fs_type, 3096static struct super_block *ntfs_get_sb(struct file_system_type *fs_type,
3085 int flags, const char *dev_name, void *data) 3097 int flags, const char *dev_name, void *data)
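
DECLARE_MUTEX, despite its name, defined a struct semaphore initialized to 1; DEFINE_MUTEX defines a real struct mutex with debugging and lockdep support. A minimal sketch of the new declaration style, with a hypothetical lock name, assuming <linux/mutex.h>:

	static DEFINE_MUTEX(example_lock);	/* hypothetical, for illustration */

	static void example_critical_section(void)
	{
		mutex_lock(&example_lock);
		/* ... serialized work ... */
		mutex_unlock(&example_lock);
	}
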
@@ -3234,7 +3246,7 @@ static void __exit exit_ntfs_fs(void)
3234} 3246}
3235 3247
3236MODULE_AUTHOR("Anton Altaparmakov <aia21@cantab.net>"); 3248MODULE_AUTHOR("Anton Altaparmakov <aia21@cantab.net>");
3237MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2005 Anton Altaparmakov"); 3249MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2006 Anton Altaparmakov");
3238MODULE_VERSION(NTFS_VERSION); 3250MODULE_VERSION(NTFS_VERSION);
3239MODULE_LICENSE("GPL"); 3251MODULE_LICENSE("GPL");
3240#ifdef DEBUG 3252#ifdef DEBUG
diff --git a/fs/ntfs/unistr.c b/fs/ntfs/unistr.c
index 0ea887fc859c..b123c0fa6bf6 100644
--- a/fs/ntfs/unistr.c
+++ b/fs/ntfs/unistr.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * unistr.c - NTFS Unicode string handling. Part of the Linux-NTFS project. 2 * unistr.c - NTFS Unicode string handling. Part of the Linux-NTFS project.
3 * 3 *
4 * Copyright (c) 2001-2005 Anton Altaparmakov 4 * Copyright (c) 2001-2006 Anton Altaparmakov
5 * 5 *
6 * This program/include file is free software; you can redistribute it and/or 6 * This program/include file is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as published 7 * modify it under the terms of the GNU General Public License as published
@@ -19,6 +19,8 @@
19 * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 20 */
21 21
22#include <linux/slab.h>
23
22#include "types.h" 24#include "types.h"
23#include "debug.h" 25#include "debug.h"
24#include "ntfs.h" 26#include "ntfs.h"
@@ -242,7 +244,7 @@ int ntfs_file_compare_values(FILE_NAME_ATTR *file_name_attr1,
242 * map dictates, into a little endian, 2-byte Unicode string. 244 * map dictates, into a little endian, 2-byte Unicode string.
243 * 245 *
244 * This function allocates the string and the caller is responsible for 246 * This function allocates the string and the caller is responsible for
245 * calling kmem_cache_free(ntfs_name_cache, @outs); when finished with it. 247 * calling kmem_cache_free(ntfs_name_cache, *@outs); when finished with it.
246 * 248 *
247 * On success the function returns the number of Unicode characters written to 249 * On success the function returns the number of Unicode characters written to
248 * the output string *@outs (>= 0), not counting the terminating Unicode NULL 250 * the output string *@outs (>= 0), not counting the terminating Unicode NULL
@@ -262,37 +264,48 @@ int ntfs_nlstoucs(const ntfs_volume *vol, const char *ins,
262 wchar_t wc; 264 wchar_t wc;
263 int i, o, wc_len; 265 int i, o, wc_len;
264 266
265 /* We don't trust outside sources. */ 267 /* We do not trust outside sources. */
266 if (ins) { 268 if (likely(ins)) {
267 ucs = kmem_cache_alloc(ntfs_name_cache, SLAB_NOFS); 269 ucs = kmem_cache_alloc(ntfs_name_cache, SLAB_NOFS);
268 if (ucs) { 270 if (likely(ucs)) {
269 for (i = o = 0; i < ins_len; i += wc_len) { 271 for (i = o = 0; i < ins_len; i += wc_len) {
270 wc_len = nls->char2uni(ins + i, ins_len - i, 272 wc_len = nls->char2uni(ins + i, ins_len - i,
271 &wc); 273 &wc);
272 if (wc_len >= 0) { 274 if (likely(wc_len >= 0 &&
273 if (wc) { 275 o < NTFS_MAX_NAME_LEN)) {
276 if (likely(wc)) {
274 ucs[o++] = cpu_to_le16(wc); 277 ucs[o++] = cpu_to_le16(wc);
275 continue; 278 continue;
276 } /* else (!wc) */ 279 } /* else if (!wc) */
277 break; 280 break;
278 } /* else (wc_len < 0) */ 281 } /* else if (wc_len < 0 ||
279 goto conversion_err; 282 o >= NTFS_MAX_NAME_LEN) */
283 goto name_err;
280 } 284 }
281 ucs[o] = 0; 285 ucs[o] = 0;
282 *outs = ucs; 286 *outs = ucs;
283 return o; 287 return o;
284 } /* else (!ucs) */ 288 } /* else if (!ucs) */
285 ntfs_error(vol->sb, "Failed to allocate name from " 289 ntfs_error(vol->sb, "Failed to allocate buffer for converted "
286 "ntfs_name_cache!"); 290 "name from ntfs_name_cache.");
287 return -ENOMEM; 291 return -ENOMEM;
288 } /* else (!ins) */ 292 } /* else if (!ins) */
289 ntfs_error(NULL, "Received NULL pointer."); 293 ntfs_error(vol->sb, "Received NULL pointer.");
290 return -EINVAL; 294 return -EINVAL;
291conversion_err: 295name_err:
292 ntfs_error(vol->sb, "Name using character set %s contains characters "
293 "that cannot be converted to Unicode.", nls->charset);
294 kmem_cache_free(ntfs_name_cache, ucs); 296 kmem_cache_free(ntfs_name_cache, ucs);
295 return -EILSEQ; 297 if (wc_len < 0) {
298 ntfs_error(vol->sb, "Name using character set %s contains "
299 "characters that cannot be converted to "
300 "Unicode.", nls->charset);
301 i = -EILSEQ;
302 } else /* if (o >= NTFS_MAX_NAME_LEN) */ {
303 ntfs_error(vol->sb, "Name is too long (maximum length for a "
 304 "name on NTFS is %d Unicode characters).",
305 NTFS_MAX_NAME_LEN);
306 i = -ENAMETOOLONG;
307 }
308 return i;
296} 309}
297 310
298/** 311/**
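
The corrected kerneldoc spells out the caller contract: the converted name lives in ntfs_name_cache and must be freed by the caller. A minimal caller sketch; name and name_len are hypothetical caller variables, everything else comes from this file:

	ntfschar *uname = NULL;
	int uname_len = ntfs_nlstoucs(vol, name, name_len, &uname);

	if (uname_len < 0)	/* -EINVAL, -ENOMEM, -EILSEQ, or -ENAMETOOLONG */
		return uname_len;
	/* ... use uname[0 .. uname_len - 1] ... */
	kmem_cache_free(ntfs_name_cache, uname);
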
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 8dd3aafec499..09e1c57a86a0 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -959,7 +959,7 @@ static int ocfs2_initialize_mem_caches(void)
959 ocfs2_lock_cache = kmem_cache_create("ocfs2_lock", 959 ocfs2_lock_cache = kmem_cache_create("ocfs2_lock",
960 sizeof(struct ocfs2_journal_lock), 960 sizeof(struct ocfs2_journal_lock),
961 0, 961 0,
962 SLAB_NO_REAP|SLAB_HWCACHE_ALIGN, 962 SLAB_HWCACHE_ALIGN,
963 NULL, NULL); 963 NULL, NULL);
964 if (!ocfs2_lock_cache) 964 if (!ocfs2_lock_cache)
965 return -ENOMEM; 965 return -ENOMEM;
diff --git a/fs/open.c b/fs/open.c
index 70e0230d8e77..1091dadd6c38 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -973,7 +973,7 @@ repeat:
973 fdt = files_fdtable(files); 973 fdt = files_fdtable(files);
974 fd = find_next_zero_bit(fdt->open_fds->fds_bits, 974 fd = find_next_zero_bit(fdt->open_fds->fds_bits,
975 fdt->max_fdset, 975 fdt->max_fdset,
976 fdt->next_fd); 976 files->next_fd);
977 977
978 /* 978 /*
979 * N.B. For clone tasks sharing a files structure, this test 979 * N.B. For clone tasks sharing a files structure, this test
@@ -998,7 +998,7 @@ repeat:
998 998
999 FD_SET(fd, fdt->open_fds); 999 FD_SET(fd, fdt->open_fds);
1000 FD_CLR(fd, fdt->close_on_exec); 1000 FD_CLR(fd, fdt->close_on_exec);
1001 fdt->next_fd = fd + 1; 1001 files->next_fd = fd + 1;
1002#if 1 1002#if 1
1003 /* Sanity check */ 1003 /* Sanity check */
1004 if (fdt->fd[fd] != NULL) { 1004 if (fdt->fd[fd] != NULL) {
@@ -1019,8 +1019,8 @@ static void __put_unused_fd(struct files_struct *files, unsigned int fd)
1019{ 1019{
1020 struct fdtable *fdt = files_fdtable(files); 1020 struct fdtable *fdt = files_fdtable(files);
1021 __FD_CLR(fd, fdt->open_fds); 1021 __FD_CLR(fd, fdt->open_fds);
1022 if (fd < fdt->next_fd) 1022 if (fd < files->next_fd)
1023 fdt->next_fd = fd; 1023 files->next_fd = fd;
1024} 1024}
1025 1025
1026void fastcall put_unused_fd(unsigned int fd) 1026void fastcall put_unused_fd(unsigned int fd)
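
This hunk moves the next_fd allocation hint from the fdtable into files_struct, presumably so the hint lives with the files_struct itself rather than with each (replaceable) fdtable. A condensed sketch of the allocate/release cycle after the change, with locking as in the surrounding functions:

	/* Allocation: search from the hint, then advance it. */
	fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fdset,
				files->next_fd);
	FD_SET(fd, fdt->open_fds);
	files->next_fd = fd + 1;

	/* Release: pull the hint back so low fds are reused first. */
	__FD_CLR(fd, fdt->open_fds);
	if (fd < files->next_fd)
		files->next_fd = fd;
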
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 1d24fead51a6..826c131994c3 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -312,7 +312,7 @@ static void *devinfo_next(struct seq_file *f, void *v, loff_t *pos)
312 case BLK_HDR: 312 case BLK_HDR:
313 info->state = BLK_LIST; 313 info->state = BLK_LIST;
314 (*pos)++; 314 (*pos)++;
315 break; 315 /*fallthrough*/
316 case BLK_LIST: 316 case BLK_LIST:
317 if (get_blkdev_info(info->blkdev,&idummy,&ndummy)) { 317 if (get_blkdev_info(info->blkdev,&idummy,&ndummy)) {
318 /* 318 /*
diff --git a/fs/qnx4/file.c b/fs/qnx4/file.c
index b471315e24ef..c33963fded9e 100644
--- a/fs/qnx4/file.c
+++ b/fs/qnx4/file.c
@@ -12,10 +12,7 @@
12 * 27-06-1998 by Frank Denis : file overwriting. 12 * 27-06-1998 by Frank Denis : file overwriting.
13 */ 13 */
14 14
15#include <linux/config.h>
16#include <linux/types.h>
17#include <linux/fs.h> 15#include <linux/fs.h>
18#include <linux/time.h>
19#include <linux/qnx4_fs.h> 16#include <linux/qnx4_fs.h>
20 17
21/* 18/*
diff --git a/fs/quota.c b/fs/quota.c
index ba9e0bf32f67..d6a2be826e29 100644
--- a/fs/quota.c
+++ b/fs/quota.c
@@ -170,10 +170,10 @@ static void quota_sync_sb(struct super_block *sb, int type)
170 170
171 /* Now when everything is written we can discard the pagecache so 171 /* Now when everything is written we can discard the pagecache so
172 * that userspace sees the changes. We need i_mutex and so we could 172 * that userspace sees the changes. We need i_mutex and so we could
173 * not do it inside dqonoff_sem. Moreover we need to be careful 173 * not do it inside dqonoff_mutex. Moreover we need to be careful
174 * about races with quotaoff() (that is the reason why we have our own 174 * about races with quotaoff() (that is the reason why we have our own
175 * reference to inode). */ 175 * reference to inode). */
176 down(&sb_dqopt(sb)->dqonoff_sem); 176 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
177 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 177 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
178 discard[cnt] = NULL; 178 discard[cnt] = NULL;
179 if (type != -1 && cnt != type) 179 if (type != -1 && cnt != type)
@@ -182,7 +182,7 @@ static void quota_sync_sb(struct super_block *sb, int type)
182 continue; 182 continue;
183 discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]); 183 discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]);
184 } 184 }
185 up(&sb_dqopt(sb)->dqonoff_sem); 185 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
186 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 186 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
187 if (discard[cnt]) { 187 if (discard[cnt]) {
188 mutex_lock(&discard[cnt]->i_mutex); 188 mutex_lock(&discard[cnt]->i_mutex);
diff --git a/fs/quota_v2.c b/fs/quota_v2.c
index b4199ec3ece4..c519a583e681 100644
--- a/fs/quota_v2.c
+++ b/fs/quota_v2.c
@@ -394,7 +394,7 @@ static int v2_write_dquot(struct dquot *dquot)
394 ssize_t ret; 394 ssize_t ret;
395 struct v2_disk_dqblk ddquot, empty; 395 struct v2_disk_dqblk ddquot, empty;
396 396
397 /* dq_off is guarded by dqio_sem */ 397 /* dq_off is guarded by dqio_mutex */
398 if (!dquot->dq_off) 398 if (!dquot->dq_off)
399 if ((ret = dq_insert_tree(dquot)) < 0) { 399 if ((ret = dq_insert_tree(dquot)) < 0) {
400 printk(KERN_ERR "VFS: Error %zd occurred while creating quota.\n", ret); 400 printk(KERN_ERR "VFS: Error %zd occurred while creating quota.\n", ret);
diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c
index 2115383dcc8d..6ada2095b9ac 100644
--- a/fs/ramfs/file-mmu.c
+++ b/fs/ramfs/file-mmu.c
@@ -24,18 +24,7 @@
24 * caches is sufficient. 24 * caches is sufficient.
25 */ 25 */
26 26
27#include <linux/module.h>
28#include <linux/fs.h> 27#include <linux/fs.h>
29#include <linux/pagemap.h>
30#include <linux/highmem.h>
31#include <linux/init.h>
32#include <linux/string.h>
33#include <linux/smp_lock.h>
34#include <linux/backing-dev.h>
35#include <linux/ramfs.h>
36
37#include <asm/uaccess.h>
38#include "internal.h"
39 28
40struct address_space_operations ramfs_aops = { 29struct address_space_operations ramfs_aops = {
41 .readpage = simple_readpage, 30 .readpage = simple_readpage,
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 3f810acd0bfa..b1ca234068f6 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -87,8 +87,7 @@ static int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
87 xpages = 1UL << order; 87 xpages = 1UL << order;
88 npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT; 88 npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
89 89
90 for (loop = 0; loop < npages; loop++) 90 split_page(pages, order);
91 set_page_count(pages + loop, 1);
92 91
93 /* trim off any pages we don't actually require */ 92 /* trim off any pages we don't actually require */
94 for (loop = npages; loop < xpages; loop++) 93 for (loop = npages; loop < xpages; loop++)
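
split_page() turns one order-N allocation into 2^N independently refcounted order-0 pages, which is what the removed set_page_count() loop open-coded. A condensed sketch using the surrounding function's variables; the gfp flags are a placeholder:

	pages = alloc_pages(gfp, order);	/* 2^order contiguous pages */
	if (pages) {
		split_page(pages, order);	/* each page now has count 1 */
		for (loop = npages; loop < xpages; loop++)
			__free_page(pages + loop);	/* trim excess tail */
	}
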
diff --git a/fs/relayfs/Makefile b/fs/relayfs/Makefile
deleted file mode 100644
index e76e182cdb38..000000000000
--- a/fs/relayfs/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
1obj-$(CONFIG_RELAYFS_FS) += relayfs.o
2
3relayfs-y := relay.o inode.o buffers.o
4
diff --git a/fs/relayfs/buffers.c b/fs/relayfs/buffers.c
deleted file mode 100644
index 10187812771e..000000000000
--- a/fs/relayfs/buffers.c
+++ /dev/null
@@ -1,190 +0,0 @@
1/*
2 * RelayFS buffer management code.
3 *
4 * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
5 * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com)
6 *
7 * This file is released under the GPL.
8 */
9
10#include <linux/module.h>
11#include <linux/vmalloc.h>
12#include <linux/mm.h>
13#include <linux/relayfs_fs.h>
14#include "relay.h"
15#include "buffers.h"
16
17/*
18 * close() vm_op implementation for relayfs file mapping.
19 */
20static void relay_file_mmap_close(struct vm_area_struct *vma)
21{
22 struct rchan_buf *buf = vma->vm_private_data;
23 buf->chan->cb->buf_unmapped(buf, vma->vm_file);
24}
25
26/*
27 * nopage() vm_op implementation for relayfs file mapping.
28 */
29static struct page *relay_buf_nopage(struct vm_area_struct *vma,
30 unsigned long address,
31 int *type)
32{
33 struct page *page;
34 struct rchan_buf *buf = vma->vm_private_data;
35 unsigned long offset = address - vma->vm_start;
36
37 if (address > vma->vm_end)
38 return NOPAGE_SIGBUS; /* Disallow mremap */
39 if (!buf)
40 return NOPAGE_OOM;
41
42 page = vmalloc_to_page(buf->start + offset);
43 if (!page)
44 return NOPAGE_OOM;
45 get_page(page);
46
47 if (type)
48 *type = VM_FAULT_MINOR;
49
50 return page;
51}
52
53/*
54 * vm_ops for relay file mappings.
55 */
56static struct vm_operations_struct relay_file_mmap_ops = {
57 .nopage = relay_buf_nopage,
58 .close = relay_file_mmap_close,
59};
60
61/**
62 * relay_mmap_buf: - mmap channel buffer to process address space
63 * @buf: relay channel buffer
64 * @vma: vm_area_struct describing memory to be mapped
65 *
66 * Returns 0 if ok, negative on error
67 *
68 * Caller should already have grabbed mmap_sem.
69 */
70int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
71{
72 unsigned long length = vma->vm_end - vma->vm_start;
73 struct file *filp = vma->vm_file;
74
75 if (!buf)
76 return -EBADF;
77
78 if (length != (unsigned long)buf->chan->alloc_size)
79 return -EINVAL;
80
81 vma->vm_ops = &relay_file_mmap_ops;
82 vma->vm_private_data = buf;
83 buf->chan->cb->buf_mapped(buf, filp);
84
85 return 0;
86}
87
88/**
89 * relay_alloc_buf - allocate a channel buffer
90 * @buf: the buffer struct
91 * @size: total size of the buffer
92 *
93 * Returns a pointer to the resulting buffer, NULL if unsuccessful
94 */
95static void *relay_alloc_buf(struct rchan_buf *buf, unsigned long size)
96{
97 void *mem;
98 unsigned int i, j, n_pages;
99
100 size = PAGE_ALIGN(size);
101 n_pages = size >> PAGE_SHIFT;
102
103 buf->page_array = kcalloc(n_pages, sizeof(struct page *), GFP_KERNEL);
104 if (!buf->page_array)
105 return NULL;
106
107 for (i = 0; i < n_pages; i++) {
108 buf->page_array[i] = alloc_page(GFP_KERNEL);
109 if (unlikely(!buf->page_array[i]))
110 goto depopulate;
111 }
112 mem = vmap(buf->page_array, n_pages, VM_MAP, PAGE_KERNEL);
113 if (!mem)
114 goto depopulate;
115
116 memset(mem, 0, size);
117 buf->page_count = n_pages;
118 return mem;
119
120depopulate:
121 for (j = 0; j < i; j++)
122 __free_page(buf->page_array[j]);
123 kfree(buf->page_array);
124 return NULL;
125}
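
Although relayfs is deleted in this merge, relay_alloc_buf() above is a textbook page-array-plus-vmap pattern for a large, virtually contiguous buffer backed by individually allocated pages. Its core, condensed (error handling omitted for brevity):

	struct page **pa = kcalloc(n_pages, sizeof(*pa), GFP_KERNEL);
	void *mem;
	unsigned int i;

	for (i = 0; i < n_pages; i++)
		pa[i] = alloc_page(GFP_KERNEL);		/* NULL checks omitted */
	mem = vmap(pa, n_pages, VM_MAP, PAGE_KERNEL);	/* one linear mapping */
	memset(mem, 0, n_pages << PAGE_SHIFT);
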
126
127/**
128 * relay_create_buf - allocate and initialize a channel buffer
129 * @chan: the channel to create the buffer for; alloc_size and
130 * n_subbufs are taken from it
131 *
132 * Returns channel buffer if successful, NULL otherwise
133 */
134struct rchan_buf *relay_create_buf(struct rchan *chan)
135{
136 struct rchan_buf *buf = kcalloc(1, sizeof(struct rchan_buf), GFP_KERNEL);
137 if (!buf)
138 return NULL;
139
140 buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
141 if (!buf->padding)
142 goto free_buf;
143
144 buf->start = relay_alloc_buf(buf, chan->alloc_size);
145 if (!buf->start)
146 goto free_buf;
147
148 buf->chan = chan;
149 kref_get(&buf->chan->kref);
150 return buf;
151
152free_buf:
153 kfree(buf->padding);
154 kfree(buf);
155 return NULL;
156}
157
158/**
159 * relay_destroy_buf - destroy an rchan_buf struct and associated buffer
160 * @buf: the buffer struct
161 */
162void relay_destroy_buf(struct rchan_buf *buf)
163{
164 struct rchan *chan = buf->chan;
165 unsigned int i;
166
167 if (likely(buf->start)) {
168 vunmap(buf->start);
169 for (i = 0; i < buf->page_count; i++)
170 __free_page(buf->page_array[i]);
171 kfree(buf->page_array);
172 }
173 kfree(buf->padding);
174 kfree(buf);
175 kref_put(&chan->kref, relay_destroy_channel);
176}
177
178/**
179 * relay_remove_buf - remove a channel buffer
180 *
181 * Removes the file from the relayfs filesystem, which also frees the
182 * rchan_buf struct and the channel buffer. Should only be called from
183 * kref_put().
184 */
185void relay_remove_buf(struct kref *kref)
186{
187 struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref);
188 buf->chan->cb->remove_buf_file(buf->dentry);
189 relay_destroy_buf(buf);
190}
diff --git a/fs/relayfs/buffers.h b/fs/relayfs/buffers.h
deleted file mode 100644
index 37a12493f641..000000000000
--- a/fs/relayfs/buffers.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _BUFFERS_H
2#define _BUFFERS_H
3
4/* This is inspired by rtai/shmem */
5#define FIX_SIZE(x) (((x) - 1) & PAGE_MASK) + PAGE_SIZE
6
7extern int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma);
8extern struct rchan_buf *relay_create_buf(struct rchan *chan);
9extern void relay_destroy_buf(struct rchan_buf *buf);
10extern void relay_remove_buf(struct kref *kref);
11
12#endif/* _BUFFERS_H */
diff --git a/fs/relayfs/inode.c b/fs/relayfs/inode.c
deleted file mode 100644
index 383523011aad..000000000000
--- a/fs/relayfs/inode.c
+++ /dev/null
@@ -1,581 +0,0 @@
1/*
2 * VFS-related code for RelayFS, a high-speed data relay filesystem.
3 *
4 * Copyright (C) 2003-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
5 * Copyright (C) 2003-2005 - Karim Yaghmour <karim@opersys.com>
6 *
7 * Based on ramfs, Copyright (C) 2002 - Linus Torvalds
8 *
9 * This file is released under the GPL.
10 */
11
12#include <linux/module.h>
13#include <linux/fs.h>
14#include <linux/mount.h>
15#include <linux/pagemap.h>
16#include <linux/init.h>
17#include <linux/string.h>
18#include <linux/backing-dev.h>
19#include <linux/namei.h>
20#include <linux/poll.h>
21#include <linux/relayfs_fs.h>
22#include "relay.h"
23#include "buffers.h"
24
25#define RELAYFS_MAGIC 0xF0B4A981
26
27static struct vfsmount * relayfs_mount;
28static int relayfs_mount_count;
29
30static struct backing_dev_info relayfs_backing_dev_info = {
31 .ra_pages = 0, /* No readahead */
32 .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
33};
34
35static struct inode *relayfs_get_inode(struct super_block *sb,
36 int mode,
37 struct file_operations *fops,
38 void *data)
39{
40 struct inode *inode;
41
42 inode = new_inode(sb);
43 if (!inode)
44 return NULL;
45
46 inode->i_mode = mode;
47 inode->i_uid = 0;
48 inode->i_gid = 0;
49 inode->i_blksize = PAGE_CACHE_SIZE;
50 inode->i_blocks = 0;
51 inode->i_mapping->backing_dev_info = &relayfs_backing_dev_info;
52 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
53 switch (mode & S_IFMT) {
54 case S_IFREG:
55 inode->i_fop = fops;
56 if (data)
57 inode->u.generic_ip = data;
58 break;
59 case S_IFDIR:
60 inode->i_op = &simple_dir_inode_operations;
61 inode->i_fop = &simple_dir_operations;
62
63 /* directory inodes start off with i_nlink == 2 (for "." entry) */
64 inode->i_nlink++;
65 break;
66 default:
67 break;
68 }
69
70 return inode;
71}
72
73/**
74 * relayfs_create_entry - create a relayfs directory or file
75 * @name: the name of the file to create
76 * @parent: parent directory
77 * @mode: mode
78 * @fops: file operations to use for the file
79 * @data: user-associated data for this file
80 *
81 * Returns the new dentry, NULL on failure
82 *
83 * Creates a file or directory with the specified permissions.
84 */
85static struct dentry *relayfs_create_entry(const char *name,
86 struct dentry *parent,
87 int mode,
88 struct file_operations *fops,
89 void *data)
90{
91 struct dentry *d;
92 struct inode *inode;
93 int error = 0;
94
95 BUG_ON(!name || !(S_ISREG(mode) || S_ISDIR(mode)));
96
97 error = simple_pin_fs("relayfs", &relayfs_mount, &relayfs_mount_count);
98 if (error) {
99 printk(KERN_ERR "Couldn't mount relayfs: errcode %d\n", error);
100 return NULL;
101 }
102
103 if (!parent && relayfs_mount && relayfs_mount->mnt_sb)
104 parent = relayfs_mount->mnt_sb->s_root;
105
106 if (!parent) {
107 simple_release_fs(&relayfs_mount, &relayfs_mount_count);
108 return NULL;
109 }
110
111 parent = dget(parent);
112 mutex_lock(&parent->d_inode->i_mutex);
113 d = lookup_one_len(name, parent, strlen(name));
114 if (IS_ERR(d)) {
115 d = NULL;
116 goto release_mount;
117 }
118
119 if (d->d_inode) {
120 d = NULL;
121 goto release_mount;
122 }
123
124 inode = relayfs_get_inode(parent->d_inode->i_sb, mode, fops, data);
125 if (!inode) {
126 d = NULL;
127 goto release_mount;
128 }
129
130 d_instantiate(d, inode);
131 dget(d); /* Extra count - pin the dentry in core */
132
133 if (S_ISDIR(mode))
134 parent->d_inode->i_nlink++;
135
136 goto exit;
137
138release_mount:
139 simple_release_fs(&relayfs_mount, &relayfs_mount_count);
140
141exit:
142 mutex_unlock(&parent->d_inode->i_mutex);
143 dput(parent);
144 return d;
145}
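
relayfs_create_entry() brackets every entry it creates with simple_pin_fs()/simple_release_fs(), so the internal mount stays pinned while entries exist and the pin is dropped on any failure. The bracket, condensed:

	error = simple_pin_fs("relayfs", &relayfs_mount, &relayfs_mount_count);
	if (error)
		return NULL;
	/* ... look up and instantiate the dentry; on any failure: ... */
	simple_release_fs(&relayfs_mount, &relayfs_mount_count);
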
146
147/**
148 * relayfs_create_file - create a file in the relay filesystem
149 * @name: the name of the file to create
150 * @parent: parent directory
151 * @mode: mode; if not specified the default perms are used
152 * @fops: file operations to use for the file
153 * @data: user-associated data for this file
154 *
155 * Returns file dentry if successful, NULL otherwise.
156 *
157 * The file will be created with user-read (S_IRUSR) permission on behalf of the current user.
158 */
159struct dentry *relayfs_create_file(const char *name,
160 struct dentry *parent,
161 int mode,
162 struct file_operations *fops,
163 void *data)
164{
165 BUG_ON(!fops);
166
167 if (!mode)
168 mode = S_IRUSR;
169 mode = (mode & S_IALLUGO) | S_IFREG;
170
171 return relayfs_create_entry(name, parent, mode, fops, data);
172}
173
174/**
175 * relayfs_create_dir - create a directory in the relay filesystem
176 * @name: the name of the directory to create
177 * @parent: parent directory, NULL if parent should be fs root
178 *
179 * Returns directory dentry if successful, NULL otherwise.
180 *
181 * The directory will be created rwxr-xr-x on behalf of the current user.
182 */
183struct dentry *relayfs_create_dir(const char *name, struct dentry *parent)
184{
185 int mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
186 return relayfs_create_entry(name, parent, mode, NULL, NULL);
187}
188
189/**
190 * relayfs_remove - remove a file or directory in the relay filesystem
191 * @dentry: file or directory dentry
192 *
193 * Returns 0 if successful, negative otherwise.
194 */
195int relayfs_remove(struct dentry *dentry)
196{
197 struct dentry *parent;
198 int error = 0;
199
200 if (!dentry)
201 return -EINVAL;
202 parent = dentry->d_parent;
203 if (!parent)
204 return -EINVAL;
205
206 parent = dget(parent);
207 mutex_lock(&parent->d_inode->i_mutex);
208 if (dentry->d_inode) {
209 if (S_ISDIR(dentry->d_inode->i_mode))
210 error = simple_rmdir(parent->d_inode, dentry);
211 else
212 error = simple_unlink(parent->d_inode, dentry);
213 if (!error)
214 d_delete(dentry);
215 }
216 if (!error)
217 dput(dentry);
218 mutex_unlock(&parent->d_inode->i_mutex);
219 dput(parent);
220
221 if (!error)
222 simple_release_fs(&relayfs_mount, &relayfs_mount_count);
223
224 return error;
225}
226
227/**
228 * relayfs_remove_file - remove a file from relay filesystem
229 * @dentry: directory dentry
230 *
231 * Returns 0 if successful, negative otherwise.
232 */
233int relayfs_remove_file(struct dentry *dentry)
234{
235 return relayfs_remove(dentry);
236}
237
238/**
239 * relayfs_remove_dir - remove a directory in the relay filesystem
240 * @dentry: directory dentry
241 *
242 * Returns 0 if successful, negative otherwise.
243 */
244int relayfs_remove_dir(struct dentry *dentry)
245{
246 return relayfs_remove(dentry);
247}
248
249/**
250 * relay_file_open - open file op for relay files
251 * @inode: the inode
252 * @filp: the file
253 *
254 * Increments the channel buffer refcount.
255 */
256static int relay_file_open(struct inode *inode, struct file *filp)
257{
258 struct rchan_buf *buf = inode->u.generic_ip;
259 kref_get(&buf->kref);
260 filp->private_data = buf;
261
262 return 0;
263}
264
265/**
266 * relay_file_mmap - mmap file op for relay files
267 * @filp: the file
268 * @vma: the vma describing what to map
269 *
270 * Calls upon relay_mmap_buf to map the file into user space.
271 */
272static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma)
273{
274 struct rchan_buf *buf = filp->private_data;
275 return relay_mmap_buf(buf, vma);
276}
277
278/**
279 * relay_file_poll - poll file op for relay files
280 * @filp: the file
281 * @wait: poll table
282 *
283 * Poll implementation.
284 */
285static unsigned int relay_file_poll(struct file *filp, poll_table *wait)
286{
287 unsigned int mask = 0;
288 struct rchan_buf *buf = filp->private_data;
289
290 if (buf->finalized)
291 return POLLERR;
292
293 if (filp->f_mode & FMODE_READ) {
294 poll_wait(filp, &buf->read_wait, wait);
295 if (!relay_buf_empty(buf))
296 mask |= POLLIN | POLLRDNORM;
297 }
298
299 return mask;
300}
301
302/**
303 * relay_file_release - release file op for relay files
304 * @inode: the inode
305 * @filp: the file
306 *
307 * Decrements the channel buffer refcount, as the filesystem is
308 * no longer using it.
309 */
310static int relay_file_release(struct inode *inode, struct file *filp)
311{
312 struct rchan_buf *buf = filp->private_data;
313 kref_put(&buf->kref, relay_remove_buf);
314
315 return 0;
316}
317
318/**
319 * relay_file_read_consume - update the consumed count for the buffer
320 */
321static void relay_file_read_consume(struct rchan_buf *buf,
322 size_t read_pos,
323 size_t bytes_consumed)
324{
325 size_t subbuf_size = buf->chan->subbuf_size;
326 size_t n_subbufs = buf->chan->n_subbufs;
327 size_t read_subbuf;
328
329 if (buf->bytes_consumed + bytes_consumed > subbuf_size) {
330 relay_subbufs_consumed(buf->chan, buf->cpu, 1);
331 buf->bytes_consumed = 0;
332 }
333
334 buf->bytes_consumed += bytes_consumed;
335 read_subbuf = read_pos / buf->chan->subbuf_size;
336 if (buf->bytes_consumed + buf->padding[read_subbuf] == subbuf_size) {
337 if ((read_subbuf == buf->subbufs_produced % n_subbufs) &&
338 (buf->offset == subbuf_size))
339 return;
340 relay_subbufs_consumed(buf->chan, buf->cpu, 1);
341 buf->bytes_consumed = 0;
342 }
343}
344
345/**
346 * relay_file_read_avail - boolean, are there unconsumed bytes available?
347 */
348static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
349{
350 size_t bytes_produced, bytes_consumed, write_offset;
351 size_t subbuf_size = buf->chan->subbuf_size;
352 size_t n_subbufs = buf->chan->n_subbufs;
353 size_t produced = buf->subbufs_produced % n_subbufs;
354 size_t consumed = buf->subbufs_consumed % n_subbufs;
355
356 write_offset = buf->offset > subbuf_size ? subbuf_size : buf->offset;
357
358 if (consumed > produced) {
359 if ((produced > n_subbufs) &&
360 (produced + n_subbufs - consumed <= n_subbufs))
361 produced += n_subbufs;
362 } else if (consumed == produced) {
363 if (buf->offset > subbuf_size) {
364 produced += n_subbufs;
365 if (buf->subbufs_produced == buf->subbufs_consumed)
366 consumed += n_subbufs;
367 }
368 }
369
370 if (buf->offset > subbuf_size)
371 bytes_produced = (produced - 1) * subbuf_size + write_offset;
372 else
373 bytes_produced = produced * subbuf_size + write_offset;
374 bytes_consumed = consumed * subbuf_size + buf->bytes_consumed;
375
376 if (bytes_produced == bytes_consumed)
377 return 0;
378
379 relay_file_read_consume(buf, read_pos, 0);
380
381 return 1;
382}
383
384/**
385 * relay_file_read_subbuf_avail - return bytes available in sub-buffer
386 */
387static size_t relay_file_read_subbuf_avail(size_t read_pos,
388 struct rchan_buf *buf)
389{
390 size_t padding, avail = 0;
391 size_t read_subbuf, read_offset, write_subbuf, write_offset;
392 size_t subbuf_size = buf->chan->subbuf_size;
393
394 write_subbuf = (buf->data - buf->start) / subbuf_size;
395 write_offset = buf->offset > subbuf_size ? subbuf_size : buf->offset;
396 read_subbuf = read_pos / subbuf_size;
397 read_offset = read_pos % subbuf_size;
398 padding = buf->padding[read_subbuf];
399
400 if (read_subbuf == write_subbuf) {
401 if (read_offset + padding < write_offset)
402 avail = write_offset - (read_offset + padding);
403 } else
404 avail = (subbuf_size - padding) - read_offset;
405
406 return avail;
407}
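/*
 * Worked example (editorial): with subbuf_size = 4096, padding = 96,
 * and read_pos in a fully written sub-buffer other than the one being
 * written (read_offset = 1000), avail = (4096 - 96) - 1000 = 3000
 * bytes remain readable before the padding starts.
 */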
408
409/**
410 * relay_file_read_start_pos - find the first available byte to read
411 *
412 * If the read_pos is in the middle of padding, return the
413 * position of the first actually available byte, otherwise
414 * return the original value.
415 */
416static size_t relay_file_read_start_pos(size_t read_pos,
417 struct rchan_buf *buf)
418{
419 size_t read_subbuf, padding, padding_start, padding_end;
420 size_t subbuf_size = buf->chan->subbuf_size;
421 size_t n_subbufs = buf->chan->n_subbufs;
422
423 read_subbuf = read_pos / subbuf_size;
424 padding = buf->padding[read_subbuf];
425 padding_start = (read_subbuf + 1) * subbuf_size - padding;
426 padding_end = (read_subbuf + 1) * subbuf_size;
427 if (read_pos >= padding_start && read_pos < padding_end) {
428 read_subbuf = (read_subbuf + 1) % n_subbufs;
429 read_pos = read_subbuf * subbuf_size;
430 }
431
432 return read_pos;
433}
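/*
 * Worked example (editorial): with subbuf_size = 4096 and
 * buf->padding[0] = 96, sub-buffer 0's padding occupies [4000, 4096).
 * A read_pos of 4032 falls inside that range and is advanced to 4096,
 * the first byte of sub-buffer 1 (wrapping modulo n_subbufs).
 */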
434
435/**
436 * relay_file_read_end_pos - return the new read position
437 */
438static size_t relay_file_read_end_pos(struct rchan_buf *buf,
439 size_t read_pos,
440 size_t count)
441{
442 size_t read_subbuf, padding, end_pos;
443 size_t subbuf_size = buf->chan->subbuf_size;
444 size_t n_subbufs = buf->chan->n_subbufs;
445
446 read_subbuf = read_pos / subbuf_size;
447 padding = buf->padding[read_subbuf];
448 if (read_pos % subbuf_size + count + padding == subbuf_size)
449 end_pos = (read_subbuf + 1) * subbuf_size;
450 else
451 end_pos = read_pos + count;
452 if (end_pos >= subbuf_size * n_subbufs)
453 end_pos = 0;
454
455 return end_pos;
456}
457
458/**
459 * relay_file_read - read file op for relay files
460 * @filp: the file
461 * @buffer: the userspace buffer
462 * @count: number of bytes to read
463 * @ppos: position to read from
464 *
465 * Reads count bytes or the number of bytes available in the
466 * current sub-buffer being read, whichever is smaller.
467 */
468static ssize_t relay_file_read(struct file *filp,
469 char __user *buffer,
470 size_t count,
471 loff_t *ppos)
472{
473 struct rchan_buf *buf = filp->private_data;
474 struct inode *inode = filp->f_dentry->d_inode;
475 size_t read_start, avail;
476 ssize_t ret = 0;
477 void *from;
478
479 mutex_lock(&inode->i_mutex);
480 if (!relay_file_read_avail(buf, *ppos))
481 goto out;
482
483 read_start = relay_file_read_start_pos(*ppos, buf);
484 avail = relay_file_read_subbuf_avail(read_start, buf);
485 if (!avail)
486 goto out;
487
488 from = buf->start + read_start;
489 ret = count = min(count, avail);
490 if (copy_to_user(buffer, from, count)) {
491 ret = -EFAULT;
492 goto out;
493 }
494 relay_file_read_consume(buf, read_start, count);
495 *ppos = relay_file_read_end_pos(buf, read_start, count);
496out:
497 mutex_unlock(&inode->i_mutex);
498 return ret;
499}
500
501struct file_operations relay_file_operations = {
502 .open = relay_file_open,
503 .poll = relay_file_poll,
504 .mmap = relay_file_mmap,
505 .read = relay_file_read,
506 .llseek = no_llseek,
507 .release = relay_file_release,
508};
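/*
 * Editorial sketch of the matching user-space consumer, assuming the
 * buffer file was created as /mnt/relay/example0 (path hypothetical).
 * poll() blocks until relay_file_poll() reports POLLIN; read() then
 * returns at most one sub-buffer's worth of data per call, as
 * described for relay_file_read() above.
 */
#if 0	/* user-space example, not kernel code */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int consume(void)
{
	char data[4096];
	ssize_t n;
	struct pollfd pfd;

	pfd.fd = open("/mnt/relay/example0", O_RDONLY);
	if (pfd.fd < 0)
		return -1;
	pfd.events = POLLIN;

	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		n = read(pfd.fd, data, sizeof(data));
		if (n <= 0)
			break;
		fwrite(data, 1, n, stdout);
	}
	close(pfd.fd);
	return 0;
}
#endif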
509
510static struct super_operations relayfs_ops = {
511 .statfs = simple_statfs,
512 .drop_inode = generic_delete_inode,
513};
514
515static int relayfs_fill_super(struct super_block * sb, void * data, int silent)
516{
517 struct inode *inode;
518 struct dentry *root;
519 int mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
520
521 sb->s_blocksize = PAGE_CACHE_SIZE;
522 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
523 sb->s_magic = RELAYFS_MAGIC;
524 sb->s_op = &relayfs_ops;
525 inode = relayfs_get_inode(sb, mode, NULL, NULL);
526
527 if (!inode)
528 return -ENOMEM;
529
530 root = d_alloc_root(inode);
531 if (!root) {
532 iput(inode);
533 return -ENOMEM;
534 }
535 sb->s_root = root;
536
537 return 0;
538}
539
540static struct super_block * relayfs_get_sb(struct file_system_type *fs_type,
541 int flags, const char *dev_name,
542 void *data)
543{
544 return get_sb_single(fs_type, flags, data, relayfs_fill_super);
545}
546
547static struct file_system_type relayfs_fs_type = {
548 .owner = THIS_MODULE,
549 .name = "relayfs",
550 .get_sb = relayfs_get_sb,
551 .kill_sb = kill_litter_super,
552};
553
554static int __init init_relayfs_fs(void)
555{
556 return register_filesystem(&relayfs_fs_type);
557}
558
559static void __exit exit_relayfs_fs(void)
560{
561
562
563
564
565
566 unregister_filesystem(&relayfs_fs_type);
567}
568
569module_init(init_relayfs_fs)
570module_exit(exit_relayfs_fs)
571
572EXPORT_SYMBOL_GPL(relay_file_operations);
573EXPORT_SYMBOL_GPL(relayfs_create_dir);
574EXPORT_SYMBOL_GPL(relayfs_remove_dir);
575EXPORT_SYMBOL_GPL(relayfs_create_file);
576EXPORT_SYMBOL_GPL(relayfs_remove_file);
577
578MODULE_AUTHOR("Tom Zanussi <zanussi@us.ibm.com> and Karim Yaghmour <karim@opersys.com>");
579MODULE_DESCRIPTION("Relay Filesystem");
580MODULE_LICENSE("GPL");
581
diff --git a/fs/relayfs/relay.c b/fs/relayfs/relay.c
deleted file mode 100644
index abf3ceaace49..000000000000
--- a/fs/relayfs/relay.c
+++ /dev/null
@@ -1,482 +0,0 @@
1/*
2 * Public API and common code for RelayFS.
3 *
4 * See Documentation/filesystems/relayfs.txt for an overview of relayfs.
5 *
6 * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
7 * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com)
8 *
9 * This file is released under the GPL.
10 */
11
12#include <linux/errno.h>
13#include <linux/stddef.h>
14#include <linux/slab.h>
15#include <linux/module.h>
16#include <linux/string.h>
17#include <linux/relayfs_fs.h>
18#include "relay.h"
19#include "buffers.h"
20
21/**
22 * relay_buf_empty - boolean, is the channel buffer empty?
23 * @buf: channel buffer
24 *
25 * Returns 1 if the buffer is empty, 0 otherwise.
26 */
27int relay_buf_empty(struct rchan_buf *buf)
28{
29 return (buf->subbufs_produced - buf->subbufs_consumed) ? 0 : 1;
30}
31
32/**
33 * relay_buf_full - boolean, is the channel buffer full?
34 * @buf: channel buffer
35 *
36 * Returns 1 if the buffer is full, 0 otherwise.
37 */
38int relay_buf_full(struct rchan_buf *buf)
39{
40 size_t ready = buf->subbufs_produced - buf->subbufs_consumed;
41 return (ready >= buf->chan->n_subbufs) ? 1 : 0;
42}
43
44/*
45 * High-level relayfs kernel API and associated functions.
46 */
47
48/*
49 * rchan_callback implementations defining default channel behavior. Used
50 * in place of corresponding NULL values in client callback struct.
51 */
52
53/*
54 * subbuf_start() default callback. Returns 1 unless the buffer is full.
55 */
56static int subbuf_start_default_callback (struct rchan_buf *buf,
57 void *subbuf,
58 void *prev_subbuf,
59 size_t prev_padding)
60{
61 if (relay_buf_full(buf))
62 return 0;
63
64 return 1;
65}
66
67/*
68 * buf_mapped() default callback. Does nothing.
69 */
70static void buf_mapped_default_callback(struct rchan_buf *buf,
71 struct file *filp)
72{
73}
74
75/*
76 * buf_unmapped() default callback. Does nothing.
77 */
78static void buf_unmapped_default_callback(struct rchan_buf *buf,
79 struct file *filp)
80{
81}
82
83/*
84 * create_buf_file() default callback. Creates a file to represent buf.
85 */
86static struct dentry *create_buf_file_default_callback(const char *filename,
87 struct dentry *parent,
88 int mode,
89 struct rchan_buf *buf,
90 int *is_global)
91{
92 return relayfs_create_file(filename, parent, mode,
93 &relay_file_operations, buf);
94}
95
96/*
97 * remove_buf_file() default callback. Removes file representing relay buffer.
98 */
99static int remove_buf_file_default_callback(struct dentry *dentry)
100{
101 return relayfs_remove(dentry);
102}
103
104/* relay channel default callbacks */
105static struct rchan_callbacks default_channel_callbacks = {
106 .subbuf_start = subbuf_start_default_callback,
107 .buf_mapped = buf_mapped_default_callback,
108 .buf_unmapped = buf_unmapped_default_callback,
109 .create_buf_file = create_buf_file_default_callback,
110 .remove_buf_file = remove_buf_file_default_callback,
111};
112
113/**
114 * wakeup_readers - wake up readers waiting on a channel
115 * @private: the channel buffer
116 *
117 * This is the work function used to defer reader waking. Waking
118 * is deferred because waking readers directly from the write path
119 * causes problems if the write happens from, say, the scheduler.
120 */
121static void wakeup_readers(void *private)
122{
123 struct rchan_buf *buf = private;
124 wake_up_interruptible(&buf->read_wait);
125}
126
127/**
128 * __relay_reset - reset a channel buffer
129 * @buf: the channel buffer
130 * @init: 1 if this is a first-time initialization
131 *
132 * See relay_reset for description of effect.
133 */
134static inline void __relay_reset(struct rchan_buf *buf, unsigned int init)
135{
136 size_t i;
137
138 if (init) {
139 init_waitqueue_head(&buf->read_wait);
140 kref_init(&buf->kref);
141 INIT_WORK(&buf->wake_readers, NULL, NULL);
142 } else {
143 cancel_delayed_work(&buf->wake_readers);
144 flush_scheduled_work();
145 }
146
147 buf->subbufs_produced = 0;
148 buf->subbufs_consumed = 0;
149 buf->bytes_consumed = 0;
150 buf->finalized = 0;
151 buf->data = buf->start;
152 buf->offset = 0;
153
154 for (i = 0; i < buf->chan->n_subbufs; i++)
155 buf->padding[i] = 0;
156
157 buf->chan->cb->subbuf_start(buf, buf->data, NULL, 0);
158}
159
160/**
161 * relay_reset - reset the channel
162 * @chan: the channel
163 *
164 * This has the effect of erasing all data from all channel buffers
165 * and restarting the channel in its initial state. The buffers
166 * are not freed, so any mappings are still in effect.
167 *
168 * NOTE: Care should be taken that the channel isn't actually
169 * being used by anything when this call is made.
170 */
171void relay_reset(struct rchan *chan)
172{
173 unsigned int i;
174 struct rchan_buf *prev = NULL;
175
176 if (!chan)
177 return;
178
179 for (i = 0; i < NR_CPUS; i++) {
180 if (!chan->buf[i] || chan->buf[i] == prev)
181 break;
182 __relay_reset(chan->buf[i], 0);
183 prev = chan->buf[i];
184 }
185}
186
187/**
188 * relay_open_buf - create a new channel buffer in relayfs
189 *
190 * Internal - used by relay_open().
191 */
192static struct rchan_buf *relay_open_buf(struct rchan *chan,
193 const char *filename,
194 struct dentry *parent,
195 int *is_global)
196{
197 struct rchan_buf *buf;
198 struct dentry *dentry;
199
200 if (*is_global)
201 return chan->buf[0];
202
203 buf = relay_create_buf(chan);
204 if (!buf)
205 return NULL;
206
207 /* Create file in fs */
208 dentry = chan->cb->create_buf_file(filename, parent, S_IRUSR,
209 buf, is_global);
210 if (!dentry) {
211 relay_destroy_buf(buf);
212 return NULL;
213 }
214
215 buf->dentry = dentry;
216 __relay_reset(buf, 1);
217
218 return buf;
219}
220
221/**
222 * relay_close_buf - close a channel buffer
223 * @buf: channel buffer
224 *
225 * Marks the buffer finalized and restores the default callbacks.
226 * The channel buffer and channel buffer data structure are then freed
227 * automatically when the last reference is given up.
228 */
229static inline void relay_close_buf(struct rchan_buf *buf)
230{
231 buf->finalized = 1;
232 buf->chan->cb = &default_channel_callbacks;
233 cancel_delayed_work(&buf->wake_readers);
234 flush_scheduled_work();
235 kref_put(&buf->kref, relay_remove_buf);
236}
237
238static inline void setup_callbacks(struct rchan *chan,
239 struct rchan_callbacks *cb)
240{
241 if (!cb) {
242 chan->cb = &default_channel_callbacks;
243 return;
244 }
245
246 if (!cb->subbuf_start)
247 cb->subbuf_start = subbuf_start_default_callback;
248 if (!cb->buf_mapped)
249 cb->buf_mapped = buf_mapped_default_callback;
250 if (!cb->buf_unmapped)
251 cb->buf_unmapped = buf_unmapped_default_callback;
252 if (!cb->create_buf_file)
253 cb->create_buf_file = create_buf_file_default_callback;
254 if (!cb->remove_buf_file)
255 cb->remove_buf_file = remove_buf_file_default_callback;
256 chan->cb = cb;
257}
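/*
 * Editorial sketch: a client callback struct that overrides only
 * subbuf_start(); setup_callbacks() above fills in the remaining
 * members with the defaults.  Stamping prev_padding into the previous
 * sub-buffer assumes each sub-buffer reserves a size_t header for it.
 */
static int example_subbuf_start(struct rchan_buf *buf,
				void *subbuf,
				void *prev_subbuf,
				size_t prev_padding)
{
	if (relay_buf_full(buf))
		return 0;	/* no-overwrite: drop until readers catch up */

	if (prev_subbuf)
		*((size_t *)prev_subbuf) = prev_padding;

	return 1;
}

static struct rchan_callbacks example_callbacks = {
	.subbuf_start	= example_subbuf_start,
};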
258
259/**
260 * relay_open - create a new relayfs channel
261 * @base_filename: base name of files to create
262 * @parent: dentry of parent directory, NULL for root directory
263 * @subbuf_size: size of sub-buffers
264 * @n_subbufs: number of sub-buffers
265 * @cb: client callback functions
266 *
267 * Returns channel pointer if successful, NULL otherwise.
268 *
269 * Creates a channel buffer for each cpu using the sizes and
270 * attributes specified. The created channel buffer files
271 * will be named base_filename0...base_filenameN-1. File
272 * permissions will be S_IRUSR.
273 */
274struct rchan *relay_open(const char *base_filename,
275 struct dentry *parent,
276 size_t subbuf_size,
277 size_t n_subbufs,
278 struct rchan_callbacks *cb)
279{
280 unsigned int i;
281 struct rchan *chan;
282 char *tmpname;
283 int is_global = 0;
284
285 if (!base_filename)
286 return NULL;
287
288 if (!(subbuf_size && n_subbufs))
289 return NULL;
290
291 chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
292 if (!chan)
293 return NULL;
294
295 chan->version = RELAYFS_CHANNEL_VERSION;
296 chan->n_subbufs = n_subbufs;
297 chan->subbuf_size = subbuf_size;
298 chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs);
299 setup_callbacks(chan, cb);
300 kref_init(&chan->kref);
301
302 tmpname = kmalloc(NAME_MAX + 1, GFP_KERNEL);
303 if (!tmpname)
304 goto free_chan;
305
306 for_each_online_cpu(i) {
307 sprintf(tmpname, "%s%d", base_filename, i);
308 chan->buf[i] = relay_open_buf(chan, tmpname, parent,
309 &is_global);
310 if (!chan->buf[i])
311 goto free_bufs;
312 chan->buf[i]->cpu = i;
313 }
314
315 kfree(tmpname);
316 return chan;
317
318free_bufs:
319 for (i = 0; i < NR_CPUS; i++) {
320 if (!chan->buf[i])
321 break;
322 relay_close_buf(chan->buf[i]);
323 if (is_global)
324 break;
325 }
326 kfree(tmpname);
327
328free_chan:
329 kref_put(&chan->kref, relay_destroy_channel);
330 return NULL;
331}
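/*
 * Editorial sketch: open a per-cpu channel under a directory made with
 * relayfs_create_dir(), then log through relay_write() -- assumed here
 * to be the inline helper declared in include/linux/relayfs_fs.h.
 */
static struct rchan *example_chan;

static int example_open_channel(struct dentry *dir)
{
	/* 8 sub-buffers of 16KB per cpu, example_callbacks as above */
	example_chan = relay_open("cpu", dir, 16384, 8, &example_callbacks);
	if (!example_chan)
		return -ENOMEM;
	return 0;
}

static void example_log(const void *event, size_t len)
{
	relay_write(example_chan, event, len);
}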
332
333/**
334 * relay_switch_subbuf - switch to a new sub-buffer
335 * @buf: channel buffer
336 * @length: size of current event
337 *
338 * Returns either the length passed in or 0 if full.
339 *
340 * Performs sub-buffer-switch tasks such as invoking callbacks,
341 * updating padding counts, waking up readers, etc.
342 */
343size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
344{
345 void *old, *new;
346 size_t old_subbuf, new_subbuf;
347
348 if (unlikely(length > buf->chan->subbuf_size))
349 goto toobig;
350
351 if (buf->offset != buf->chan->subbuf_size + 1) {
352 buf->prev_padding = buf->chan->subbuf_size - buf->offset;
353 old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
354 buf->padding[old_subbuf] = buf->prev_padding;
355 buf->subbufs_produced++;
356 if (waitqueue_active(&buf->read_wait)) {
357 PREPARE_WORK(&buf->wake_readers, wakeup_readers, buf);
358 schedule_delayed_work(&buf->wake_readers, 1);
359 }
360 }
361
362 old = buf->data;
363 new_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
364 new = buf->start + new_subbuf * buf->chan->subbuf_size;
365 buf->offset = 0;
366 if (!buf->chan->cb->subbuf_start(buf, new, old, buf->prev_padding)) {
367 buf->offset = buf->chan->subbuf_size + 1;
368 return 0;
369 }
370 buf->data = new;
371 buf->padding[new_subbuf] = 0;
372
373 if (unlikely(length + buf->offset > buf->chan->subbuf_size))
374 goto toobig;
375
376 return length;
377
378toobig:
379 buf->chan->last_toobig = length;
380 return 0;
381}
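/*
 * Editorial sketch of how a write-side reserve helper drives
 * relay_switch_subbuf(); this mirrors the relay_reserve() inline in
 * include/linux/relayfs_fs.h (simplified, preemption handling omitted).
 */
static inline void *example_reserve(struct rchan *chan, size_t length)
{
	struct rchan_buf *buf = chan->buf[smp_processor_id()];
	void *reserved;

	if (buf->offset + length > chan->subbuf_size) {
		length = relay_switch_subbuf(buf, length);
		if (!length)
			return NULL;	/* dropped: full or oversized event */
	}
	reserved = buf->data + buf->offset;
	buf->offset += length;

	return reserved;
}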
382
383/**
384 * relay_subbufs_consumed - update the buffer's sub-buffers-consumed count
385 * @chan: the channel
386 * @cpu: the cpu associated with the channel buffer to update
387 * @subbufs_consumed: number of sub-buffers to add to current buf's count
388 *
389 * Adds to the channel buffer's consumed sub-buffer count.
390 * subbufs_consumed should be the number of sub-buffers newly consumed,
391 * not the total consumed.
392 *
393 * NOTE: kernel clients don't need to call this function if the channel
394 * mode is 'overwrite'.
395 */
396void relay_subbufs_consumed(struct rchan *chan,
397 unsigned int cpu,
398 size_t subbufs_consumed)
399{
400 struct rchan_buf *buf;
401
402 if (!chan)
403 return;
404
405 if (cpu >= NR_CPUS || !chan->buf[cpu])
406 return;
407
408 buf = chan->buf[cpu];
409 buf->subbufs_consumed += subbufs_consumed;
410 if (buf->subbufs_consumed > buf->subbufs_produced)
411 buf->subbufs_consumed = buf->subbufs_produced;
412}
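/*
 * Editorial sketch: a kernel-side consumer draining whole sub-buffers
 * and reporting them back so the producer can reuse them.
 * copy_subbuf_somewhere() is hypothetical, and relay_buf_empty() is
 * internal to fs/relayfs (declared in relay.h below).
 */
static void example_drain_cpu(struct rchan *chan, unsigned int cpu)
{
	struct rchan_buf *buf = chan->buf[cpu];

	while (!relay_buf_empty(buf)) {
		copy_subbuf_somewhere(buf);		/* hypothetical */
		relay_subbufs_consumed(chan, cpu, 1);
	}
}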
413
414/**
415 * relay_destroy_channel - free the channel struct
416 *
417 * Should only be called from kref_put().
418 */
419void relay_destroy_channel(struct kref *kref)
420{
421 struct rchan *chan = container_of(kref, struct rchan, kref);
422 kfree(chan);
423}
424
425/**
426 * relay_close - close the channel
427 * @chan: the channel
428 *
429 * Closes all channel buffers and frees the channel.
430 */
431void relay_close(struct rchan *chan)
432{
433 unsigned int i;
434 struct rchan_buf *prev = NULL;
435
436 if (!chan)
437 return;
438
439 for (i = 0; i < NR_CPUS; i++) {
440 if (!chan->buf[i] || chan->buf[i] == prev)
441 break;
442 relay_close_buf(chan->buf[i]);
443 prev = chan->buf[i];
444 }
445
446 if (chan->last_toobig)
447 printk(KERN_WARNING "relayfs: one or more items not logged "
448 "[item size (%Zd) > sub-buffer size (%Zd)]\n",
449 chan->last_toobig, chan->subbuf_size);
450
451 kref_put(&chan->kref, relay_destroy_channel);
452}
453
454/**
455 * relay_flush - flush the channel
456 * @chan: the channel
457 *
458 * Flushes all channel buffers i.e. forces buffer switch.
459 */
460void relay_flush(struct rchan *chan)
461{
462 unsigned int i;
463 struct rchan_buf *prev = NULL;
464
465 if (!chan)
466 return;
467
468 for (i = 0; i < NR_CPUS; i++) {
469 if (!chan->buf[i] || chan->buf[i] == prev)
470 break;
471 relay_switch_subbuf(chan->buf[i], 0);
472 prev = chan->buf[i];
473 }
474}
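/*
 * Editorial sketch: teardown order for the channel opened in the
 * relay_open() example above.
 */
static void example_close_channel(void)
{
	relay_flush(example_chan);	/* force out the partial sub-buffer */
	relay_close(example_chan);	/* buffers freed once readers drop refs */
	example_chan = NULL;
}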
475
476EXPORT_SYMBOL_GPL(relay_open);
477EXPORT_SYMBOL_GPL(relay_close);
478EXPORT_SYMBOL_GPL(relay_flush);
479EXPORT_SYMBOL_GPL(relay_reset);
480EXPORT_SYMBOL_GPL(relay_subbufs_consumed);
481EXPORT_SYMBOL_GPL(relay_switch_subbuf);
482EXPORT_SYMBOL_GPL(relay_buf_full);
diff --git a/fs/relayfs/relay.h b/fs/relayfs/relay.h
deleted file mode 100644
index 0993d3e5753b..000000000000
--- a/fs/relayfs/relay.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef _RELAY_H
2#define _RELAY_H
3
4extern int relayfs_remove(struct dentry *dentry);
5extern int relay_buf_empty(struct rchan_buf *buf);
6extern void relay_destroy_channel(struct kref *kref);
7
8#endif /* _RELAY_H */
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 7c40570b71dc..555b9ac04c25 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -37,7 +37,7 @@ int seq_open(struct file *file, struct seq_operations *op)
37 file->private_data = p; 37 file->private_data = p;
38 } 38 }
39 memset(p, 0, sizeof(*p)); 39 memset(p, 0, sizeof(*p));
40 sema_init(&p->sem, 1); 40 mutex_init(&p->lock);
41 p->op = op; 41 p->op = op;
42 42
43 /* 43 /*
@@ -71,7 +71,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
71 void *p; 71 void *p;
72 int err = 0; 72 int err = 0;
73 73
74 down(&m->sem); 74 mutex_lock(&m->lock);
75 /* 75 /*
76 * seq_file->op->..m_start/m_stop/m_next may do special actions 76 * seq_file->op->..m_start/m_stop/m_next may do special actions
77 * or optimisations based on the file->f_version, so we want to 77 * or optimisations based on the file->f_version, so we want to
@@ -164,7 +164,7 @@ Done:
164 else 164 else
165 *ppos += copied; 165 *ppos += copied;
166 file->f_version = m->version; 166 file->f_version = m->version;
167 up(&m->sem); 167 mutex_unlock(&m->lock);
168 return copied; 168 return copied;
169Enomem: 169Enomem:
170 err = -ENOMEM; 170 err = -ENOMEM;
@@ -237,7 +237,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin)
237 struct seq_file *m = (struct seq_file *)file->private_data; 237 struct seq_file *m = (struct seq_file *)file->private_data;
238 long long retval = -EINVAL; 238 long long retval = -EINVAL;
239 239
240 down(&m->sem); 240 mutex_lock(&m->lock);
241 m->version = file->f_version; 241 m->version = file->f_version;
242 switch (origin) { 242 switch (origin) {
243 case 1: 243 case 1:
@@ -260,7 +260,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin)
260 } 260 }
261 } 261 }
262 } 262 }
263 up(&m->sem); 263 mutex_unlock(&m->lock);
264 file->f_version = m->version; 264 file->f_version = m->version;
265 return retval; 265 return retval;
266} 266}
diff --git a/fs/super.c b/fs/super.c
index e20b5580afd5..425861cb1caa 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -76,9 +76,9 @@ static struct super_block *alloc_super(void)
76 down_write(&s->s_umount); 76 down_write(&s->s_umount);
77 s->s_count = S_BIAS; 77 s->s_count = S_BIAS;
78 atomic_set(&s->s_active, 1); 78 atomic_set(&s->s_active, 1);
79 sema_init(&s->s_vfs_rename_sem,1); 79 mutex_init(&s->s_vfs_rename_mutex);
80 sema_init(&s->s_dquot.dqio_sem, 1); 80 mutex_init(&s->s_dquot.dqio_mutex);
81 sema_init(&s->s_dquot.dqonoff_sem, 1); 81 mutex_init(&s->s_dquot.dqonoff_mutex);
82 init_rwsem(&s->s_dquot.dqptr_sem); 82 init_rwsem(&s->s_dquot.dqptr_sem);
83 init_waitqueue_head(&s->s_wait_unfrozen); 83 init_waitqueue_head(&s->s_wait_unfrozen);
84 s->s_maxbytes = MAX_NON_LFS; 84 s->s_maxbytes = MAX_NON_LFS;
@@ -693,9 +693,9 @@ struct super_block *get_sb_bdev(struct file_system_type *fs_type,
693 * will protect the lockfs code from trying to start a snapshot 693 * will protect the lockfs code from trying to start a snapshot
694 * while we are mounting 694 * while we are mounting
695 */ 695 */
696 down(&bdev->bd_mount_sem); 696 mutex_lock(&bdev->bd_mount_mutex);
697 s = sget(fs_type, test_bdev_super, set_bdev_super, bdev); 697 s = sget(fs_type, test_bdev_super, set_bdev_super, bdev);
698 up(&bdev->bd_mount_sem); 698 mutex_unlock(&bdev->bd_mount_mutex);
699 if (IS_ERR(s)) 699 if (IS_ERR(s))
700 goto out; 700 goto out;
701 701
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 49bd219275db..9ee956864445 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -50,6 +50,32 @@ static struct sysfs_dirent * sysfs_new_dirent(struct sysfs_dirent * parent_sd,
50 return sd; 50 return sd;
51} 51}
52 52
53/**
54 * sysfs_dirent_exist - check for an existing sysfs element with the same name
55 * Return -EEXIST if there is already a sysfs element with the same name for
56 * the same parent.
57 *
58 * called with parent inode's i_mutex held
59 */
60int sysfs_dirent_exist(struct sysfs_dirent *parent_sd,
61 const unsigned char *new)
62{
63 struct sysfs_dirent * sd;
64
65 list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
66 if (sd->s_element) {
67 const unsigned char *existing = sysfs_get_name(sd);
68 if (strcmp(existing, new))
69 continue;
70 else
71 return -EEXIST;
72 }
73 }
74
75 return 0;
76}
77
78
53int sysfs_make_dirent(struct sysfs_dirent * parent_sd, struct dentry * dentry, 79int sysfs_make_dirent(struct sysfs_dirent * parent_sd, struct dentry * dentry,
54 void * element, umode_t mode, int type) 80 void * element, umode_t mode, int type)
55{ 81{
@@ -102,7 +128,11 @@ static int create_dir(struct kobject * k, struct dentry * p,
102 mutex_lock(&p->d_inode->i_mutex); 128 mutex_lock(&p->d_inode->i_mutex);
103 *d = lookup_one_len(n, p, strlen(n)); 129 *d = lookup_one_len(n, p, strlen(n));
104 if (!IS_ERR(*d)) { 130 if (!IS_ERR(*d)) {
105 error = sysfs_make_dirent(p->d_fsdata, *d, k, mode, SYSFS_DIR); 131 if (sysfs_dirent_exist(p->d_fsdata, n))
132 error = -EEXIST;
133 else
134 error = sysfs_make_dirent(p->d_fsdata, *d, k, mode,
135 SYSFS_DIR);
106 if (!error) { 136 if (!error) {
107 error = sysfs_create(*d, mode, init_dir); 137 error = sysfs_create(*d, mode, init_dir);
108 if (!error) { 138 if (!error) {
@@ -302,6 +332,7 @@ void sysfs_remove_dir(struct kobject * kobj)
302 * Drop reference from dget() on entrance. 332 * Drop reference from dget() on entrance.
303 */ 333 */
304 dput(dentry); 334 dput(dentry);
335 kobj->dentry = NULL;
305} 336}
306 337
307int sysfs_rename_dir(struct kobject * kobj, const char *new_name) 338int sysfs_rename_dir(struct kobject * kobj, const char *new_name)
@@ -479,7 +510,3 @@ struct file_operations sysfs_dir_operations = {
479 .read = generic_read_dir, 510 .read = generic_read_dir,
480 .readdir = sysfs_readdir, 511 .readdir = sysfs_readdir,
481}; 512};
482
483EXPORT_SYMBOL_GPL(sysfs_create_dir);
484EXPORT_SYMBOL_GPL(sysfs_remove_dir);
485EXPORT_SYMBOL_GPL(sysfs_rename_dir);
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index d0e3d8495165..5e83e7246788 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -301,9 +301,8 @@ static int check_perm(struct inode * inode, struct file * file)
301 /* No error? Great, allocate a buffer for the file, and store it 301 /* No error? Great, allocate a buffer for the file, and store it
302 * it in file->private_data for easy access. 302 * it in file->private_data for easy access.
303 */ 303 */
304 buffer = kmalloc(sizeof(struct sysfs_buffer),GFP_KERNEL); 304 buffer = kzalloc(sizeof(struct sysfs_buffer), GFP_KERNEL);
305 if (buffer) { 305 if (buffer) {
306 memset(buffer,0,sizeof(struct sysfs_buffer));
307 init_MUTEX(&buffer->sem); 306 init_MUTEX(&buffer->sem);
308 buffer->needs_read_fill = 1; 307 buffer->needs_read_fill = 1;
309 buffer->ops = ops; 308 buffer->ops = ops;
@@ -362,10 +361,12 @@ int sysfs_add_file(struct dentry * dir, const struct attribute * attr, int type)
362{ 361{
363 struct sysfs_dirent * parent_sd = dir->d_fsdata; 362 struct sysfs_dirent * parent_sd = dir->d_fsdata;
364 umode_t mode = (attr->mode & S_IALLUGO) | S_IFREG; 363 umode_t mode = (attr->mode & S_IALLUGO) | S_IFREG;
365 int error = 0; 364 int error = -EEXIST;
366 365
367 mutex_lock(&dir->d_inode->i_mutex); 366 mutex_lock(&dir->d_inode->i_mutex);
368 error = sysfs_make_dirent(parent_sd, NULL, (void *) attr, mode, type); 367 if (!sysfs_dirent_exist(parent_sd, attr->name))
368 error = sysfs_make_dirent(parent_sd, NULL, (void *)attr,
369 mode, type);
369 mutex_unlock(&dir->d_inode->i_mutex); 370 mutex_unlock(&dir->d_inode->i_mutex);
370 371
371 return error; 372 return error;
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 689f7bcfaf30..4c29ac41ac3e 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -54,11 +54,10 @@ int sysfs_setattr(struct dentry * dentry, struct iattr * iattr)
54 54
55 if (!sd_iattr) { 55 if (!sd_iattr) {
56 /* setting attributes for the first time, allocate now */ 56 /* setting attributes for the first time, allocate now */
57 sd_iattr = kmalloc(sizeof(struct iattr), GFP_KERNEL); 57 sd_iattr = kzalloc(sizeof(struct iattr), GFP_KERNEL);
58 if (!sd_iattr) 58 if (!sd_iattr)
59 return -ENOMEM; 59 return -ENOMEM;
60 /* assign default attributes */ 60 /* assign default attributes */
61 memset(sd_iattr, 0, sizeof(struct iattr));
62 sd_iattr->ia_mode = sd->s_mode; 61 sd_iattr->ia_mode = sd->s_mode;
63 sd_iattr->ia_uid = 0; 62 sd_iattr->ia_uid = 0;
64 sd_iattr->ia_gid = 0; 63 sd_iattr->ia_gid = 0;
@@ -227,12 +226,16 @@ void sysfs_drop_dentry(struct sysfs_dirent * sd, struct dentry * parent)
227void sysfs_hash_and_remove(struct dentry * dir, const char * name) 226void sysfs_hash_and_remove(struct dentry * dir, const char * name)
228{ 227{
229 struct sysfs_dirent * sd; 228 struct sysfs_dirent * sd;
230 struct sysfs_dirent * parent_sd = dir->d_fsdata; 229 struct sysfs_dirent * parent_sd;
230
231 if (!dir)
232 return;
231 233
232 if (dir->d_inode == NULL) 234 if (dir->d_inode == NULL)
233 /* no inode means this hasn't been made visible yet */ 235 /* no inode means this hasn't been made visible yet */
234 return; 236 return;
235 237
238 parent_sd = dir->d_fsdata;
236 mutex_lock(&dir->d_inode->i_mutex); 239 mutex_lock(&dir->d_inode->i_mutex);
237 list_for_each_entry(sd, &parent_sd->s_children, s_sibling) { 240 list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
238 if (!sd->s_element) 241 if (!sd->s_element)
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index e38d6338a20d..d2eac3ceed5f 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -66,6 +66,7 @@ static int sysfs_add_link(struct dentry * parent, const char * name, struct kobj
66 if (!error) 66 if (!error)
67 return 0; 67 return 0;
68 68
69 kobject_put(target);
69 kfree(sl->link_name); 70 kfree(sl->link_name);
70exit2: 71exit2:
71 kfree(sl); 72 kfree(sl);
@@ -82,12 +83,13 @@ exit1:
82int sysfs_create_link(struct kobject * kobj, struct kobject * target, const char * name) 83int sysfs_create_link(struct kobject * kobj, struct kobject * target, const char * name)
83{ 84{
84 struct dentry * dentry = kobj->dentry; 85 struct dentry * dentry = kobj->dentry;
85 int error = 0; 86 int error = -EEXIST;
86 87
87 BUG_ON(!kobj || !kobj->dentry || !name); 88 BUG_ON(!kobj || !kobj->dentry || !name);
88 89
89 mutex_lock(&dentry->d_inode->i_mutex); 90 mutex_lock(&dentry->d_inode->i_mutex);
90 error = sysfs_add_link(dentry, name, target); 91 if (!sysfs_dirent_exist(dentry->d_fsdata, name))
92 error = sysfs_add_link(dentry, name, target);
91 mutex_unlock(&dentry->d_inode->i_mutex); 93 mutex_unlock(&dentry->d_inode->i_mutex);
92 return error; 94 return error;
93} 95}
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 3f8953e0e5d0..cf11d5b789d9 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -5,6 +5,7 @@ extern kmem_cache_t *sysfs_dir_cachep;
5extern struct inode * sysfs_new_inode(mode_t mode, struct sysfs_dirent *); 5extern struct inode * sysfs_new_inode(mode_t mode, struct sysfs_dirent *);
6extern int sysfs_create(struct dentry *, int mode, int (*init)(struct inode *)); 6extern int sysfs_create(struct dentry *, int mode, int (*init)(struct inode *));
7 7
8extern int sysfs_dirent_exist(struct sysfs_dirent *, const unsigned char *);
8extern int sysfs_make_dirent(struct sysfs_dirent *, struct dentry *, void *, 9extern int sysfs_make_dirent(struct sysfs_dirent *, struct dentry *, void *,
9 umode_t, int); 10 umode_t, int);
10 11
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index 7f0e4b53085e..b8a73f716fbe 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -16,18 +16,6 @@
16#include <linux/smp_lock.h> 16#include <linux/smp_lock.h>
17#include "sysv.h" 17#include "sysv.h"
18 18
19static inline void inc_count(struct inode *inode)
20{
21 inode->i_nlink++;
22 mark_inode_dirty(inode);
23}
24
25static inline void dec_count(struct inode *inode)
26{
27 inode->i_nlink--;
28 mark_inode_dirty(inode);
29}
30
31static int add_nondir(struct dentry *dentry, struct inode *inode) 19static int add_nondir(struct dentry *dentry, struct inode *inode)
32{ 20{
33 int err = sysv_add_link(dentry, inode); 21 int err = sysv_add_link(dentry, inode);
@@ -35,7 +23,7 @@ static int add_nondir(struct dentry *dentry, struct inode *inode)
35 d_instantiate(dentry, inode); 23 d_instantiate(dentry, inode);
36 return 0; 24 return 0;
37 } 25 }
38 dec_count(inode); 26 inode_dec_link_count(inode);
39 iput(inode); 27 iput(inode);
40 return err; 28 return err;
41} 29}
@@ -124,7 +112,7 @@ out:
124 return err; 112 return err;
125 113
126out_fail: 114out_fail:
127 dec_count(inode); 115 inode_dec_link_count(inode);
128 iput(inode); 116 iput(inode);
129 goto out; 117 goto out;
130} 118}
@@ -138,7 +126,7 @@ static int sysv_link(struct dentry * old_dentry, struct inode * dir,
138 return -EMLINK; 126 return -EMLINK;
139 127
140 inode->i_ctime = CURRENT_TIME_SEC; 128 inode->i_ctime = CURRENT_TIME_SEC;
141 inc_count(inode); 129 inode_inc_link_count(inode);
142 atomic_inc(&inode->i_count); 130 atomic_inc(&inode->i_count);
143 131
144 return add_nondir(dentry, inode); 132 return add_nondir(dentry, inode);
@@ -151,7 +139,7 @@ static int sysv_mkdir(struct inode * dir, struct dentry *dentry, int mode)
151 139
152 if (dir->i_nlink >= SYSV_SB(dir->i_sb)->s_link_max) 140 if (dir->i_nlink >= SYSV_SB(dir->i_sb)->s_link_max)
153 goto out; 141 goto out;
154 inc_count(dir); 142 inode_inc_link_count(dir);
155 143
156 inode = sysv_new_inode(dir, S_IFDIR|mode); 144 inode = sysv_new_inode(dir, S_IFDIR|mode);
157 err = PTR_ERR(inode); 145 err = PTR_ERR(inode);
@@ -160,7 +148,7 @@ static int sysv_mkdir(struct inode * dir, struct dentry *dentry, int mode)
160 148
161 sysv_set_inode(inode, 0); 149 sysv_set_inode(inode, 0);
162 150
163 inc_count(inode); 151 inode_inc_link_count(inode);
164 152
165 err = sysv_make_empty(inode, dir); 153 err = sysv_make_empty(inode, dir);
166 if (err) 154 if (err)
@@ -175,11 +163,11 @@ out:
175 return err; 163 return err;
176 164
177out_fail: 165out_fail:
178 dec_count(inode); 166 inode_dec_link_count(inode);
179 dec_count(inode); 167 inode_dec_link_count(inode);
180 iput(inode); 168 iput(inode);
181out_dir: 169out_dir:
182 dec_count(dir); 170 inode_dec_link_count(dir);
183 goto out; 171 goto out;
184} 172}
185 173
@@ -199,7 +187,7 @@ static int sysv_unlink(struct inode * dir, struct dentry * dentry)
199 goto out; 187 goto out;
200 188
201 inode->i_ctime = dir->i_ctime; 189 inode->i_ctime = dir->i_ctime;
202 dec_count(inode); 190 inode_dec_link_count(inode);
203out: 191out:
204 return err; 192 return err;
205} 193}
@@ -213,8 +201,8 @@ static int sysv_rmdir(struct inode * dir, struct dentry * dentry)
213 err = sysv_unlink(dir, dentry); 201 err = sysv_unlink(dir, dentry);
214 if (!err) { 202 if (!err) {
215 inode->i_size = 0; 203 inode->i_size = 0;
216 dec_count(inode); 204 inode_dec_link_count(inode);
217 dec_count(dir); 205 inode_dec_link_count(dir);
218 } 206 }
219 } 207 }
220 return err; 208 return err;
@@ -258,34 +246,34 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
258 new_de = sysv_find_entry(new_dentry, &new_page); 246 new_de = sysv_find_entry(new_dentry, &new_page);
259 if (!new_de) 247 if (!new_de)
260 goto out_dir; 248 goto out_dir;
261 inc_count(old_inode); 249 inode_inc_link_count(old_inode);
262 sysv_set_link(new_de, new_page, old_inode); 250 sysv_set_link(new_de, new_page, old_inode);
263 new_inode->i_ctime = CURRENT_TIME_SEC; 251 new_inode->i_ctime = CURRENT_TIME_SEC;
264 if (dir_de) 252 if (dir_de)
265 new_inode->i_nlink--; 253 new_inode->i_nlink--;
266 dec_count(new_inode); 254 inode_dec_link_count(new_inode);
267 } else { 255 } else {
268 if (dir_de) { 256 if (dir_de) {
269 err = -EMLINK; 257 err = -EMLINK;
270 if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max) 258 if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max)
271 goto out_dir; 259 goto out_dir;
272 } 260 }
273 inc_count(old_inode); 261 inode_inc_link_count(old_inode);
274 err = sysv_add_link(new_dentry, old_inode); 262 err = sysv_add_link(new_dentry, old_inode);
275 if (err) { 263 if (err) {
276 dec_count(old_inode); 264 inode_dec_link_count(old_inode);
277 goto out_dir; 265 goto out_dir;
278 } 266 }
279 if (dir_de) 267 if (dir_de)
280 inc_count(new_dir); 268 inode_inc_link_count(new_dir);
281 } 269 }
282 270
283 sysv_delete_entry(old_de, old_page); 271 sysv_delete_entry(old_de, old_page);
284 dec_count(old_inode); 272 inode_dec_link_count(old_inode);
285 273
286 if (dir_de) { 274 if (dir_de) {
287 sysv_set_link(dir_de, dir_page, new_dir); 275 sysv_set_link(dir_de, dir_page, new_dir);
288 dec_count(old_dir); 276 inode_dec_link_count(old_dir);
289 } 277 }
290 return 0; 278 return 0;
291 279
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 201049ac8a96..ea521f846d97 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -152,7 +152,7 @@ static void udf_bitmap_free_blocks(struct super_block * sb,
152 int bitmap_nr; 152 int bitmap_nr;
153 unsigned long overflow; 153 unsigned long overflow;
154 154
155 down(&sbi->s_alloc_sem); 155 mutex_lock(&sbi->s_alloc_mutex);
156 if (bloc.logicalBlockNum < 0 || 156 if (bloc.logicalBlockNum < 0 ||
157 (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) 157 (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
158 { 158 {
@@ -211,7 +211,7 @@ error_return:
211 sb->s_dirt = 1; 211 sb->s_dirt = 1;
212 if (UDF_SB_LVIDBH(sb)) 212 if (UDF_SB_LVIDBH(sb))
213 mark_buffer_dirty(UDF_SB_LVIDBH(sb)); 213 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
214 up(&sbi->s_alloc_sem); 214 mutex_unlock(&sbi->s_alloc_mutex);
215 return; 215 return;
216} 216}
217 217
@@ -226,7 +226,7 @@ static int udf_bitmap_prealloc_blocks(struct super_block * sb,
226 int nr_groups, bitmap_nr; 226 int nr_groups, bitmap_nr;
227 struct buffer_head *bh; 227 struct buffer_head *bh;
228 228
229 down(&sbi->s_alloc_sem); 229 mutex_lock(&sbi->s_alloc_mutex);
230 if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition)) 230 if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
231 goto out; 231 goto out;
232 232
@@ -275,7 +275,7 @@ out:
275 mark_buffer_dirty(UDF_SB_LVIDBH(sb)); 275 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
276 } 276 }
277 sb->s_dirt = 1; 277 sb->s_dirt = 1;
278 up(&sbi->s_alloc_sem); 278 mutex_unlock(&sbi->s_alloc_mutex);
279 return alloc_count; 279 return alloc_count;
280} 280}
281 281
@@ -291,7 +291,7 @@ static int udf_bitmap_new_block(struct super_block * sb,
291 int newblock = 0; 291 int newblock = 0;
292 292
293 *err = -ENOSPC; 293 *err = -ENOSPC;
294 down(&sbi->s_alloc_sem); 294 mutex_lock(&sbi->s_alloc_mutex);
295 295
296repeat: 296repeat:
297 if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) 297 if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
@@ -364,7 +364,7 @@ repeat:
364 } 364 }
365 if (i >= (nr_groups*2)) 365 if (i >= (nr_groups*2))
366 { 366 {
367 up(&sbi->s_alloc_sem); 367 mutex_unlock(&sbi->s_alloc_mutex);
368 return newblock; 368 return newblock;
369 } 369 }
370 if (bit < sb->s_blocksize << 3) 370 if (bit < sb->s_blocksize << 3)
@@ -373,7 +373,7 @@ repeat:
373 bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3); 373 bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3);
374 if (bit >= sb->s_blocksize << 3) 374 if (bit >= sb->s_blocksize << 3)
375 { 375 {
376 up(&sbi->s_alloc_sem); 376 mutex_unlock(&sbi->s_alloc_mutex);
377 return 0; 377 return 0;
378 } 378 }
379 379
@@ -387,7 +387,7 @@ got_block:
387 */ 387 */
388 if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) 388 if (inode && DQUOT_ALLOC_BLOCK(inode, 1))
389 { 389 {
390 up(&sbi->s_alloc_sem); 390 mutex_unlock(&sbi->s_alloc_mutex);
391 *err = -EDQUOT; 391 *err = -EDQUOT;
392 return 0; 392 return 0;
393 } 393 }
@@ -410,13 +410,13 @@ got_block:
410 mark_buffer_dirty(UDF_SB_LVIDBH(sb)); 410 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
411 } 411 }
412 sb->s_dirt = 1; 412 sb->s_dirt = 1;
413 up(&sbi->s_alloc_sem); 413 mutex_unlock(&sbi->s_alloc_mutex);
414 *err = 0; 414 *err = 0;
415 return newblock; 415 return newblock;
416 416
417error_return: 417error_return:
418 *err = -EIO; 418 *err = -EIO;
419 up(&sbi->s_alloc_sem); 419 mutex_unlock(&sbi->s_alloc_mutex);
420 return 0; 420 return 0;
421} 421}
422 422
@@ -433,7 +433,7 @@ static void udf_table_free_blocks(struct super_block * sb,
433 int8_t etype; 433 int8_t etype;
434 int i; 434 int i;
435 435
436 down(&sbi->s_alloc_sem); 436 mutex_lock(&sbi->s_alloc_mutex);
437 if (bloc.logicalBlockNum < 0 || 437 if (bloc.logicalBlockNum < 0 ||
438 (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) 438 (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
439 { 439 {
@@ -666,7 +666,7 @@ static void udf_table_free_blocks(struct super_block * sb,
666 666
667error_return: 667error_return:
668 sb->s_dirt = 1; 668 sb->s_dirt = 1;
669 up(&sbi->s_alloc_sem); 669 mutex_unlock(&sbi->s_alloc_mutex);
670 return; 670 return;
671} 671}
672 672
@@ -692,7 +692,7 @@ static int udf_table_prealloc_blocks(struct super_block * sb,
692 else 692 else
693 return 0; 693 return 0;
694 694
695 down(&sbi->s_alloc_sem); 695 mutex_lock(&sbi->s_alloc_mutex);
696 extoffset = sizeof(struct unallocSpaceEntry); 696 extoffset = sizeof(struct unallocSpaceEntry);
697 bloc = UDF_I_LOCATION(table); 697 bloc = UDF_I_LOCATION(table);
698 698
@@ -736,7 +736,7 @@ static int udf_table_prealloc_blocks(struct super_block * sb,
736 mark_buffer_dirty(UDF_SB_LVIDBH(sb)); 736 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
737 sb->s_dirt = 1; 737 sb->s_dirt = 1;
738 } 738 }
739 up(&sbi->s_alloc_sem); 739 mutex_unlock(&sbi->s_alloc_mutex);
740 return alloc_count; 740 return alloc_count;
741} 741}
742 742
@@ -761,7 +761,7 @@ static int udf_table_new_block(struct super_block * sb,
761 else 761 else
762 return newblock; 762 return newblock;
763 763
764 down(&sbi->s_alloc_sem); 764 mutex_lock(&sbi->s_alloc_mutex);
765 if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) 765 if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
766 goal = 0; 766 goal = 0;
767 767
@@ -811,7 +811,7 @@ static int udf_table_new_block(struct super_block * sb,
811 if (spread == 0xFFFFFFFF) 811 if (spread == 0xFFFFFFFF)
812 { 812 {
813 udf_release_data(goal_bh); 813 udf_release_data(goal_bh);
814 up(&sbi->s_alloc_sem); 814 mutex_unlock(&sbi->s_alloc_mutex);
815 return 0; 815 return 0;
816 } 816 }
817 817
@@ -827,7 +827,7 @@ static int udf_table_new_block(struct super_block * sb,
827 if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) 827 if (inode && DQUOT_ALLOC_BLOCK(inode, 1))
828 { 828 {
829 udf_release_data(goal_bh); 829 udf_release_data(goal_bh);
830 up(&sbi->s_alloc_sem); 830 mutex_unlock(&sbi->s_alloc_mutex);
831 *err = -EDQUOT; 831 *err = -EDQUOT;
832 return 0; 832 return 0;
833 } 833 }
@@ -846,7 +846,7 @@ static int udf_table_new_block(struct super_block * sb,
846 } 846 }
847 847
848 sb->s_dirt = 1; 848 sb->s_dirt = 1;
849 up(&sbi->s_alloc_sem); 849 mutex_unlock(&sbi->s_alloc_mutex);
850 *err = 0; 850 *err = 0;
851 return newblock; 851 return newblock;
852} 852}
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index c9b707b470ca..3873c672cb4c 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -42,7 +42,7 @@ void udf_free_inode(struct inode * inode)
42 42
43 clear_inode(inode); 43 clear_inode(inode);
44 44
45 down(&sbi->s_alloc_sem); 45 mutex_lock(&sbi->s_alloc_mutex);
46 if (sbi->s_lvidbh) { 46 if (sbi->s_lvidbh) {
47 if (S_ISDIR(inode->i_mode)) 47 if (S_ISDIR(inode->i_mode))
48 UDF_SB_LVIDIU(sb)->numDirs = 48 UDF_SB_LVIDIU(sb)->numDirs =
@@ -53,7 +53,7 @@ void udf_free_inode(struct inode * inode)
53 53
54 mark_buffer_dirty(sbi->s_lvidbh); 54 mark_buffer_dirty(sbi->s_lvidbh);
55 } 55 }
56 up(&sbi->s_alloc_sem); 56 mutex_unlock(&sbi->s_alloc_mutex);
57 57
58 udf_free_blocks(sb, NULL, UDF_I_LOCATION(inode), 0, 1); 58 udf_free_blocks(sb, NULL, UDF_I_LOCATION(inode), 0, 1);
59} 59}
@@ -83,7 +83,7 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
83 return NULL; 83 return NULL;
84 } 84 }
85 85
86 down(&sbi->s_alloc_sem); 86 mutex_lock(&sbi->s_alloc_mutex);
87 UDF_I_UNIQUE(inode) = 0; 87 UDF_I_UNIQUE(inode) = 0;
88 UDF_I_LENEXTENTS(inode) = 0; 88 UDF_I_LENEXTENTS(inode) = 0;
89 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0; 89 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
@@ -148,7 +148,7 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
148 UDF_I_CRTIME(inode) = current_fs_time(inode->i_sb); 148 UDF_I_CRTIME(inode) = current_fs_time(inode->i_sb);
149 insert_inode_hash(inode); 149 insert_inode_hash(inode);
150 mark_inode_dirty(inode); 150 mark_inode_dirty(inode);
151 up(&sbi->s_alloc_sem); 151 mutex_unlock(&sbi->s_alloc_mutex);
152 152
153 if (DQUOT_ALLOC_INODE(inode)) 153 if (DQUOT_ALLOC_INODE(inode))
154 { 154 {
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 368d8f81fe54..9303c50c5d55 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1515,7 +1515,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1515 sb->s_fs_info = sbi; 1515 sb->s_fs_info = sbi;
1516 memset(UDF_SB(sb), 0x00, sizeof(struct udf_sb_info)); 1516 memset(UDF_SB(sb), 0x00, sizeof(struct udf_sb_info));
1517 1517
1518 init_MUTEX(&sbi->s_alloc_sem); 1518 mutex_init(&sbi->s_alloc_mutex);
1519 1519
1520 if (!udf_parse_options((char *)options, &uopt)) 1520 if (!udf_parse_options((char *)options, &uopt))
1521 goto error_out; 1521 goto error_out;
diff --git a/fs/ufs/file.c b/fs/ufs/file.c
index ed69d7fe1b5d..62ad481810ef 100644
--- a/fs/ufs/file.c
+++ b/fs/ufs/file.c
@@ -23,18 +23,8 @@
23 * ext2 fs regular file handling primitives 23 * ext2 fs regular file handling primitives
24 */ 24 */
25 25
26#include <asm/uaccess.h>
27#include <asm/system.h>
28
29#include <linux/errno.h>
30#include <linux/fs.h> 26#include <linux/fs.h>
31#include <linux/ufs_fs.h> 27#include <linux/ufs_fs.h>
32#include <linux/fcntl.h>
33#include <linux/time.h>
34#include <linux/stat.h>
35#include <linux/mm.h>
36#include <linux/pagemap.h>
37#include <linux/smp_lock.h>
38 28
39/* 29/*
40 * We have mostly NULL's here: the current defaults are ok for 30 * We have mostly NULL's here: the current defaults are ok for
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 2958cde7d3d6..8d5f98a01c74 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -43,18 +43,6 @@
43#define UFSD(x) 43#define UFSD(x)
44#endif 44#endif
45 45
46static inline void ufs_inc_count(struct inode *inode)
47{
48 inode->i_nlink++;
49 mark_inode_dirty(inode);
50}
51
52static inline void ufs_dec_count(struct inode *inode)
53{
54 inode->i_nlink--;
55 mark_inode_dirty(inode);
56}
57
58static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode) 46static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
59{ 47{
60 int err = ufs_add_link(dentry, inode); 48 int err = ufs_add_link(dentry, inode);
@@ -62,7 +50,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
62 d_instantiate(dentry, inode); 50 d_instantiate(dentry, inode);
63 return 0; 51 return 0;
64 } 52 }
65 ufs_dec_count(inode); 53 inode_dec_link_count(inode);
66 iput(inode); 54 iput(inode);
67 return err; 55 return err;
68} 56}
@@ -173,7 +161,7 @@ out:
173 return err; 161 return err;
174 162
175out_fail: 163out_fail:
176 ufs_dec_count(inode); 164 inode_dec_link_count(inode);
177 iput(inode); 165 iput(inode);
178 goto out; 166 goto out;
179} 167}
@@ -191,7 +179,7 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir,
191 } 179 }
192 180
193 inode->i_ctime = CURRENT_TIME_SEC; 181 inode->i_ctime = CURRENT_TIME_SEC;
194 ufs_inc_count(inode); 182 inode_inc_link_count(inode);
195 atomic_inc(&inode->i_count); 183 atomic_inc(&inode->i_count);
196 184
197 error = ufs_add_nondir(dentry, inode); 185 error = ufs_add_nondir(dentry, inode);
@@ -208,7 +196,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
208 goto out; 196 goto out;
209 197
210 lock_kernel(); 198 lock_kernel();
211 ufs_inc_count(dir); 199 inode_inc_link_count(dir);
212 200
213 inode = ufs_new_inode(dir, S_IFDIR|mode); 201 inode = ufs_new_inode(dir, S_IFDIR|mode);
214 err = PTR_ERR(inode); 202 err = PTR_ERR(inode);
@@ -218,7 +206,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
218 inode->i_op = &ufs_dir_inode_operations; 206 inode->i_op = &ufs_dir_inode_operations;
219 inode->i_fop = &ufs_dir_operations; 207 inode->i_fop = &ufs_dir_operations;
220 208
221 ufs_inc_count(inode); 209 inode_inc_link_count(inode);
222 210
223 err = ufs_make_empty(inode, dir); 211 err = ufs_make_empty(inode, dir);
224 if (err) 212 if (err)
@@ -234,11 +222,11 @@ out:
234 return err; 222 return err;
235 223
236out_fail: 224out_fail:
237 ufs_dec_count(inode); 225 inode_dec_link_count(inode);
238 ufs_dec_count(inode); 226 inode_dec_link_count(inode);
239 iput (inode); 227 iput (inode);
240out_dir: 228out_dir:
241 ufs_dec_count(dir); 229 inode_dec_link_count(dir);
242 unlock_kernel(); 230 unlock_kernel();
243 goto out; 231 goto out;
244} 232}
@@ -260,7 +248,7 @@ static int ufs_unlink(struct inode * dir, struct dentry *dentry)
260 goto out; 248 goto out;
261 249
262 inode->i_ctime = dir->i_ctime; 250 inode->i_ctime = dir->i_ctime;
263 ufs_dec_count(inode); 251 inode_dec_link_count(inode);
264 err = 0; 252 err = 0;
265out: 253out:
266 unlock_kernel(); 254 unlock_kernel();
@@ -277,8 +265,8 @@ static int ufs_rmdir (struct inode * dir, struct dentry *dentry)
277 err = ufs_unlink(dir, dentry); 265 err = ufs_unlink(dir, dentry);
278 if (!err) { 266 if (!err) {
279 inode->i_size = 0; 267 inode->i_size = 0;
280 ufs_dec_count(inode); 268 inode_dec_link_count(inode);
281 ufs_dec_count(dir); 269 inode_dec_link_count(dir);
282 } 270 }
283 } 271 }
284 unlock_kernel(); 272 unlock_kernel();
@@ -319,35 +307,35 @@ static int ufs_rename (struct inode * old_dir, struct dentry * old_dentry,
319 new_de = ufs_find_entry (new_dentry, &new_bh); 307 new_de = ufs_find_entry (new_dentry, &new_bh);
320 if (!new_de) 308 if (!new_de)
321 goto out_dir; 309 goto out_dir;
322 ufs_inc_count(old_inode); 310 inode_inc_link_count(old_inode);
323 ufs_set_link(new_dir, new_de, new_bh, old_inode); 311 ufs_set_link(new_dir, new_de, new_bh, old_inode);
324 new_inode->i_ctime = CURRENT_TIME_SEC; 312 new_inode->i_ctime = CURRENT_TIME_SEC;
325 if (dir_de) 313 if (dir_de)
326 new_inode->i_nlink--; 314 new_inode->i_nlink--;
327 ufs_dec_count(new_inode); 315 inode_dec_link_count(new_inode);
328 } else { 316 } else {
329 if (dir_de) { 317 if (dir_de) {
330 err = -EMLINK; 318 err = -EMLINK;
331 if (new_dir->i_nlink >= UFS_LINK_MAX) 319 if (new_dir->i_nlink >= UFS_LINK_MAX)
332 goto out_dir; 320 goto out_dir;
333 } 321 }
334 ufs_inc_count(old_inode); 322 inode_inc_link_count(old_inode);
335 err = ufs_add_link(new_dentry, old_inode); 323 err = ufs_add_link(new_dentry, old_inode);
336 if (err) { 324 if (err) {
337 ufs_dec_count(old_inode); 325 inode_dec_link_count(old_inode);
338 goto out_dir; 326 goto out_dir;
339 } 327 }
340 if (dir_de) 328 if (dir_de)
341 ufs_inc_count(new_dir); 329 inode_inc_link_count(new_dir);
342 } 330 }
343 331
344 ufs_delete_entry (old_dir, old_de, old_bh); 332 ufs_delete_entry (old_dir, old_de, old_bh);
345 333
346 ufs_dec_count(old_inode); 334 inode_dec_link_count(old_inode);
347 335
348 if (dir_de) { 336 if (dir_de) {
349 ufs_set_link(old_inode, dir_de, dir_bh, new_dir); 337 ufs_set_link(old_inode, dir_de, dir_bh, new_dir);
350 ufs_dec_count(old_dir); 338 inode_dec_link_count(old_dir);
351 } 339 }
352 unlock_kernel(); 340 unlock_kernel();
353 return 0; 341 return 0;
diff --git a/fs/xfs/Makefile-linux-2.6 b/fs/xfs/Makefile-linux-2.6
index 97bd4743b461..5d73eaa1971f 100644
--- a/fs/xfs/Makefile-linux-2.6
+++ b/fs/xfs/Makefile-linux-2.6
@@ -1,33 +1,19 @@
1# 1#
2# Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved. 2# Copyright (c) 2000-2005 Silicon Graphics, Inc.
3# All Rights Reserved.
3# 4#
4# This program is free software; you can redistribute it and/or modify it 5# This program is free software; you can redistribute it and/or
5# under the terms of version 2 of the GNU General Public License as 6# modify it under the terms of the GNU General Public License as
6# published by the Free Software Foundation. 7# published by the Free Software Foundation.
7# 8#
8# This program is distributed in the hope that it would be useful, but 9# This program is distributed in the hope that it would be useful,
9# WITHOUT ANY WARRANTY; without even the implied warranty of 10# but WITHOUT ANY WARRANTY; without even the implied warranty of
10# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU General Public License for more details.
11# 13#
12# Further, this software is distributed without any warranty that it is 14# You should have received a copy of the GNU General Public License
13# free of the rightful claim of any third person regarding infringement 15# along with this program; if not, write the Free Software Foundation,
14# or the like. Any license provided herein, whether implied or 16# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
15# otherwise, applies only to this software file. Patent licenses, if
16# any, provided herein do not apply to combinations of this program with
17# other software, or any other product whatsoever.
18#
19# You should have received a copy of the GNU General Public License along
20# with this program; if not, write the Free Software Foundation, Inc., 59
21# Temple Place - Suite 330, Boston MA 02111-1307, USA.
22#
23# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24# Mountain View, CA 94043, or:
25#
26# http://www.sgi.com
27#
28# For further information regarding this notice, see:
29#
30# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
31# 17#
32 18
33EXTRA_CFLAGS += -Ifs/xfs -Ifs/xfs/linux-2.6 -funsigned-char 19EXTRA_CFLAGS += -Ifs/xfs -Ifs/xfs/linux-2.6 -funsigned-char
@@ -36,7 +22,7 @@ XFS_LINUX := linux-2.6
36 22
37ifeq ($(CONFIG_XFS_DEBUG),y) 23ifeq ($(CONFIG_XFS_DEBUG),y)
38 EXTRA_CFLAGS += -g -DSTATIC="" -DDEBUG 24 EXTRA_CFLAGS += -g -DSTATIC="" -DDEBUG
39 EXTRA_CFLAGS += -DPAGEBUF_LOCK_TRACKING 25 EXTRA_CFLAGS += -DXFS_BUF_LOCK_TRACKING
40endif 26endif
41ifeq ($(CONFIG_XFS_TRACE),y) 27ifeq ($(CONFIG_XFS_TRACE),y)
42 EXTRA_CFLAGS += -DXFS_ALLOC_TRACE 28 EXTRA_CFLAGS += -DXFS_ALLOC_TRACE
@@ -50,7 +36,7 @@ ifeq ($(CONFIG_XFS_TRACE),y)
50 EXTRA_CFLAGS += -DXFS_ILOCK_TRACE 36 EXTRA_CFLAGS += -DXFS_ILOCK_TRACE
51 EXTRA_CFLAGS += -DXFS_LOG_TRACE 37 EXTRA_CFLAGS += -DXFS_LOG_TRACE
52 EXTRA_CFLAGS += -DXFS_RW_TRACE 38 EXTRA_CFLAGS += -DXFS_RW_TRACE
53 EXTRA_CFLAGS += -DPAGEBUF_TRACE 39 EXTRA_CFLAGS += -DXFS_BUF_TRACE
54 EXTRA_CFLAGS += -DXFS_VNODE_TRACE 40 EXTRA_CFLAGS += -DXFS_VNODE_TRACE
55endif 41endif
56 42
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index c64a29cdfff3..f0268a84e6fd 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -23,17 +23,8 @@
23#include <linux/mm.h> 23#include <linux/mm.h>
24 24
25/* 25/*
26 * memory management routines 26 * Process flags handling
27 */ 27 */
28#define KM_SLEEP 0x0001u
29#define KM_NOSLEEP 0x0002u
30#define KM_NOFS 0x0004u
31#define KM_MAYFAIL 0x0008u
32
33#define kmem_zone kmem_cache
34#define kmem_zone_t struct kmem_cache
35
36typedef unsigned long xfs_pflags_t;
37 28
38#define PFLAGS_TEST_NOIO() (current->flags & PF_NOIO) 29#define PFLAGS_TEST_NOIO() (current->flags & PF_NOIO)
39#define PFLAGS_TEST_FSTRANS() (current->flags & PF_FSTRANS) 30#define PFLAGS_TEST_FSTRANS() (current->flags & PF_FSTRANS)
@@ -67,74 +58,102 @@ typedef unsigned long xfs_pflags_t;
67 *(NSTATEP) = *(OSTATEP); \ 58 *(NSTATEP) = *(OSTATEP); \
68} while (0) 59} while (0)
69 60
70static __inline gfp_t kmem_flags_convert(unsigned int __nocast flags) 61/*
62 * General memory allocation interfaces
63 */
64
65#define KM_SLEEP 0x0001u
66#define KM_NOSLEEP 0x0002u
67#define KM_NOFS 0x0004u
68#define KM_MAYFAIL 0x0008u
69
70/*
71 * We use a special process flag to avoid recursive callbacks into
72 * the filesystem during transactions. We will also issue our own
73 * warnings, so we explicitly skip any generic ones (silly of us).
74 */
75static inline gfp_t
76kmem_flags_convert(unsigned int __nocast flags)
71{ 77{
72 gfp_t lflags = __GFP_NOWARN; /* we'll report problems, if need be */ 78 gfp_t lflags;
73 79
74#ifdef DEBUG 80 BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL));
75 if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) {
76 printk(KERN_WARNING
77 "XFS: memory allocation with wrong flags (%x)\n", flags);
78 BUG();
79 }
80#endif
81 81
82 if (flags & KM_NOSLEEP) { 82 if (flags & KM_NOSLEEP) {
83 lflags |= GFP_ATOMIC; 83 lflags = GFP_ATOMIC | __GFP_NOWARN;
84 } else { 84 } else {
85 lflags |= GFP_KERNEL; 85 lflags = GFP_KERNEL | __GFP_NOWARN;
86
 87 /* avoid recursive callbacks to filesystem during transactions */
88 if (PFLAGS_TEST_FSTRANS() || (flags & KM_NOFS)) 86 if (PFLAGS_TEST_FSTRANS() || (flags & KM_NOFS))
89 lflags &= ~__GFP_FS; 87 lflags &= ~__GFP_FS;
90 } 88 }
91 89 return lflags;
92 return lflags;
93} 90}
94 91
95static __inline kmem_zone_t * 92extern void *kmem_alloc(size_t, unsigned int __nocast);
93extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
94extern void *kmem_zalloc(size_t, unsigned int __nocast);
95extern void kmem_free(void *, size_t);
96
97/*
98 * Zone interfaces
99 */
100
101#define KM_ZONE_HWALIGN SLAB_HWCACHE_ALIGN
102#define KM_ZONE_RECLAIM SLAB_RECLAIM_ACCOUNT
103#define KM_ZONE_SPREAD 0
104
105#define kmem_zone kmem_cache
106#define kmem_zone_t struct kmem_cache
107
108static inline kmem_zone_t *
96kmem_zone_init(int size, char *zone_name) 109kmem_zone_init(int size, char *zone_name)
97{ 110{
98 return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL); 111 return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL);
99} 112}
100 113
101static __inline void 114static inline kmem_zone_t *
115kmem_zone_init_flags(int size, char *zone_name, unsigned long flags,
116 void (*construct)(void *, kmem_zone_t *, unsigned long))
117{
118 return kmem_cache_create(zone_name, size, 0, flags, construct, NULL);
119}
120
121static inline void
102kmem_zone_free(kmem_zone_t *zone, void *ptr) 122kmem_zone_free(kmem_zone_t *zone, void *ptr)
103{ 123{
104 kmem_cache_free(zone, ptr); 124 kmem_cache_free(zone, ptr);
105} 125}
106 126
107static __inline void 127static inline void
108kmem_zone_destroy(kmem_zone_t *zone) 128kmem_zone_destroy(kmem_zone_t *zone)
109{ 129{
110 if (zone && kmem_cache_destroy(zone)) 130 if (zone && kmem_cache_destroy(zone))
111 BUG(); 131 BUG();
112} 132}
113 133
114extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
115extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast); 134extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
135extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
116 136
117extern void *kmem_alloc(size_t, unsigned int __nocast); 137/*
118extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast); 138 * Low memory cache shrinkers
119extern void *kmem_zalloc(size_t, unsigned int __nocast); 139 */
120extern void kmem_free(void *, size_t);
121 140
122typedef struct shrinker *kmem_shaker_t; 141typedef struct shrinker *kmem_shaker_t;
123typedef int (*kmem_shake_func_t)(int, gfp_t); 142typedef int (*kmem_shake_func_t)(int, gfp_t);
124 143
125static __inline kmem_shaker_t 144static inline kmem_shaker_t
126kmem_shake_register(kmem_shake_func_t sfunc) 145kmem_shake_register(kmem_shake_func_t sfunc)
127{ 146{
128 return set_shrinker(DEFAULT_SEEKS, sfunc); 147 return set_shrinker(DEFAULT_SEEKS, sfunc);
129} 148}
130 149
131static __inline void 150static inline void
132kmem_shake_deregister(kmem_shaker_t shrinker) 151kmem_shake_deregister(kmem_shaker_t shrinker)
133{ 152{
134 remove_shrinker(shrinker); 153 remove_shrinker(shrinker);
135} 154}
136 155
137static __inline int 156static inline int
138kmem_shake_allow(gfp_t gfp_mask) 157kmem_shake_allow(gfp_t gfp_mask)
139{ 158{
140 return (gfp_mask & __GFP_WAIT); 159 return (gfp_mask & __GFP_WAIT);
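
The kmem_flags_convert() change above centralises how XFS's private KM_* allocation flags map onto Linux gfp_t masks: KM_NOSLEEP becomes an atomic allocation, everything else starts from GFP_KERNEL, and __GFP_FS is stripped whenever the caller is inside a transaction or passed KM_NOFS. Below is a minimal standalone C sketch of that mapping; the GFP_* bit values are illustrative stand-ins rather than the real <linux/gfp.h> definitions, and an explicit parameter replaces the PFLAGS_TEST_FSTRANS() process-flag test.

/*
 * Standalone sketch of the KM_* -> gfp_t mapping performed by
 * kmem_flags_convert() above.  The GFP_* bit values below are
 * invented stand-ins, not the real <linux/gfp.h> definitions.
 */
#include <assert.h>
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_WAIT   0x01u
#define __GFP_IO     0x02u
#define __GFP_FS     0x04u
#define __GFP_NOWARN 0x08u
#define __GFP_HIGH   0x10u
#define GFP_ATOMIC   __GFP_HIGH                      /* no sleeping */
#define GFP_KERNEL   (__GFP_WAIT | __GFP_IO | __GFP_FS)

#define KM_SLEEP     0x0001u
#define KM_NOSLEEP   0x0002u
#define KM_NOFS      0x0004u
#define KM_MAYFAIL   0x0008u

static gfp_t kmem_flags_convert(unsigned int flags, int in_fs_transaction)
{
	gfp_t lflags;

	/* Only the four known KM_* bits are legal (BUG_ON in the kernel). */
	assert(!(flags & ~(KM_SLEEP | KM_NOSLEEP | KM_NOFS | KM_MAYFAIL)));

	if (flags & KM_NOSLEEP) {
		lflags = GFP_ATOMIC | __GFP_NOWARN;
	} else {
		lflags = GFP_KERNEL | __GFP_NOWARN;
		/* Recursing into the fs mid-transaction could deadlock. */
		if (in_fs_transaction || (flags & KM_NOFS))
			lflags &= ~__GFP_FS;
	}
	return lflags;
}

int main(void)
{
	printf("KM_SLEEP          -> %#x\n", kmem_flags_convert(KM_SLEEP, 0));
	printf("KM_NOFS           -> %#x\n", kmem_flags_convert(KM_NOFS, 0));
	printf("KM_SLEEP in trans -> %#x\n", kmem_flags_convert(KM_SLEEP, 1));
	printf("KM_NOSLEEP        -> %#x\n", kmem_flags_convert(KM_NOSLEEP, 0));
	return 0;
}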
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 74d8be87f983..97fc056130eb 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -43,7 +43,29 @@
43#include <linux/pagevec.h> 43#include <linux/pagevec.h>
44#include <linux/writeback.h> 44#include <linux/writeback.h>
45 45
46STATIC void xfs_count_page_state(struct page *, int *, int *, int *); 46STATIC void
47xfs_count_page_state(
48 struct page *page,
49 int *delalloc,
50 int *unmapped,
51 int *unwritten)
52{
53 struct buffer_head *bh, *head;
54
55 *delalloc = *unmapped = *unwritten = 0;
56
57 bh = head = page_buffers(page);
58 do {
59 if (buffer_uptodate(bh) && !buffer_mapped(bh))
60 (*unmapped) = 1;
61 else if (buffer_unwritten(bh) && !buffer_delay(bh))
62 clear_buffer_unwritten(bh);
63 else if (buffer_unwritten(bh))
64 (*unwritten) = 1;
65 else if (buffer_delay(bh))
66 (*delalloc) = 1;
67 } while ((bh = bh->b_this_page) != head);
68}
47 69
48#if defined(XFS_RW_TRACE) 70#if defined(XFS_RW_TRACE)
49void 71void
@@ -54,7 +76,7 @@ xfs_page_trace(
54 int mask) 76 int mask)
55{ 77{
56 xfs_inode_t *ip; 78 xfs_inode_t *ip;
57 vnode_t *vp = LINVFS_GET_VP(inode); 79 vnode_t *vp = vn_from_inode(inode);
58 loff_t isize = i_size_read(inode); 80 loff_t isize = i_size_read(inode);
59 loff_t offset = page_offset(page); 81 loff_t offset = page_offset(page);
60 int delalloc = -1, unmapped = -1, unwritten = -1; 82 int delalloc = -1, unmapped = -1, unwritten = -1;
@@ -81,7 +103,7 @@ xfs_page_trace(
81 (void *)((unsigned long)delalloc), 103 (void *)((unsigned long)delalloc),
82 (void *)((unsigned long)unmapped), 104 (void *)((unsigned long)unmapped),
83 (void *)((unsigned long)unwritten), 105 (void *)((unsigned long)unwritten),
84 (void *)NULL, 106 (void *)((unsigned long)current_pid()),
85 (void *)NULL); 107 (void *)NULL);
86} 108}
87#else 109#else
@@ -192,7 +214,7 @@ xfs_alloc_ioend(
192 ioend->io_uptodate = 1; /* cleared if any I/O fails */ 214 ioend->io_uptodate = 1; /* cleared if any I/O fails */
193 ioend->io_list = NULL; 215 ioend->io_list = NULL;
194 ioend->io_type = type; 216 ioend->io_type = type;
195 ioend->io_vnode = LINVFS_GET_VP(inode); 217 ioend->io_vnode = vn_from_inode(inode);
196 ioend->io_buffer_head = NULL; 218 ioend->io_buffer_head = NULL;
197 ioend->io_buffer_tail = NULL; 219 ioend->io_buffer_tail = NULL;
198 atomic_inc(&ioend->io_vnode->v_iocount); 220 atomic_inc(&ioend->io_vnode->v_iocount);
@@ -217,7 +239,7 @@ xfs_map_blocks(
217 xfs_iomap_t *mapp, 239 xfs_iomap_t *mapp,
218 int flags) 240 int flags)
219{ 241{
220 vnode_t *vp = LINVFS_GET_VP(inode); 242 vnode_t *vp = vn_from_inode(inode);
221 int error, nmaps = 1; 243 int error, nmaps = 1;
222 244
223 VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error); 245 VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
@@ -462,28 +484,37 @@ xfs_add_to_ioend(
462} 484}
463 485
464STATIC void 486STATIC void
487xfs_map_buffer(
488 struct buffer_head *bh,
489 xfs_iomap_t *mp,
490 xfs_off_t offset,
491 uint block_bits)
492{
493 sector_t bn;
494
495 ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);
496
497 bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
498 ((offset - mp->iomap_offset) >> block_bits);
499
500 ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));
501
502 bh->b_blocknr = bn;
503 set_buffer_mapped(bh);
504}
505
506STATIC void
465xfs_map_at_offset( 507xfs_map_at_offset(
466 struct buffer_head *bh, 508 struct buffer_head *bh,
467 loff_t offset, 509 loff_t offset,
468 int block_bits, 510 int block_bits,
469 xfs_iomap_t *iomapp) 511 xfs_iomap_t *iomapp)
470{ 512{
471 xfs_daddr_t bn;
472 int sector_shift;
473
474 ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE)); 513 ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
475 ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY)); 514 ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
476 ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);
477
478 sector_shift = block_bits - BBSHIFT;
479 bn = (iomapp->iomap_bn >> sector_shift) +
480 ((offset - iomapp->iomap_offset) >> block_bits);
481
482 ASSERT(bn || (iomapp->iomap_flags & IOMAP_REALTIME));
483 ASSERT((bn << sector_shift) >= iomapp->iomap_bn);
484 515
485 lock_buffer(bh); 516 lock_buffer(bh);
486 bh->b_blocknr = bn; 517 xfs_map_buffer(bh, iomapp, offset, block_bits);
487 bh->b_bdev = iomapp->iomap_target->bt_bdev; 518 bh->b_bdev = iomapp->iomap_target->bt_bdev;
488 set_buffer_mapped(bh); 519 set_buffer_mapped(bh);
489 clear_buffer_delay(bh); 520 clear_buffer_delay(bh);
@@ -616,7 +647,7 @@ xfs_is_delayed_page(
616 acceptable = (type == IOMAP_UNWRITTEN); 647 acceptable = (type == IOMAP_UNWRITTEN);
617 else if (buffer_delay(bh)) 648 else if (buffer_delay(bh))
618 acceptable = (type == IOMAP_DELAY); 649 acceptable = (type == IOMAP_DELAY);
619 else if (buffer_mapped(bh)) 650 else if (buffer_dirty(bh) && buffer_mapped(bh))
620 acceptable = (type == 0); 651 acceptable = (type == 0);
621 else 652 else
622 break; 653 break;
@@ -1040,8 +1071,159 @@ error:
1040 return err; 1071 return err;
1041} 1072}
1042 1073
1074/*
1075 * writepage: Called from one of two places:
1076 *
1077 * 1. we are flushing a delalloc buffer head.
1078 *
1079 * 2. we are writing out a dirty page. Typically the page dirty
1080 * state is cleared before we get here. In this case is it
1081 * conceivable we have no buffer heads.
1082 *
1083 * For delalloc space on the page we need to allocate space and
1084 * flush it. For unmapped buffer heads on the page we should
1085 * allocate space if the page is uptodate. For any other dirty
1086 * buffer heads on the page we should flush them.
1087 *
1088 * If we detect that a transaction would be required to flush
1089 * the page, we have to check the process flags first: if we
1090 * are already in a transaction or disk I/O during allocations
1091 * is off, we need to fail the writepage and redirty the page.
1092 */
1093
1094STATIC int
1095xfs_vm_writepage(
1096 struct page *page,
1097 struct writeback_control *wbc)
1098{
1099 int error;
1100 int need_trans;
1101 int delalloc, unmapped, unwritten;
1102 struct inode *inode = page->mapping->host;
1103
1104 xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);
1105
1106 /*
1107 * We need a transaction if:
1108 * 1. There are delalloc buffers on the page
1109 * 2. The page is uptodate and we have unmapped buffers
1110 * 3. The page is uptodate and we have no buffers
1111 * 4. There are unwritten buffers on the page
1112 */
1113
1114 if (!page_has_buffers(page)) {
1115 unmapped = 1;
1116 need_trans = 1;
1117 } else {
1118 xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
1119 if (!PageUptodate(page))
1120 unmapped = 0;
1121 need_trans = delalloc + unmapped + unwritten;
1122 }
1123
1124 /*
1125 * If we need a transaction and the process flags say
1126 * we are already in a transaction, or no IO is allowed
1127 * then mark the page dirty again and leave the page
1128 * as is.
1129 */
1130 if (PFLAGS_TEST_FSTRANS() && need_trans)
1131 goto out_fail;
1132
1133 /*
1134 * Delay hooking up buffer heads until we have
1135 * made our go/no-go decision.
1136 */
1137 if (!page_has_buffers(page))
1138 create_empty_buffers(page, 1 << inode->i_blkbits, 0);
1139
1140 /*
1141 * Convert delayed allocate, unwritten or unmapped space
1142 * to real space and flush out to disk.
1143 */
1144 error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
1145 if (error == -EAGAIN)
1146 goto out_fail;
1147 if (unlikely(error < 0))
1148 goto out_unlock;
1149
1150 return 0;
1151
1152out_fail:
1153 redirty_page_for_writepage(wbc, page);
1154 unlock_page(page);
1155 return 0;
1156out_unlock:
1157 unlock_page(page);
1158 return error;
1159}
1160
1161/*
1162 * Called to move a page into cleanable state - and from there
1163 * to be released. Possibly the page is already clean. We always
1164 * have buffer heads in this call.
1165 *
1166 * Returns 0 if the page is ok to release, 1 otherwise.
1167 *
1168 * Possible scenarios are:
1169 *
1170 * 1. We are being called to release a page which has been written
1171 * to via regular I/O. buffer heads will be dirty and possibly
1172 * delalloc. If there are no delalloc buffer heads then we
1173 * can just return zero.
1174 *
1175 * 2. We are called to release a page which has been written via
1176 * mmap, all we need to do is ensure there is no delalloc
1177 * state in the buffer heads, if not we can let the caller
1178 * free them and we should come back later via writepage.
1179 */
1043STATIC int 1180STATIC int
1044__linvfs_get_block( 1181xfs_vm_releasepage(
1182 struct page *page,
1183 gfp_t gfp_mask)
1184{
1185 struct inode *inode = page->mapping->host;
1186 int dirty, delalloc, unmapped, unwritten;
1187 struct writeback_control wbc = {
1188 .sync_mode = WB_SYNC_ALL,
1189 .nr_to_write = 1,
1190 };
1191
1192 xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);
1193
1194 if (!page_has_buffers(page))
1195 return 0;
1196
1197 xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
1198 if (!delalloc && !unwritten)
1199 goto free_buffers;
1200
1201 if (!(gfp_mask & __GFP_FS))
1202 return 0;
1203
1204 /* If we are already inside a transaction or the thread cannot
1205 * do I/O, we cannot release this page.
1206 */
1207 if (PFLAGS_TEST_FSTRANS())
1208 return 0;
1209
1210 /*
1211 * Convert delalloc space to real space, do not flush the
1212 * data out to disk, that will be done by the caller.
1213 * Never need to allocate space here - we will always
1214 * come back to writepage in that case.
1215 */
1216 dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
1217 if (dirty == 0 && !unwritten)
1218 goto free_buffers;
1219 return 0;
1220
1221free_buffers:
1222 return try_to_free_buffers(page);
1223}
1224
1225STATIC int
1226__xfs_get_block(
1045 struct inode *inode, 1227 struct inode *inode,
1046 sector_t iblock, 1228 sector_t iblock,
1047 unsigned long blocks, 1229 unsigned long blocks,
@@ -1050,7 +1232,7 @@ __linvfs_get_block(
1050 int direct, 1232 int direct,
1051 bmapi_flags_t flags) 1233 bmapi_flags_t flags)
1052{ 1234{
1053 vnode_t *vp = LINVFS_GET_VP(inode); 1235 vnode_t *vp = vn_from_inode(inode);
1054 xfs_iomap_t iomap; 1236 xfs_iomap_t iomap;
1055 xfs_off_t offset; 1237 xfs_off_t offset;
1056 ssize_t size; 1238 ssize_t size;
@@ -1073,21 +1255,13 @@ __linvfs_get_block(
1073 return 0; 1255 return 0;
1074 1256
1075 if (iomap.iomap_bn != IOMAP_DADDR_NULL) { 1257 if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
1076 xfs_daddr_t bn; 1258 /*
1077 xfs_off_t delta; 1259 * For unwritten extents do not report a disk address on
1078
1079 /* For unwritten extents do not report a disk address on
1080 * the read case (treat as if we're reading into a hole). 1260 * the read case (treat as if we're reading into a hole).
1081 */ 1261 */
1082 if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) { 1262 if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
1083 delta = offset - iomap.iomap_offset; 1263 xfs_map_buffer(bh_result, &iomap, offset,
1084 delta >>= inode->i_blkbits; 1264 inode->i_blkbits);
1085
1086 bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);
1087 bn += delta;
1088 BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME));
1089 bh_result->b_blocknr = bn;
1090 set_buffer_mapped(bh_result);
1091 } 1265 }
1092 if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) { 1266 if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
1093 if (direct) 1267 if (direct)
@@ -1130,30 +1304,30 @@ __linvfs_get_block(
1130} 1304}
1131 1305
1132int 1306int
1133linvfs_get_block( 1307xfs_get_block(
1134 struct inode *inode, 1308 struct inode *inode,
1135 sector_t iblock, 1309 sector_t iblock,
1136 struct buffer_head *bh_result, 1310 struct buffer_head *bh_result,
1137 int create) 1311 int create)
1138{ 1312{
1139 return __linvfs_get_block(inode, iblock, 0, bh_result, 1313 return __xfs_get_block(inode, iblock, 0, bh_result,
1140 create, 0, BMAPI_WRITE); 1314 create, 0, BMAPI_WRITE);
1141} 1315}
1142 1316
1143STATIC int 1317STATIC int
1144linvfs_get_blocks_direct( 1318xfs_get_blocks_direct(
1145 struct inode *inode, 1319 struct inode *inode,
1146 sector_t iblock, 1320 sector_t iblock,
1147 unsigned long max_blocks, 1321 unsigned long max_blocks,
1148 struct buffer_head *bh_result, 1322 struct buffer_head *bh_result,
1149 int create) 1323 int create)
1150{ 1324{
1151 return __linvfs_get_block(inode, iblock, max_blocks, bh_result, 1325 return __xfs_get_block(inode, iblock, max_blocks, bh_result,
1152 create, 1, BMAPI_WRITE|BMAPI_DIRECT); 1326 create, 1, BMAPI_WRITE|BMAPI_DIRECT);
1153} 1327}
1154 1328
1155STATIC void 1329STATIC void
1156linvfs_end_io_direct( 1330xfs_end_io_direct(
1157 struct kiocb *iocb, 1331 struct kiocb *iocb,
1158 loff_t offset, 1332 loff_t offset,
1159 ssize_t size, 1333 ssize_t size,
@@ -1191,7 +1365,7 @@ linvfs_end_io_direct(
1191} 1365}
1192 1366
1193STATIC ssize_t 1367STATIC ssize_t
1194linvfs_direct_IO( 1368xfs_vm_direct_IO(
1195 int rw, 1369 int rw,
1196 struct kiocb *iocb, 1370 struct kiocb *iocb,
1197 const struct iovec *iov, 1371 const struct iovec *iov,
@@ -1200,7 +1374,7 @@ linvfs_direct_IO(
1200{ 1374{
1201 struct file *file = iocb->ki_filp; 1375 struct file *file = iocb->ki_filp;
1202 struct inode *inode = file->f_mapping->host; 1376 struct inode *inode = file->f_mapping->host;
1203 vnode_t *vp = LINVFS_GET_VP(inode); 1377 vnode_t *vp = vn_from_inode(inode);
1204 xfs_iomap_t iomap; 1378 xfs_iomap_t iomap;
1205 int maps = 1; 1379 int maps = 1;
1206 int error; 1380 int error;
@@ -1215,164 +1389,61 @@ linvfs_direct_IO(
1215 ret = blockdev_direct_IO_own_locking(rw, iocb, inode, 1389 ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
1216 iomap.iomap_target->bt_bdev, 1390 iomap.iomap_target->bt_bdev,
1217 iov, offset, nr_segs, 1391 iov, offset, nr_segs,
1218 linvfs_get_blocks_direct, 1392 xfs_get_blocks_direct,
1219 linvfs_end_io_direct); 1393 xfs_end_io_direct);
1220 1394
1221 if (unlikely(ret <= 0 && iocb->private)) 1395 if (unlikely(ret <= 0 && iocb->private))
1222 xfs_destroy_ioend(iocb->private); 1396 xfs_destroy_ioend(iocb->private);
1223 return ret; 1397 return ret;
1224} 1398}
1225 1399
1400STATIC int
1401xfs_vm_prepare_write(
1402 struct file *file,
1403 struct page *page,
1404 unsigned int from,
1405 unsigned int to)
1406{
1407 return block_prepare_write(page, from, to, xfs_get_block);
1408}
1226 1409
1227STATIC sector_t 1410STATIC sector_t
1228linvfs_bmap( 1411xfs_vm_bmap(
1229 struct address_space *mapping, 1412 struct address_space *mapping,
1230 sector_t block) 1413 sector_t block)
1231{ 1414{
1232 struct inode *inode = (struct inode *)mapping->host; 1415 struct inode *inode = (struct inode *)mapping->host;
1233 vnode_t *vp = LINVFS_GET_VP(inode); 1416 vnode_t *vp = vn_from_inode(inode);
1234 int error; 1417 int error;
1235 1418
1236 vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address); 1419 vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
1237 1420
1238 VOP_RWLOCK(vp, VRWLOCK_READ); 1421 VOP_RWLOCK(vp, VRWLOCK_READ);
1239 VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error); 1422 VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
1240 VOP_RWUNLOCK(vp, VRWLOCK_READ); 1423 VOP_RWUNLOCK(vp, VRWLOCK_READ);
1241 return generic_block_bmap(mapping, block, linvfs_get_block); 1424 return generic_block_bmap(mapping, block, xfs_get_block);
1242} 1425}
1243 1426
1244STATIC int 1427STATIC int
1245linvfs_readpage( 1428xfs_vm_readpage(
1246 struct file *unused, 1429 struct file *unused,
1247 struct page *page) 1430 struct page *page)
1248{ 1431{
1249 return mpage_readpage(page, linvfs_get_block); 1432 return mpage_readpage(page, xfs_get_block);
1250} 1433}
1251 1434
1252STATIC int 1435STATIC int
1253linvfs_readpages( 1436xfs_vm_readpages(
1254 struct file *unused, 1437 struct file *unused,
1255 struct address_space *mapping, 1438 struct address_space *mapping,
1256 struct list_head *pages, 1439 struct list_head *pages,
1257 unsigned nr_pages) 1440 unsigned nr_pages)
1258{ 1441{
1259 return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block); 1442 return mpage_readpages(mapping, pages, nr_pages, xfs_get_block);
1260}
1261
1262STATIC void
1263xfs_count_page_state(
1264 struct page *page,
1265 int *delalloc,
1266 int *unmapped,
1267 int *unwritten)
1268{
1269 struct buffer_head *bh, *head;
1270
1271 *delalloc = *unmapped = *unwritten = 0;
1272
1273 bh = head = page_buffers(page);
1274 do {
1275 if (buffer_uptodate(bh) && !buffer_mapped(bh))
1276 (*unmapped) = 1;
1277 else if (buffer_unwritten(bh) && !buffer_delay(bh))
1278 clear_buffer_unwritten(bh);
1279 else if (buffer_unwritten(bh))
1280 (*unwritten) = 1;
1281 else if (buffer_delay(bh))
1282 (*delalloc) = 1;
1283 } while ((bh = bh->b_this_page) != head);
1284} 1443}
1285 1444
1286
1287/*
1288 * writepage: Called from one of two places:
1289 *
1290 * 1. we are flushing a delalloc buffer head.
1291 *
1292 * 2. we are writing out a dirty page. Typically the page dirty
1293 * state is cleared before we get here. In this case it is
1294 * conceivable we have no buffer heads.
1295 *
1296 * For delalloc space on the page we need to allocate space and
1297 * flush it. For unmapped buffer heads on the page we should
1298 * allocate space if the page is uptodate. For any other dirty
1299 * buffer heads on the page we should flush them.
1300 *
1301 * If we detect that a transaction would be required to flush
1302 * the page, we have to check the process flags first: if we
1303 * are already in a transaction or disk I/O during allocations
1304 * is off, we need to fail the writepage and redirty the page.
1305 */
1306
1307STATIC int 1445STATIC int
1308linvfs_writepage( 1446xfs_vm_invalidatepage(
1309 struct page *page,
1310 struct writeback_control *wbc)
1311{
1312 int error;
1313 int need_trans;
1314 int delalloc, unmapped, unwritten;
1315 struct inode *inode = page->mapping->host;
1316
1317 xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);
1318
1319 /*
1320 * We need a transaction if:
1321 * 1. There are delalloc buffers on the page
1322 * 2. The page is uptodate and we have unmapped buffers
1323 * 3. The page is uptodate and we have no buffers
1324 * 4. There are unwritten buffers on the page
1325 */
1326
1327 if (!page_has_buffers(page)) {
1328 unmapped = 1;
1329 need_trans = 1;
1330 } else {
1331 xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
1332 if (!PageUptodate(page))
1333 unmapped = 0;
1334 need_trans = delalloc + unmapped + unwritten;
1335 }
1336
1337 /*
1338 * If we need a transaction and the process flags say
1339 * we are already in a transaction, or no IO is allowed
1340 * then mark the page dirty again and leave the page
1341 * as is.
1342 */
1343 if (PFLAGS_TEST_FSTRANS() && need_trans)
1344 goto out_fail;
1345
1346 /*
1347 * Delay hooking up buffer heads until we have
1348 * made our go/no-go decision.
1349 */
1350 if (!page_has_buffers(page))
1351 create_empty_buffers(page, 1 << inode->i_blkbits, 0);
1352
1353 /*
1354 * Convert delayed allocate, unwritten or unmapped space
1355 * to real space and flush out to disk.
1356 */
1357 error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
1358 if (error == -EAGAIN)
1359 goto out_fail;
1360 if (unlikely(error < 0))
1361 goto out_unlock;
1362
1363 return 0;
1364
1365out_fail:
1366 redirty_page_for_writepage(wbc, page);
1367 unlock_page(page);
1368 return 0;
1369out_unlock:
1370 unlock_page(page);
1371 return error;
1372}
1373
1374STATIC int
1375linvfs_invalidate_page(
1376 struct page *page, 1447 struct page *page,
1377 unsigned long offset) 1448 unsigned long offset)
1378{ 1449{
@@ -1381,87 +1452,16 @@ linvfs_invalidate_page(
1381 return block_invalidatepage(page, offset); 1452 return block_invalidatepage(page, offset);
1382} 1453}
1383 1454
1384/* 1455struct address_space_operations xfs_address_space_operations = {
1385 * Called to move a page into cleanable state - and from there 1456 .readpage = xfs_vm_readpage,
1386 * to be released. Possibly the page is already clean. We always 1457 .readpages = xfs_vm_readpages,
1387 * have buffer heads in this call. 1458 .writepage = xfs_vm_writepage,
1388 *
1389 * Returns 0 if the page is ok to release, 1 otherwise.
1390 *
1391 * Possible scenarios are:
1392 *
1393 * 1. We are being called to release a page which has been written
1394 * to via regular I/O. buffer heads will be dirty and possibly
1395 * delalloc. If no delalloc buffer heads in this case then we
1396 * can just return zero.
1397 *
1398 * 2. We are called to release a page which has been written via
1399 * mmap, all we need to do is ensure there is no delalloc
1400 * state in the buffer heads, if not we can let the caller
1401 * free them and we should come back later via writepage.
1402 */
1403STATIC int
1404linvfs_release_page(
1405 struct page *page,
1406 gfp_t gfp_mask)
1407{
1408 struct inode *inode = page->mapping->host;
1409 int dirty, delalloc, unmapped, unwritten;
1410 struct writeback_control wbc = {
1411 .sync_mode = WB_SYNC_ALL,
1412 .nr_to_write = 1,
1413 };
1414
1415 xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);
1416
1417 xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
1418 if (!delalloc && !unwritten)
1419 goto free_buffers;
1420
1421 if (!(gfp_mask & __GFP_FS))
1422 return 0;
1423
1424 /* If we are already inside a transaction or the thread cannot
1425 * do I/O, we cannot release this page.
1426 */
1427 if (PFLAGS_TEST_FSTRANS())
1428 return 0;
1429
1430 /*
1431 * Convert delalloc space to real space, do not flush the
1432 * data out to disk, that will be done by the caller.
1433 * Never need to allocate space here - we will always
1434 * come back to writepage in that case.
1435 */
1436 dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
1437 if (dirty == 0 && !unwritten)
1438 goto free_buffers;
1439 return 0;
1440
1441free_buffers:
1442 return try_to_free_buffers(page);
1443}
1444
1445STATIC int
1446linvfs_prepare_write(
1447 struct file *file,
1448 struct page *page,
1449 unsigned int from,
1450 unsigned int to)
1451{
1452 return block_prepare_write(page, from, to, linvfs_get_block);
1453}
1454
1455struct address_space_operations linvfs_aops = {
1456 .readpage = linvfs_readpage,
1457 .readpages = linvfs_readpages,
1458 .writepage = linvfs_writepage,
1459 .sync_page = block_sync_page, 1459 .sync_page = block_sync_page,
1460 .releasepage = linvfs_release_page, 1460 .releasepage = xfs_vm_releasepage,
1461 .invalidatepage = linvfs_invalidate_page, 1461 .invalidatepage = xfs_vm_invalidatepage,
1462 .prepare_write = linvfs_prepare_write, 1462 .prepare_write = xfs_vm_prepare_write,
1463 .commit_write = generic_commit_write, 1463 .commit_write = generic_commit_write,
1464 .bmap = linvfs_bmap, 1464 .bmap = xfs_vm_bmap,
1465 .direct_IO = linvfs_direct_IO, 1465 .direct_IO = xfs_vm_direct_IO,
1466 .migratepage = buffer_migrate_page, 1466 .migratepage = buffer_migrate_page,
1467}; 1467};
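
The new xfs_map_buffer() helper above deduplicates the block-number arithmetic that previously lived in both xfs_map_at_offset() and __linvfs_get_block(): the iomap records its start in 512-byte basic blocks (iomap_bn) and the file offset it begins at (iomap_offset), while the buffer head wants a filesystem-block number. A standalone sketch of that conversion follows, with invented sample values; BBSHIFT is 9 in XFS, i.e. log2 of the 512-byte basic-block size.

/*
 * Standalone illustration of the block-number arithmetic in
 * xfs_map_buffer().  The iomap values below are made-up sample data.
 */
#include <stdio.h>

#define BBSHIFT 9		/* log2(512): XFS basic-block size */

typedef long long xfs_off_t;
typedef long long xfs_daddr_t;
typedef unsigned long long sector_t;

struct sample_iomap {
	xfs_daddr_t	iomap_bn;	/* start of mapping, in 512B blocks */
	xfs_off_t	iomap_offset;	/* file offset the mapping starts at */
};

static sector_t map_blocknr(const struct sample_iomap *mp,
			    xfs_off_t offset, unsigned int block_bits)
{
	/*
	 * Scale the 512B address up to filesystem blocks, then add the
	 * distance (in fs blocks) from the start of the mapping to offset.
	 */
	return (mp->iomap_bn >> (block_bits - BBSHIFT)) +
	       ((offset - mp->iomap_offset) >> block_bits);
}

int main(void)
{
	/* 4096-byte fs blocks => block_bits = 12, 8 basic blocks each. */
	struct sample_iomap mp = { .iomap_bn = 8000, .iomap_offset = 1 << 20 };
	xfs_off_t offset = (1 << 20) + 3 * 4096;	/* 3 fs blocks in */

	/* 8000/8 = 1000, plus 3 => fs block 1003 */
	printf("b_blocknr = %llu\n", map_blocknr(&mp, offset, 12));
	return 0;
}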
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index 55339dd5a30d..795699f121d2 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -40,7 +40,7 @@ typedef struct xfs_ioend {
40 struct work_struct io_work; /* xfsdatad work queue */ 40 struct work_struct io_work; /* xfsdatad work queue */
41} xfs_ioend_t; 41} xfs_ioend_t;
42 42
43extern struct address_space_operations linvfs_aops; 43extern struct address_space_operations xfs_address_space_operations;
44extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int); 44extern int xfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
45 45
46#endif /* __XFS_IOPS_H__ */ 46#endif /* __XFS_IOPS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index bfb4f2917bb6..9fb0312665ca 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -29,6 +29,7 @@
29#include <linux/blkdev.h> 29#include <linux/blkdev.h>
30#include <linux/hash.h> 30#include <linux/hash.h>
31#include <linux/kthread.h> 31#include <linux/kthread.h>
32#include <linux/migrate.h>
32#include "xfs_linux.h" 33#include "xfs_linux.h"
33 34
34STATIC kmem_zone_t *xfs_buf_zone; 35STATIC kmem_zone_t *xfs_buf_zone;
@@ -1805,13 +1806,12 @@ xfs_flush_buftarg(
1805int __init 1806int __init
1806xfs_buf_init(void) 1807xfs_buf_init(void)
1807{ 1808{
1808 int error = -ENOMEM;
1809
1810#ifdef XFS_BUF_TRACE 1809#ifdef XFS_BUF_TRACE
1811 xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP); 1810 xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
1812#endif 1811#endif
1813 1812
1814 xfs_buf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf"); 1813 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1814 KM_ZONE_HWALIGN, NULL);
1815 if (!xfs_buf_zone) 1815 if (!xfs_buf_zone)
1816 goto out_free_trace_buf; 1816 goto out_free_trace_buf;
1817 1817
@@ -1839,7 +1839,7 @@ xfs_buf_init(void)
1839#ifdef XFS_BUF_TRACE 1839#ifdef XFS_BUF_TRACE
1840 ktrace_free(xfs_buf_trace_buf); 1840 ktrace_free(xfs_buf_trace_buf);
1841#endif 1841#endif
1842 return error; 1842 return -ENOMEM;
1843} 1843}
1844 1844
1845void 1845void
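
The xfs_buf_init() hunk above drops the pre-seeded error variable in favour of returning -ENOMEM directly from the shared unwind path — the usual kernel goto-unwind idiom, where each failure label tears down only what was set up before it. A userspace sketch of that pattern, with malloc()/free() standing in for the trace-buffer, zone and shrinker setup (all names invented):

/*
 * Sketch of the goto-unwind init pattern used by xfs_buf_init(): a
 * failure partway through jumps to a label that frees only the
 * resources already acquired, in reverse order.
 */
#include <stdio.h>
#include <stdlib.h>

static void *tracebuf, *zone, *shaker;

static int buf_init(void)
{
	tracebuf = malloc(64);		/* stands in for ktrace_alloc() */
	if (!tracebuf)
		return -1;

	zone = malloc(128);		/* stands in for kmem_zone_init_flags() */
	if (!zone)
		goto out_free_trace_buf;

	shaker = malloc(32);		/* stands in for kmem_shake_register() */
	if (!shaker)
		goto out_free_buf_zone;

	return 0;			/* resources stay live, as in the kernel */

 out_free_buf_zone:
	free(zone);
 out_free_trace_buf:
	free(tracebuf);
	return -1;			/* -ENOMEM in the kernel */
}

int main(void)
{
	printf("buf_init() = %d\n", buf_init());
	return 0;
}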
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index 80eb249f2fa0..b768ea910bbe 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -25,6 +25,8 @@
25#include "xfs_mount.h" 25#include "xfs_mount.h"
26#include "xfs_export.h" 26#include "xfs_export.h"
27 27
28STATIC struct dentry dotdot = { .d_name.name = "..", .d_name.len = 2, };
29
28/* 30/*
29 * XFS encodes and decodes the fileid portion of NFS filehandles 31 * XFS encodes and decodes the fileid portion of NFS filehandles
30 * itself instead of letting the generic NFS code do it. This 32 * itself instead of letting the generic NFS code do it. This
@@ -37,7 +39,7 @@
37 */ 39 */
38 40
39STATIC struct dentry * 41STATIC struct dentry *
40linvfs_decode_fh( 42xfs_fs_decode_fh(
41 struct super_block *sb, 43 struct super_block *sb,
42 __u32 *fh, 44 __u32 *fh,
43 int fh_len, 45 int fh_len,
@@ -78,12 +80,12 @@ linvfs_decode_fh(
78 } 80 }
79 81
80 fh = (__u32 *)&ifid; 82 fh = (__u32 *)&ifid;
81 return find_exported_dentry(sb, fh, parent, acceptable, context); 83 return sb->s_export_op->find_exported_dentry(sb, fh, parent, acceptable, context);
82} 84}
83 85
84 86
85STATIC int 87STATIC int
86linvfs_encode_fh( 88xfs_fs_encode_fh(
87 struct dentry *dentry, 89 struct dentry *dentry,
88 __u32 *fh, 90 __u32 *fh,
89 int *max_len, 91 int *max_len,
@@ -95,7 +97,7 @@ linvfs_encode_fh(
95 int len; 97 int len;
96 int is64 = 0; 98 int is64 = 0;
97#if XFS_BIG_INUMS 99#if XFS_BIG_INUMS
98 vfs_t *vfs = LINVFS_GET_VFS(inode->i_sb); 100 vfs_t *vfs = vfs_from_sb(inode->i_sb);
99 101
100 if (!(vfs->vfs_flag & VFS_32BITINODES)) { 102 if (!(vfs->vfs_flag & VFS_32BITINODES)) {
101 /* filesystem may contain 64bit inode numbers */ 103 /* filesystem may contain 64bit inode numbers */
@@ -130,21 +132,21 @@ linvfs_encode_fh(
130} 132}
131 133
132STATIC struct dentry * 134STATIC struct dentry *
133linvfs_get_dentry( 135xfs_fs_get_dentry(
134 struct super_block *sb, 136 struct super_block *sb,
135 void *data) 137 void *data)
136{ 138{
137 vnode_t *vp; 139 vnode_t *vp;
138 struct inode *inode; 140 struct inode *inode;
139 struct dentry *result; 141 struct dentry *result;
140 vfs_t *vfsp = LINVFS_GET_VFS(sb); 142 vfs_t *vfsp = vfs_from_sb(sb);
141 int error; 143 int error;
142 144
143 VFS_VGET(vfsp, &vp, (fid_t *)data, error); 145 VFS_VGET(vfsp, &vp, (fid_t *)data, error);
144 if (error || vp == NULL) 146 if (error || vp == NULL)
145 return ERR_PTR(-ESTALE) ; 147 return ERR_PTR(-ESTALE) ;
146 148
147 inode = LINVFS_GET_IP(vp); 149 inode = vn_to_inode(vp);
148 result = d_alloc_anon(inode); 150 result = d_alloc_anon(inode);
149 if (!result) { 151 if (!result) {
150 iput(inode); 152 iput(inode);
@@ -154,25 +156,20 @@ linvfs_get_dentry(
154} 156}
155 157
156STATIC struct dentry * 158STATIC struct dentry *
157linvfs_get_parent( 159xfs_fs_get_parent(
158 struct dentry *child) 160 struct dentry *child)
159{ 161{
160 int error; 162 int error;
161 vnode_t *vp, *cvp; 163 vnode_t *vp, *cvp;
162 struct dentry *parent; 164 struct dentry *parent;
163 struct dentry dotdot;
164
165 dotdot.d_name.name = "..";
166 dotdot.d_name.len = 2;
167 dotdot.d_inode = NULL;
168 165
169 cvp = NULL; 166 cvp = NULL;
170 vp = LINVFS_GET_VP(child->d_inode); 167 vp = vn_from_inode(child->d_inode);
171 VOP_LOOKUP(vp, &dotdot, &cvp, 0, NULL, NULL, error); 168 VOP_LOOKUP(vp, &dotdot, &cvp, 0, NULL, NULL, error);
172 if (unlikely(error)) 169 if (unlikely(error))
173 return ERR_PTR(-error); 170 return ERR_PTR(-error);
174 171
175 parent = d_alloc_anon(LINVFS_GET_IP(cvp)); 172 parent = d_alloc_anon(vn_to_inode(cvp));
176 if (unlikely(!parent)) { 173 if (unlikely(!parent)) {
177 VN_RELE(cvp); 174 VN_RELE(cvp);
178 return ERR_PTR(-ENOMEM); 175 return ERR_PTR(-ENOMEM);
@@ -180,9 +177,9 @@ linvfs_get_parent(
180 return parent; 177 return parent;
181} 178}
182 179
183struct export_operations linvfs_export_ops = { 180struct export_operations xfs_export_operations = {
184 .decode_fh = linvfs_decode_fh, 181 .decode_fh = xfs_fs_decode_fh,
185 .encode_fh = linvfs_encode_fh, 182 .encode_fh = xfs_fs_encode_fh,
186 .get_parent = linvfs_get_parent, 183 .get_parent = xfs_fs_get_parent,
187 .get_dentry = linvfs_get_dentry, 184 .get_dentry = xfs_fs_get_dentry,
188}; 185};
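
In xfs_fs_get_parent() above, the per-call on-stack ".." dentry is replaced by a single file-scope static, so the lookup key is built once at compile time instead of on every NFS get_parent call. A pared-down sketch of the idea; the dentry structure here is a stand-in, not the real struct dentry:

/*
 * Sketch of the '..' lookup-key change: one shared, compile-time
 * initialised key object replaces a stack temporary per call.
 */
#include <stdio.h>

struct qstr   { const char *name; unsigned int len; };
struct dentry { struct qstr d_name; };

/* One shared lookup key, initialised once. */
static struct dentry dotdot = { .d_name = { .name = "..", .len = 2 } };

static void lookup(const struct dentry *key)
{
	printf("looking up \"%.*s\"\n", (int)key->d_name.len,
	       key->d_name.name);
}

int main(void)
{
	lookup(&dotdot);	/* stands in for VOP_LOOKUP(vp, &dotdot, ...) */
	return 0;
}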
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index ced4404339c7..185567a6a561 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -43,13 +43,13 @@
43#include <linux/dcache.h> 43#include <linux/dcache.h>
44#include <linux/smp_lock.h> 44#include <linux/smp_lock.h>
45 45
46static struct vm_operations_struct linvfs_file_vm_ops; 46static struct vm_operations_struct xfs_file_vm_ops;
47#ifdef CONFIG_XFS_DMAPI 47#ifdef CONFIG_XFS_DMAPI
48static struct vm_operations_struct linvfs_dmapi_file_vm_ops; 48static struct vm_operations_struct xfs_dmapi_file_vm_ops;
49#endif 49#endif
50 50
51STATIC inline ssize_t 51STATIC inline ssize_t
52__linvfs_read( 52__xfs_file_read(
53 struct kiocb *iocb, 53 struct kiocb *iocb,
54 char __user *buf, 54 char __user *buf,
55 int ioflags, 55 int ioflags,
@@ -58,7 +58,7 @@ __linvfs_read(
58{ 58{
59 struct iovec iov = {buf, count}; 59 struct iovec iov = {buf, count};
60 struct file *file = iocb->ki_filp; 60 struct file *file = iocb->ki_filp;
61 vnode_t *vp = LINVFS_GET_VP(file->f_dentry->d_inode); 61 vnode_t *vp = vn_from_inode(file->f_dentry->d_inode);
62 ssize_t rval; 62 ssize_t rval;
63 63
64 BUG_ON(iocb->ki_pos != pos); 64 BUG_ON(iocb->ki_pos != pos);
@@ -71,28 +71,28 @@ __linvfs_read(
71 71
72 72
73STATIC ssize_t 73STATIC ssize_t
74linvfs_aio_read( 74xfs_file_aio_read(
75 struct kiocb *iocb, 75 struct kiocb *iocb,
76 char __user *buf, 76 char __user *buf,
77 size_t count, 77 size_t count,
78 loff_t pos) 78 loff_t pos)
79{ 79{
80 return __linvfs_read(iocb, buf, IO_ISAIO, count, pos); 80 return __xfs_file_read(iocb, buf, IO_ISAIO, count, pos);
81} 81}
82 82
83STATIC ssize_t 83STATIC ssize_t
84linvfs_aio_read_invis( 84xfs_file_aio_read_invis(
85 struct kiocb *iocb, 85 struct kiocb *iocb,
86 char __user *buf, 86 char __user *buf,
87 size_t count, 87 size_t count,
88 loff_t pos) 88 loff_t pos)
89{ 89{
90 return __linvfs_read(iocb, buf, IO_ISAIO|IO_INVIS, count, pos); 90 return __xfs_file_read(iocb, buf, IO_ISAIO|IO_INVIS, count, pos);
91} 91}
92 92
93 93
94STATIC inline ssize_t 94STATIC inline ssize_t
95__linvfs_write( 95__xfs_file_write(
96 struct kiocb *iocb, 96 struct kiocb *iocb,
97 const char __user *buf, 97 const char __user *buf,
98 int ioflags, 98 int ioflags,
@@ -102,7 +102,7 @@ __linvfs_write(
102 struct iovec iov = {(void __user *)buf, count}; 102 struct iovec iov = {(void __user *)buf, count};
103 struct file *file = iocb->ki_filp; 103 struct file *file = iocb->ki_filp;
104 struct inode *inode = file->f_mapping->host; 104 struct inode *inode = file->f_mapping->host;
105 vnode_t *vp = LINVFS_GET_VP(inode); 105 vnode_t *vp = vn_from_inode(inode);
106 ssize_t rval; 106 ssize_t rval;
107 107
108 BUG_ON(iocb->ki_pos != pos); 108 BUG_ON(iocb->ki_pos != pos);
@@ -115,28 +115,28 @@ __linvfs_write(
115 115
116 116
117STATIC ssize_t 117STATIC ssize_t
118linvfs_aio_write( 118xfs_file_aio_write(
119 struct kiocb *iocb, 119 struct kiocb *iocb,
120 const char __user *buf, 120 const char __user *buf,
121 size_t count, 121 size_t count,
122 loff_t pos) 122 loff_t pos)
123{ 123{
124 return __linvfs_write(iocb, buf, IO_ISAIO, count, pos); 124 return __xfs_file_write(iocb, buf, IO_ISAIO, count, pos);
125} 125}
126 126
127STATIC ssize_t 127STATIC ssize_t
128linvfs_aio_write_invis( 128xfs_file_aio_write_invis(
129 struct kiocb *iocb, 129 struct kiocb *iocb,
130 const char __user *buf, 130 const char __user *buf,
131 size_t count, 131 size_t count,
132 loff_t pos) 132 loff_t pos)
133{ 133{
134 return __linvfs_write(iocb, buf, IO_ISAIO|IO_INVIS, count, pos); 134 return __xfs_file_write(iocb, buf, IO_ISAIO|IO_INVIS, count, pos);
135} 135}
136 136
137 137
138STATIC inline ssize_t 138STATIC inline ssize_t
139__linvfs_readv( 139__xfs_file_readv(
140 struct file *file, 140 struct file *file,
141 const struct iovec *iov, 141 const struct iovec *iov,
142 int ioflags, 142 int ioflags,
@@ -144,8 +144,8 @@ __linvfs_readv(
144 loff_t *ppos) 144 loff_t *ppos)
145{ 145{
146 struct inode *inode = file->f_mapping->host; 146 struct inode *inode = file->f_mapping->host;
147 vnode_t *vp = LINVFS_GET_VP(inode); 147 vnode_t *vp = vn_from_inode(inode);
148 struct kiocb kiocb; 148 struct kiocb kiocb;
149 ssize_t rval; 149 ssize_t rval;
150 150
151 init_sync_kiocb(&kiocb, file); 151 init_sync_kiocb(&kiocb, file);
@@ -160,28 +160,28 @@ __linvfs_readv(
160} 160}
161 161
162STATIC ssize_t 162STATIC ssize_t
163linvfs_readv( 163xfs_file_readv(
164 struct file *file, 164 struct file *file,
165 const struct iovec *iov, 165 const struct iovec *iov,
166 unsigned long nr_segs, 166 unsigned long nr_segs,
167 loff_t *ppos) 167 loff_t *ppos)
168{ 168{
169 return __linvfs_readv(file, iov, 0, nr_segs, ppos); 169 return __xfs_file_readv(file, iov, 0, nr_segs, ppos);
170} 170}
171 171
172STATIC ssize_t 172STATIC ssize_t
173linvfs_readv_invis( 173xfs_file_readv_invis(
174 struct file *file, 174 struct file *file,
175 const struct iovec *iov, 175 const struct iovec *iov,
176 unsigned long nr_segs, 176 unsigned long nr_segs,
177 loff_t *ppos) 177 loff_t *ppos)
178{ 178{
179 return __linvfs_readv(file, iov, IO_INVIS, nr_segs, ppos); 179 return __xfs_file_readv(file, iov, IO_INVIS, nr_segs, ppos);
180} 180}
181 181
182 182
183STATIC inline ssize_t 183STATIC inline ssize_t
184__linvfs_writev( 184__xfs_file_writev(
185 struct file *file, 185 struct file *file,
186 const struct iovec *iov, 186 const struct iovec *iov,
187 int ioflags, 187 int ioflags,
@@ -189,8 +189,8 @@ __linvfs_writev(
189 loff_t *ppos) 189 loff_t *ppos)
190{ 190{
191 struct inode *inode = file->f_mapping->host; 191 struct inode *inode = file->f_mapping->host;
192 vnode_t *vp = LINVFS_GET_VP(inode); 192 vnode_t *vp = vn_from_inode(inode);
193 struct kiocb kiocb; 193 struct kiocb kiocb;
194 ssize_t rval; 194 ssize_t rval;
195 195
196 init_sync_kiocb(&kiocb, file); 196 init_sync_kiocb(&kiocb, file);
@@ -206,34 +206,34 @@ __linvfs_writev(
206 206
207 207
208STATIC ssize_t 208STATIC ssize_t
209linvfs_writev( 209xfs_file_writev(
210 struct file *file, 210 struct file *file,
211 const struct iovec *iov, 211 const struct iovec *iov,
212 unsigned long nr_segs, 212 unsigned long nr_segs,
213 loff_t *ppos) 213 loff_t *ppos)
214{ 214{
215 return __linvfs_writev(file, iov, 0, nr_segs, ppos); 215 return __xfs_file_writev(file, iov, 0, nr_segs, ppos);
216} 216}
217 217
218STATIC ssize_t 218STATIC ssize_t
219linvfs_writev_invis( 219xfs_file_writev_invis(
220 struct file *file, 220 struct file *file,
221 const struct iovec *iov, 221 const struct iovec *iov,
222 unsigned long nr_segs, 222 unsigned long nr_segs,
223 loff_t *ppos) 223 loff_t *ppos)
224{ 224{
225 return __linvfs_writev(file, iov, IO_INVIS, nr_segs, ppos); 225 return __xfs_file_writev(file, iov, IO_INVIS, nr_segs, ppos);
226} 226}
227 227
228STATIC ssize_t 228STATIC ssize_t
229linvfs_sendfile( 229xfs_file_sendfile(
230 struct file *filp, 230 struct file *filp,
231 loff_t *ppos, 231 loff_t *ppos,
232 size_t count, 232 size_t count,
233 read_actor_t actor, 233 read_actor_t actor,
234 void *target) 234 void *target)
235{ 235{
236 vnode_t *vp = LINVFS_GET_VP(filp->f_dentry->d_inode); 236 vnode_t *vp = vn_from_inode(filp->f_dentry->d_inode);
237 ssize_t rval; 237 ssize_t rval;
238 238
239 VOP_SENDFILE(vp, filp, ppos, 0, count, actor, target, NULL, rval); 239 VOP_SENDFILE(vp, filp, ppos, 0, count, actor, target, NULL, rval);
@@ -242,11 +242,11 @@ linvfs_sendfile(
242 242
243 243
244STATIC int 244STATIC int
245linvfs_open( 245xfs_file_open(
246 struct inode *inode, 246 struct inode *inode,
247 struct file *filp) 247 struct file *filp)
248{ 248{
249 vnode_t *vp = LINVFS_GET_VP(inode); 249 vnode_t *vp = vn_from_inode(inode);
250 int error; 250 int error;
251 251
252 if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS) 252 if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
@@ -259,11 +259,11 @@ linvfs_open(
259 259
260 260
261STATIC int 261STATIC int
262linvfs_release( 262xfs_file_release(
263 struct inode *inode, 263 struct inode *inode,
264 struct file *filp) 264 struct file *filp)
265{ 265{
266 vnode_t *vp = LINVFS_GET_VP(inode); 266 vnode_t *vp = vn_from_inode(inode);
267 int error = 0; 267 int error = 0;
268 268
269 if (vp) 269 if (vp)
@@ -273,13 +273,13 @@ linvfs_release(
273 273
274 274
275STATIC int 275STATIC int
276linvfs_fsync( 276xfs_file_fsync(
277 struct file *filp, 277 struct file *filp,
278 struct dentry *dentry, 278 struct dentry *dentry,
279 int datasync) 279 int datasync)
280{ 280{
281 struct inode *inode = dentry->d_inode; 281 struct inode *inode = dentry->d_inode;
282 vnode_t *vp = LINVFS_GET_VP(inode); 282 vnode_t *vp = vn_from_inode(inode);
283 int error; 283 int error;
284 int flags = FSYNC_WAIT; 284 int flags = FSYNC_WAIT;
285 285
@@ -292,7 +292,7 @@ linvfs_fsync(
292} 292}
293 293
294/* 294/*
295 * linvfs_readdir maps to VOP_READDIR(). 295 * xfs_file_readdir maps to VOP_READDIR().
296 * We need to build a uio, cred, ... 296 * We need to build a uio, cred, ...
297 */ 297 */
298 298
@@ -301,13 +301,13 @@ linvfs_fsync(
301#ifdef CONFIG_XFS_DMAPI 301#ifdef CONFIG_XFS_DMAPI
302 302
303STATIC struct page * 303STATIC struct page *
304linvfs_filemap_nopage( 304xfs_vm_nopage(
305 struct vm_area_struct *area, 305 struct vm_area_struct *area,
306 unsigned long address, 306 unsigned long address,
307 int *type) 307 int *type)
308{ 308{
309 struct inode *inode = area->vm_file->f_dentry->d_inode; 309 struct inode *inode = area->vm_file->f_dentry->d_inode;
310 vnode_t *vp = LINVFS_GET_VP(inode); 310 vnode_t *vp = vn_from_inode(inode);
311 xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp); 311 xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
312 int error; 312 int error;
313 313
@@ -324,7 +324,7 @@ linvfs_filemap_nopage(
324 324
325 325
326STATIC int 326STATIC int
327linvfs_readdir( 327xfs_file_readdir(
328 struct file *filp, 328 struct file *filp,
329 void *dirent, 329 void *dirent,
330 filldir_t filldir) 330 filldir_t filldir)
@@ -340,7 +340,7 @@ linvfs_readdir(
340 xfs_off_t start_offset, curr_offset; 340 xfs_off_t start_offset, curr_offset;
341 xfs_dirent_t *dbp = NULL; 341 xfs_dirent_t *dbp = NULL;
342 342
343 vp = LINVFS_GET_VP(filp->f_dentry->d_inode); 343 vp = vn_from_inode(filp->f_dentry->d_inode);
344 ASSERT(vp); 344 ASSERT(vp);
345 345
346 /* Try fairly hard to get memory */ 346 /* Try fairly hard to get memory */
@@ -404,39 +404,40 @@ done:
404 404
405 405
406STATIC int 406STATIC int
407linvfs_file_mmap( 407xfs_file_mmap(
408 struct file *filp, 408 struct file *filp,
409 struct vm_area_struct *vma) 409 struct vm_area_struct *vma)
410{ 410{
411 struct inode *ip = filp->f_dentry->d_inode; 411 struct inode *ip = filp->f_dentry->d_inode;
412 vnode_t *vp = LINVFS_GET_VP(ip); 412 vnode_t *vp = vn_from_inode(ip);
413 vattr_t va = { .va_mask = XFS_AT_UPDATIME }; 413 vattr_t vattr;
414 int error; 414 int error;
415 415
416 vma->vm_ops = &linvfs_file_vm_ops; 416 vma->vm_ops = &xfs_file_vm_ops;
417 417
418#ifdef CONFIG_XFS_DMAPI 418#ifdef CONFIG_XFS_DMAPI
419 if (vp->v_vfsp->vfs_flag & VFS_DMI) { 419 if (vp->v_vfsp->vfs_flag & VFS_DMI) {
420 vma->vm_ops = &linvfs_dmapi_file_vm_ops; 420 vma->vm_ops = &xfs_dmapi_file_vm_ops;
421 } 421 }
422#endif /* CONFIG_XFS_DMAPI */ 422#endif /* CONFIG_XFS_DMAPI */
423 423
424 VOP_SETATTR(vp, &va, XFS_AT_UPDATIME, NULL, error); 424 vattr.va_mask = XFS_AT_UPDATIME;
425 if (!error) 425 VOP_SETATTR(vp, &vattr, XFS_AT_UPDATIME, NULL, error);
426 vn_revalidate(vp); /* update Linux inode flags */ 426 if (likely(!error))
427 __vn_revalidate(vp, &vattr); /* update flags */
427 return 0; 428 return 0;
428} 429}
429 430
430 431
431STATIC long 432STATIC long
432linvfs_ioctl( 433xfs_file_ioctl(
433 struct file *filp, 434 struct file *filp,
434 unsigned int cmd, 435 unsigned int cmd,
435 unsigned long arg) 436 unsigned long arg)
436{ 437{
437 int error; 438 int error;
438 struct inode *inode = filp->f_dentry->d_inode; 439 struct inode *inode = filp->f_dentry->d_inode;
439 vnode_t *vp = LINVFS_GET_VP(inode); 440 vnode_t *vp = vn_from_inode(inode);
440 441
441 VOP_IOCTL(vp, inode, filp, 0, cmd, (void __user *)arg, error); 442 VOP_IOCTL(vp, inode, filp, 0, cmd, (void __user *)arg, error);
442 VMODIFY(vp); 443 VMODIFY(vp);
@@ -451,14 +452,14 @@ linvfs_ioctl(
451} 452}
452 453
453STATIC long 454STATIC long
454linvfs_ioctl_invis( 455xfs_file_ioctl_invis(
455 struct file *filp, 456 struct file *filp,
456 unsigned int cmd, 457 unsigned int cmd,
457 unsigned long arg) 458 unsigned long arg)
458{ 459{
459 int error; 460 int error;
460 struct inode *inode = filp->f_dentry->d_inode; 461 struct inode *inode = filp->f_dentry->d_inode;
461 vnode_t *vp = LINVFS_GET_VP(inode); 462 vnode_t *vp = vn_from_inode(inode);
462 463
463 ASSERT(vp); 464 ASSERT(vp);
464 VOP_IOCTL(vp, inode, filp, IO_INVIS, cmd, (void __user *)arg, error); 465 VOP_IOCTL(vp, inode, filp, IO_INVIS, cmd, (void __user *)arg, error);
@@ -476,11 +477,11 @@ linvfs_ioctl_invis(
476#ifdef CONFIG_XFS_DMAPI 477#ifdef CONFIG_XFS_DMAPI
477#ifdef HAVE_VMOP_MPROTECT 478#ifdef HAVE_VMOP_MPROTECT
478STATIC int 479STATIC int
479linvfs_mprotect( 480xfs_vm_mprotect(
480 struct vm_area_struct *vma, 481 struct vm_area_struct *vma,
481 unsigned int newflags) 482 unsigned int newflags)
482{ 483{
483 vnode_t *vp = LINVFS_GET_VP(vma->vm_file->f_dentry->d_inode); 484 vnode_t *vp = vn_from_inode(vma->vm_file->f_dentry->d_inode);
484 int error = 0; 485 int error = 0;
485 486
486 if (vp->v_vfsp->vfs_flag & VFS_DMI) { 487 if (vp->v_vfsp->vfs_flag & VFS_DMI) {
@@ -503,10 +504,10 @@ linvfs_mprotect(
503 * it back online. 504 * it back online.
504 */ 505 */
505STATIC int 506STATIC int
506linvfs_open_exec( 507xfs_file_open_exec(
507 struct inode *inode) 508 struct inode *inode)
508{ 509{
509 vnode_t *vp = LINVFS_GET_VP(inode); 510 vnode_t *vp = vn_from_inode(inode);
510 xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp); 511 xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
511 int error = 0; 512 int error = 0;
512 xfs_inode_t *ip; 513 xfs_inode_t *ip;
@@ -527,69 +528,69 @@ open_exec_out:
527} 528}
528#endif /* HAVE_FOP_OPEN_EXEC */ 529#endif /* HAVE_FOP_OPEN_EXEC */
529 530
530struct file_operations linvfs_file_operations = { 531struct file_operations xfs_file_operations = {
531 .llseek = generic_file_llseek, 532 .llseek = generic_file_llseek,
532 .read = do_sync_read, 533 .read = do_sync_read,
533 .write = do_sync_write, 534 .write = do_sync_write,
534 .readv = linvfs_readv, 535 .readv = xfs_file_readv,
535 .writev = linvfs_writev, 536 .writev = xfs_file_writev,
536 .aio_read = linvfs_aio_read, 537 .aio_read = xfs_file_aio_read,
537 .aio_write = linvfs_aio_write, 538 .aio_write = xfs_file_aio_write,
538 .sendfile = linvfs_sendfile, 539 .sendfile = xfs_file_sendfile,
539 .unlocked_ioctl = linvfs_ioctl, 540 .unlocked_ioctl = xfs_file_ioctl,
540#ifdef CONFIG_COMPAT 541#ifdef CONFIG_COMPAT
541 .compat_ioctl = linvfs_compat_ioctl, 542 .compat_ioctl = xfs_file_compat_ioctl,
542#endif 543#endif
543 .mmap = linvfs_file_mmap, 544 .mmap = xfs_file_mmap,
544 .open = linvfs_open, 545 .open = xfs_file_open,
545 .release = linvfs_release, 546 .release = xfs_file_release,
546 .fsync = linvfs_fsync, 547 .fsync = xfs_file_fsync,
547#ifdef HAVE_FOP_OPEN_EXEC 548#ifdef HAVE_FOP_OPEN_EXEC
548 .open_exec = linvfs_open_exec, 549 .open_exec = xfs_file_open_exec,
549#endif 550#endif
550}; 551};
551 552
552struct file_operations linvfs_invis_file_operations = { 553struct file_operations xfs_invis_file_operations = {
553 .llseek = generic_file_llseek, 554 .llseek = generic_file_llseek,
554 .read = do_sync_read, 555 .read = do_sync_read,
555 .write = do_sync_write, 556 .write = do_sync_write,
556 .readv = linvfs_readv_invis, 557 .readv = xfs_file_readv_invis,
557 .writev = linvfs_writev_invis, 558 .writev = xfs_file_writev_invis,
558 .aio_read = linvfs_aio_read_invis, 559 .aio_read = xfs_file_aio_read_invis,
559 .aio_write = linvfs_aio_write_invis, 560 .aio_write = xfs_file_aio_write_invis,
560 .sendfile = linvfs_sendfile, 561 .sendfile = xfs_file_sendfile,
561 .unlocked_ioctl = linvfs_ioctl_invis, 562 .unlocked_ioctl = xfs_file_ioctl_invis,
562#ifdef CONFIG_COMPAT 563#ifdef CONFIG_COMPAT
563 .compat_ioctl = linvfs_compat_invis_ioctl, 564 .compat_ioctl = xfs_file_compat_invis_ioctl,
564#endif 565#endif
565 .mmap = linvfs_file_mmap, 566 .mmap = xfs_file_mmap,
566 .open = linvfs_open, 567 .open = xfs_file_open,
567 .release = linvfs_release, 568 .release = xfs_file_release,
568 .fsync = linvfs_fsync, 569 .fsync = xfs_file_fsync,
569}; 570};
570 571
571 572
572struct file_operations linvfs_dir_operations = { 573struct file_operations xfs_dir_file_operations = {
573 .read = generic_read_dir, 574 .read = generic_read_dir,
574 .readdir = linvfs_readdir, 575 .readdir = xfs_file_readdir,
575 .unlocked_ioctl = linvfs_ioctl, 576 .unlocked_ioctl = xfs_file_ioctl,
576#ifdef CONFIG_COMPAT 577#ifdef CONFIG_COMPAT
577 .compat_ioctl = linvfs_compat_ioctl, 578 .compat_ioctl = xfs_file_compat_ioctl,
578#endif 579#endif
579 .fsync = linvfs_fsync, 580 .fsync = xfs_file_fsync,
580}; 581};
581 582
582static struct vm_operations_struct linvfs_file_vm_ops = { 583static struct vm_operations_struct xfs_file_vm_ops = {
583 .nopage = filemap_nopage, 584 .nopage = filemap_nopage,
584 .populate = filemap_populate, 585 .populate = filemap_populate,
585}; 586};
586 587
587#ifdef CONFIG_XFS_DMAPI 588#ifdef CONFIG_XFS_DMAPI
588static struct vm_operations_struct linvfs_dmapi_file_vm_ops = { 589static struct vm_operations_struct xfs_dmapi_file_vm_ops = {
589 .nopage = linvfs_filemap_nopage, 590 .nopage = xfs_vm_nopage,
590 .populate = filemap_populate, 591 .populate = filemap_populate,
591#ifdef HAVE_VMOP_MPROTECT 592#ifdef HAVE_VMOP_MPROTECT
592 .mprotect = linvfs_mprotect, 593 .mprotect = xfs_vm_mprotect,
593#endif 594#endif
594}; 595};
595#endif /* CONFIG_XFS_DMAPI */ 596#endif /* CONFIG_XFS_DMAPI */
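
Beyond the linvfs_* to xfs_file_* renaming, this file shows the thin-wrapper pattern XFS uses for its file_operations: one internal helper takes an ioflags bitmask, and each exported entry point (plain, AIO, "invisible" DMAPI I/O) is a one-liner that fixes the flags. A compilable sketch of the pattern, with the flag values and function names invented rather than taken from the XFS headers:

/*
 * Sketch of the thin-wrapper dispatch above: every file_operations
 * entry funnels into one helper that carries an ioflags bitmask.
 */
#include <stdio.h>
#include <string.h>

#define IO_ISAIO 0x1		/* caller is the AIO path */
#define IO_INVIS 0x2		/* "invisible" I/O: skip timestamp updates */

static long __file_read(char *buf, size_t count, int ioflags)
{
	/* A real implementation would call down into VOP_READ() here. */
	memset(buf, 0, count);
	printf("read %zu bytes, ioflags=%#x\n", count, ioflags);
	return (long)count;
}

static long file_aio_read(char *buf, size_t count)
{
	return __file_read(buf, count, IO_ISAIO);
}

static long file_aio_read_invis(char *buf, size_t count)
{
	return __file_read(buf, count, IO_ISAIO | IO_INVIS);
}

int main(void)
{
	char buf[16];

	file_aio_read(buf, sizeof(buf));
	file_aio_read_invis(buf, sizeof(buf));
	return 0;
}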
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index 4fa4b1a5187e..575f2a790f31 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -57,7 +57,7 @@ fs_tosspages(
57 int fiopt) 57 int fiopt)
58{ 58{
59 vnode_t *vp = BHV_TO_VNODE(bdp); 59 vnode_t *vp = BHV_TO_VNODE(bdp);
60 struct inode *ip = LINVFS_GET_IP(vp); 60 struct inode *ip = vn_to_inode(vp);
61 61
62 if (VN_CACHED(vp)) 62 if (VN_CACHED(vp))
63 truncate_inode_pages(ip->i_mapping, first); 63 truncate_inode_pages(ip->i_mapping, first);
@@ -76,7 +76,7 @@ fs_flushinval_pages(
76 int fiopt) 76 int fiopt)
77{ 77{
78 vnode_t *vp = BHV_TO_VNODE(bdp); 78 vnode_t *vp = BHV_TO_VNODE(bdp);
79 struct inode *ip = LINVFS_GET_IP(vp); 79 struct inode *ip = vn_to_inode(vp);
80 80
81 if (VN_CACHED(vp)) { 81 if (VN_CACHED(vp)) {
82 filemap_write_and_wait(ip->i_mapping); 82 filemap_write_and_wait(ip->i_mapping);
@@ -98,7 +98,7 @@ fs_flush_pages(
98 int fiopt) 98 int fiopt)
99{ 99{
100 vnode_t *vp = BHV_TO_VNODE(bdp); 100 vnode_t *vp = BHV_TO_VNODE(bdp);
101 struct inode *ip = LINVFS_GET_IP(vp); 101 struct inode *ip = vn_to_inode(vp);
102 102
103 if (VN_CACHED(vp)) { 103 if (VN_CACHED(vp)) {
104 filemap_fdatawrite(ip->i_mapping); 104 filemap_fdatawrite(ip->i_mapping);
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 4db47790415c..84478491609b 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -138,7 +138,7 @@ xfs_find_handle(
138 } 138 }
139 139
140 /* we need the vnode */ 140 /* we need the vnode */
141 vp = LINVFS_GET_VP(inode); 141 vp = vn_from_inode(inode);
142 142
143 /* now we can grab the fsid */ 143 /* now we can grab the fsid */
144 memcpy(&handle.ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t)); 144 memcpy(&handle.ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t));
@@ -256,7 +256,7 @@ xfs_vget_fsop_handlereq(
256 } 256 }
257 257
258 vpp = XFS_ITOV(ip); 258 vpp = XFS_ITOV(ip);
259 inodep = LINVFS_GET_IP(vpp); 259 inodep = vn_to_inode(vpp);
260 xfs_iunlock(ip, XFS_ILOCK_SHARED); 260 xfs_iunlock(ip, XFS_ILOCK_SHARED);
261 261
262 *vp = vpp; 262 *vp = vpp;
@@ -344,7 +344,7 @@ xfs_open_by_handle(
344 return -XFS_ERROR(-PTR_ERR(filp)); 344 return -XFS_ERROR(-PTR_ERR(filp));
345 } 345 }
346 if (inode->i_mode & S_IFREG) 346 if (inode->i_mode & S_IFREG)
347 filp->f_op = &linvfs_invis_file_operations; 347 filp->f_op = &xfs_invis_file_operations;
348 348
349 fd_install(new_fd, filp); 349 fd_install(new_fd, filp);
350 return new_fd; 350 return new_fd;
@@ -715,7 +715,7 @@ xfs_ioctl(
715 xfs_inode_t *ip; 715 xfs_inode_t *ip;
716 xfs_mount_t *mp; 716 xfs_mount_t *mp;
717 717
718 vp = LINVFS_GET_VP(inode); 718 vp = vn_from_inode(inode);
719 719
720 vn_trace_entry(vp, "xfs_ioctl", (inst_t *)__return_address); 720 vn_trace_entry(vp, "xfs_ioctl", (inst_t *)__return_address);
721 721
@@ -1160,105 +1160,129 @@ xfs_ioc_xattr(
1160 void __user *arg) 1160 void __user *arg)
1161{ 1161{
1162 struct fsxattr fa; 1162 struct fsxattr fa;
1163 vattr_t va; 1163 struct vattr *vattr;
1164 int error; 1164 int error = 0;
1165 int attr_flags; 1165 int attr_flags;
1166 unsigned int flags; 1166 unsigned int flags;
1167 1167
1168 vattr = kmalloc(sizeof(*vattr), GFP_KERNEL);
1169 if (unlikely(!vattr))
1170 return -ENOMEM;
1171
1168 switch (cmd) { 1172 switch (cmd) {
1169 case XFS_IOC_FSGETXATTR: { 1173 case XFS_IOC_FSGETXATTR: {
1170 va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \ 1174 vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \
1171 XFS_AT_NEXTENTS | XFS_AT_PROJID; 1175 XFS_AT_NEXTENTS | XFS_AT_PROJID;
1172 VOP_GETATTR(vp, &va, 0, NULL, error); 1176 VOP_GETATTR(vp, vattr, 0, NULL, error);
1173 if (error) 1177 if (unlikely(error)) {
1174 return -error; 1178 error = -error;
1179 break;
1180 }
1175 1181
1176 fa.fsx_xflags = va.va_xflags; 1182 fa.fsx_xflags = vattr->va_xflags;
1177 fa.fsx_extsize = va.va_extsize; 1183 fa.fsx_extsize = vattr->va_extsize;
1178 fa.fsx_nextents = va.va_nextents; 1184 fa.fsx_nextents = vattr->va_nextents;
1179 fa.fsx_projid = va.va_projid; 1185 fa.fsx_projid = vattr->va_projid;
1180 1186
1181 if (copy_to_user(arg, &fa, sizeof(fa))) 1187 if (copy_to_user(arg, &fa, sizeof(fa))) {
1182 return -XFS_ERROR(EFAULT); 1188 error = -EFAULT;
1183 return 0; 1189 break;
1190 }
1191 break;
1184 } 1192 }
1185 1193
1186 case XFS_IOC_FSSETXATTR: { 1194 case XFS_IOC_FSSETXATTR: {
1187 if (copy_from_user(&fa, arg, sizeof(fa))) 1195 if (copy_from_user(&fa, arg, sizeof(fa))) {
1188 return -XFS_ERROR(EFAULT); 1196 error = -EFAULT;
1197 break;
1198 }
1189 1199
1190 attr_flags = 0; 1200 attr_flags = 0;
1191 if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) 1201 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
1192 attr_flags |= ATTR_NONBLOCK; 1202 attr_flags |= ATTR_NONBLOCK;
1193 1203
1194 va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID; 1204 vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID;
1195 va.va_xflags = fa.fsx_xflags; 1205 vattr->va_xflags = fa.fsx_xflags;
1196 va.va_extsize = fa.fsx_extsize; 1206 vattr->va_extsize = fa.fsx_extsize;
1197 va.va_projid = fa.fsx_projid; 1207 vattr->va_projid = fa.fsx_projid;
1198 1208
1199 VOP_SETATTR(vp, &va, attr_flags, NULL, error); 1209 VOP_SETATTR(vp, vattr, attr_flags, NULL, error);
1200 if (!error) 1210 if (likely(!error))
1201 vn_revalidate(vp); /* update Linux inode flags */ 1211 __vn_revalidate(vp, vattr); /* update flags */
1202 return -error; 1212 error = -error;
1213 break;
1203 } 1214 }
1204 1215
1205 case XFS_IOC_FSGETXATTRA: { 1216 case XFS_IOC_FSGETXATTRA: {
1206 va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \ 1217 vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \
1207 XFS_AT_ANEXTENTS | XFS_AT_PROJID; 1218 XFS_AT_ANEXTENTS | XFS_AT_PROJID;
1208 VOP_GETATTR(vp, &va, 0, NULL, error); 1219 VOP_GETATTR(vp, vattr, 0, NULL, error);
1209 if (error) 1220 if (unlikely(error)) {
1210 return -error; 1221 error = -error;
1222 break;
1223 }
1211 1224
1212 fa.fsx_xflags = va.va_xflags; 1225 fa.fsx_xflags = vattr->va_xflags;
1213 fa.fsx_extsize = va.va_extsize; 1226 fa.fsx_extsize = vattr->va_extsize;
1214 fa.fsx_nextents = va.va_anextents; 1227 fa.fsx_nextents = vattr->va_anextents;
1215 fa.fsx_projid = va.va_projid; 1228 fa.fsx_projid = vattr->va_projid;
1216 1229
1217 if (copy_to_user(arg, &fa, sizeof(fa))) 1230 if (copy_to_user(arg, &fa, sizeof(fa))) {
1218 return -XFS_ERROR(EFAULT); 1231 error = -EFAULT;
1219 return 0; 1232 break;
1233 }
1234 break;
1220 } 1235 }
1221 1236
1222 case XFS_IOC_GETXFLAGS: { 1237 case XFS_IOC_GETXFLAGS: {
1223 flags = xfs_di2lxflags(ip->i_d.di_flags); 1238 flags = xfs_di2lxflags(ip->i_d.di_flags);
1224 if (copy_to_user(arg, &flags, sizeof(flags))) 1239 if (copy_to_user(arg, &flags, sizeof(flags)))
1225 return -XFS_ERROR(EFAULT); 1240 error = -EFAULT;
1226 return 0; 1241 break;
1227 } 1242 }
1228 1243
1229 case XFS_IOC_SETXFLAGS: { 1244 case XFS_IOC_SETXFLAGS: {
1230 if (copy_from_user(&flags, arg, sizeof(flags))) 1245 if (copy_from_user(&flags, arg, sizeof(flags))) {
1231 return -XFS_ERROR(EFAULT); 1246 error = -EFAULT;
1247 break;
1248 }
1232 1249
1233 if (flags & ~(LINUX_XFLAG_IMMUTABLE | LINUX_XFLAG_APPEND | \ 1250 if (flags & ~(LINUX_XFLAG_IMMUTABLE | LINUX_XFLAG_APPEND | \
1234 LINUX_XFLAG_NOATIME | LINUX_XFLAG_NODUMP | \ 1251 LINUX_XFLAG_NOATIME | LINUX_XFLAG_NODUMP | \
1235 LINUX_XFLAG_SYNC)) 1252 LINUX_XFLAG_SYNC)) {
1236 return -XFS_ERROR(EOPNOTSUPP); 1253 error = -EOPNOTSUPP;
1254 break;
1255 }
1237 1256
1238 attr_flags = 0; 1257 attr_flags = 0;
1239 if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) 1258 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
1240 attr_flags |= ATTR_NONBLOCK; 1259 attr_flags |= ATTR_NONBLOCK;
1241 1260
1242 va.va_mask = XFS_AT_XFLAGS; 1261 vattr->va_mask = XFS_AT_XFLAGS;
1243 va.va_xflags = xfs_merge_ioc_xflags(flags, 1262 vattr->va_xflags = xfs_merge_ioc_xflags(flags,
1244 xfs_ip2xflags(ip)); 1263 xfs_ip2xflags(ip));
1245 1264
1246 VOP_SETATTR(vp, &va, attr_flags, NULL, error); 1265 VOP_SETATTR(vp, vattr, attr_flags, NULL, error);
1247 if (!error) 1266 if (likely(!error))
1248 vn_revalidate(vp); /* update Linux inode flags */ 1267 __vn_revalidate(vp, vattr); /* update flags */
1249 return -error; 1268 error = -error;
1269 break;
1250 } 1270 }
1251 1271
1252 case XFS_IOC_GETVERSION: { 1272 case XFS_IOC_GETVERSION: {
1253 flags = LINVFS_GET_IP(vp)->i_generation; 1273 flags = vn_to_inode(vp)->i_generation;
1254 if (copy_to_user(arg, &flags, sizeof(flags))) 1274 if (copy_to_user(arg, &flags, sizeof(flags)))
1255 return -XFS_ERROR(EFAULT); 1275 error = -EFAULT;
1256 return 0; 1276 break;
1257 } 1277 }
1258 1278
1259 default: 1279 default:
1260 return -ENOTTY; 1280 error = -ENOTTY;
1281 break;
1261 } 1282 }
1283
1284 kfree(vattr);
1285 return error;
1262} 1286}
1263 1287
1264STATIC int 1288STATIC int
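The xfs_ioc_xattr() rework above is more than renaming: the vattr_t moves off the kernel stack onto the heap, and every early return inside the switch becomes a break so a single exit path can free the allocation. A minimal sketch of that shape, using a hypothetical demo_attrs structure and command number rather than the real XFS types:

	#include <linux/kernel.h>
	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <asm/uaccess.h>

	/* stand-in for the sizeable vattr_t kept off the stack */
	struct demo_attrs {
		unsigned int	xflags;
		unsigned int	extsize;
	};

	static int demo_ioctl(unsigned int cmd, void __user *arg)
	{
		struct demo_attrs *attrs;
		int error = 0;

		attrs = kmalloc(sizeof(*attrs), GFP_KERNEL);	/* heap, not stack */
		if (unlikely(!attrs))
			return -ENOMEM;

		switch (cmd) {
		case 1:					/* hypothetical "get" command */
			attrs->xflags = 0;
			attrs->extsize = 0;
			if (copy_to_user(arg, attrs, sizeof(*attrs)))
				error = -EFAULT;	/* break, don't return */
			break;
		default:
			error = -ENOTTY;
			break;
		}

		kfree(attrs);				/* the one exit frees every path */
		return error;
	}

The same conversion also drops the XFS_ERROR() wrapper around EFAULT and friends, since the error now flows through a plain int to the common exit rather than being returned immediately.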
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index a7c9ba1a9f7b..b6321abd9a81 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -107,11 +107,11 @@ xfs_ioctl32_bulkstat(
107#endif 107#endif
108 108
109STATIC long 109STATIC long
110__linvfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg) 110xfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg)
111{ 111{
112 int error; 112 int error;
113 struct inode *inode = f->f_dentry->d_inode; 113 struct inode *inode = f->f_dentry->d_inode;
114 vnode_t *vp = LINVFS_GET_VP(inode); 114 vnode_t *vp = vn_from_inode(inode);
115 115
116 switch (cmd) { 116 switch (cmd) {
117 case XFS_IOC_DIOINFO: 117 case XFS_IOC_DIOINFO:
@@ -196,19 +196,19 @@ __linvfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg)
196} 196}
197 197
198long 198long
199linvfs_compat_ioctl( 199xfs_file_compat_ioctl(
200 struct file *f, 200 struct file *f,
201 unsigned cmd, 201 unsigned cmd,
202 unsigned long arg) 202 unsigned long arg)
203{ 203{
204 return __linvfs_compat_ioctl(0, f, cmd, arg); 204 return xfs_compat_ioctl(0, f, cmd, arg);
205} 205}
206 206
207long 207long
208linvfs_compat_invis_ioctl( 208xfs_file_compat_invis_ioctl(
209 struct file *f, 209 struct file *f,
210 unsigned cmd, 210 unsigned cmd,
211 unsigned long arg) 211 unsigned long arg)
212{ 212{
213 return __linvfs_compat_ioctl(IO_INVIS, f, cmd, arg); 213 return xfs_compat_ioctl(IO_INVIS, f, cmd, arg);
214} 214}
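Both 32-bit entry points above funnel into a single worker and differ only in the mode flag: IO_INVIS marks XFS's DMAPI "invisible" I/O, which deliberately skips inode timestamp updates. The wrapper shape, reduced to a sketch with illustrative demo_ names (IO_INVIS itself comes from the XFS headers):

	static long demo_compat_worker(int mode, struct file *f,
				       unsigned cmd, unsigned long arg)
	{
		/* the real worker translates 32-bit ioctl structure
		 * layouts, then dispatches into the common ioctl path;
		 * mode is 0 for normal I/O, IO_INVIS for invisible I/O */
		return 0;
	}

	long demo_file_compat_ioctl(struct file *f, unsigned cmd,
				    unsigned long arg)
	{
		return demo_compat_worker(0, f, cmd, arg);
	}

	long demo_file_compat_invis_ioctl(struct file *f, unsigned cmd,
					  unsigned long arg)
	{
		return demo_compat_worker(IO_INVIS, f, cmd, arg);
	}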
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.h b/fs/xfs/linux-2.6/xfs_ioctl32.h
index 011c273bec50..02de6e62ee37 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.h
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.h
@@ -18,7 +18,7 @@
18#ifndef __XFS_IOCTL32_H__ 18#ifndef __XFS_IOCTL32_H__
19#define __XFS_IOCTL32_H__ 19#define __XFS_IOCTL32_H__
20 20
21extern long linvfs_compat_ioctl(struct file *, unsigned, unsigned long); 21extern long xfs_file_compat_ioctl(struct file *, unsigned, unsigned long);
22extern long linvfs_compat_invis_ioctl(struct file *f, unsigned, unsigned long); 22extern long xfs_file_compat_invis_ioctl(struct file *, unsigned, unsigned long);
23 23
24#endif /* __XFS_IOCTL32_H__ */ 24#endif /* __XFS_IOCTL32_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index d7f6f2d8ac8e..af487437bd7e 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -106,7 +106,7 @@ xfs_ichgtime(
106 xfs_inode_t *ip, 106 xfs_inode_t *ip,
107 int flags) 107 int flags)
108{ 108{
109 struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip)); 109 struct inode *inode = vn_to_inode(XFS_ITOV(ip));
110 timespec_t tv; 110 timespec_t tv;
111 111
112 nanotime(&tv); 112 nanotime(&tv);
@@ -198,22 +198,22 @@ xfs_ichgtime_fast(
198 * Pull the link count and size up from the xfs inode to the linux inode 198 * Pull the link count and size up from the xfs inode to the linux inode
199 */ 199 */
200STATIC void 200STATIC void
201validate_fields( 201xfs_validate_fields(
202 struct inode *ip) 202 struct inode *ip,
203 struct vattr *vattr)
203{ 204{
204 vnode_t *vp = LINVFS_GET_VP(ip); 205 vnode_t *vp = vn_from_inode(ip);
205 vattr_t va;
206 int error; 206 int error;
207 207
208 va.va_mask = XFS_AT_NLINK|XFS_AT_SIZE|XFS_AT_NBLOCKS; 208 vattr->va_mask = XFS_AT_NLINK|XFS_AT_SIZE|XFS_AT_NBLOCKS;
209 VOP_GETATTR(vp, &va, ATTR_LAZY, NULL, error); 209 VOP_GETATTR(vp, vattr, ATTR_LAZY, NULL, error);
210 if (likely(!error)) { 210 if (likely(!error)) {
211 ip->i_nlink = va.va_nlink; 211 ip->i_nlink = vattr->va_nlink;
212 ip->i_blocks = va.va_nblocks; 212 ip->i_blocks = vattr->va_nblocks;
213 213
214 /* we're under i_mutex so i_size can't change under us */ 214 /* we're under i_mutex so i_size can't change under us */
215 if (i_size_read(ip) != va.va_size) 215 if (i_size_read(ip) != vattr->va_size)
216 i_size_write(ip, va.va_size); 216 i_size_write(ip, vattr->va_size);
217 } 217 }
218} 218}
219 219
@@ -224,11 +224,11 @@ validate_fields(
224 * inode, of course, such that log replay can't cause these to be lost). 224 * inode, of course, such that log replay can't cause these to be lost).
225 */ 225 */
226STATIC int 226STATIC int
227linvfs_init_security( 227xfs_init_security(
228 struct vnode *vp, 228 struct vnode *vp,
229 struct inode *dir) 229 struct inode *dir)
230{ 230{
231 struct inode *ip = LINVFS_GET_IP(vp); 231 struct inode *ip = vn_to_inode(vp);
232 size_t length; 232 size_t length;
233 void *value; 233 void *value;
234 char *name; 234 char *name;
@@ -257,46 +257,46 @@ linvfs_init_security(
257 * XXX(hch): nfsd is broken, better fix it instead. 257 * XXX(hch): nfsd is broken, better fix it instead.
258 */ 258 */
259STATIC inline int 259STATIC inline int
260has_fs_struct(struct task_struct *task) 260xfs_has_fs_struct(struct task_struct *task)
261{ 261{
262 return (task->fs != init_task.fs); 262 return (task->fs != init_task.fs);
263} 263}
264 264
265STATIC inline void 265STATIC inline void
266cleanup_inode( 266xfs_cleanup_inode(
267 vnode_t *dvp, 267 vnode_t *dvp,
268 vnode_t *vp, 268 vnode_t *vp,
269 struct dentry *dentry, 269 struct dentry *dentry,
270 int mode) 270 int mode)
271{ 271{
272 struct dentry teardown = {}; 272 struct dentry teardown = {};
273 int err2; 273 int error;
274 274
275 /* Oh, the horror. 275 /* Oh, the horror.
276 * If we can't add the ACL or we fail in 276 * If we can't add the ACL or we fail in
277 * linvfs_init_security we must back out. 277 * xfs_init_security we must back out.
278 * ENOSPC can hit here, among other things. 278 * ENOSPC can hit here, among other things.
279 */ 279 */
280 teardown.d_inode = LINVFS_GET_IP(vp); 280 teardown.d_inode = vn_to_inode(vp);
281 teardown.d_name = dentry->d_name; 281 teardown.d_name = dentry->d_name;
282 282
283 if (S_ISDIR(mode)) 283 if (S_ISDIR(mode))
284 VOP_RMDIR(dvp, &teardown, NULL, err2); 284 VOP_RMDIR(dvp, &teardown, NULL, error);
285 else 285 else
286 VOP_REMOVE(dvp, &teardown, NULL, err2); 286 VOP_REMOVE(dvp, &teardown, NULL, error);
287 VN_RELE(vp); 287 VN_RELE(vp);
288} 288}
289 289
290STATIC int 290STATIC int
291linvfs_mknod( 291xfs_vn_mknod(
292 struct inode *dir, 292 struct inode *dir,
293 struct dentry *dentry, 293 struct dentry *dentry,
294 int mode, 294 int mode,
295 dev_t rdev) 295 dev_t rdev)
296{ 296{
297 struct inode *ip; 297 struct inode *ip;
298 vattr_t va; 298 vattr_t vattr = { 0 };
299 vnode_t *vp = NULL, *dvp = LINVFS_GET_VP(dir); 299 vnode_t *vp = NULL, *dvp = vn_from_inode(dir);
300 xfs_acl_t *default_acl = NULL; 300 xfs_acl_t *default_acl = NULL;
301 attrexists_t test_default_acl = _ACL_DEFAULT_EXISTS; 301 attrexists_t test_default_acl = _ACL_DEFAULT_EXISTS;
302 int error; 302 int error;
@@ -305,99 +305,98 @@ linvfs_mknod(
305 * Irix uses Missed'em'V split, but doesn't want to see 305 * Irix uses Missed'em'V split, but doesn't want to see
306 * the upper 5 bits of (14bit) major. 306 * the upper 5 bits of (14bit) major.
307 */ 307 */
308 if (!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff) 308 if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff))
309 return -EINVAL; 309 return -EINVAL;
310 310
311 if (test_default_acl && test_default_acl(dvp)) { 311 if (unlikely(test_default_acl && test_default_acl(dvp))) {
312 if (!_ACL_ALLOC(default_acl)) 312 if (!_ACL_ALLOC(default_acl)) {
313 return -ENOMEM; 313 return -ENOMEM;
314 }
314 if (!_ACL_GET_DEFAULT(dvp, default_acl)) { 315 if (!_ACL_GET_DEFAULT(dvp, default_acl)) {
315 _ACL_FREE(default_acl); 316 _ACL_FREE(default_acl);
316 default_acl = NULL; 317 default_acl = NULL;
317 } 318 }
318 } 319 }
319 320
320 if (IS_POSIXACL(dir) && !default_acl && has_fs_struct(current)) 321 if (IS_POSIXACL(dir) && !default_acl && xfs_has_fs_struct(current))
321 mode &= ~current->fs->umask; 322 mode &= ~current->fs->umask;
322 323
323 memset(&va, 0, sizeof(va)); 324 vattr.va_mask = XFS_AT_TYPE|XFS_AT_MODE;
324 va.va_mask = XFS_AT_TYPE|XFS_AT_MODE; 325 vattr.va_mode = mode;
325 va.va_mode = mode;
326 326
327 switch (mode & S_IFMT) { 327 switch (mode & S_IFMT) {
328 case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK: 328 case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK:
329 va.va_rdev = sysv_encode_dev(rdev); 329 vattr.va_rdev = sysv_encode_dev(rdev);
330 va.va_mask |= XFS_AT_RDEV; 330 vattr.va_mask |= XFS_AT_RDEV;
331 /*FALLTHROUGH*/ 331 /*FALLTHROUGH*/
332 case S_IFREG: 332 case S_IFREG:
333 VOP_CREATE(dvp, dentry, &va, &vp, NULL, error); 333 VOP_CREATE(dvp, dentry, &vattr, &vp, NULL, error);
334 break; 334 break;
335 case S_IFDIR: 335 case S_IFDIR:
336 VOP_MKDIR(dvp, dentry, &va, &vp, NULL, error); 336 VOP_MKDIR(dvp, dentry, &vattr, &vp, NULL, error);
337 break; 337 break;
338 default: 338 default:
339 error = EINVAL; 339 error = EINVAL;
340 break; 340 break;
341 } 341 }
342 342
343 if (!error) 343 if (unlikely(!error)) {
344 { 344 error = xfs_init_security(vp, dir);
345 error = linvfs_init_security(vp, dir);
346 if (error) 345 if (error)
347 cleanup_inode(dvp, vp, dentry, mode); 346 xfs_cleanup_inode(dvp, vp, dentry, mode);
348 } 347 }
349 348
350 if (default_acl) { 349 if (unlikely(default_acl)) {
351 if (!error) { 350 if (!error) {
352 error = _ACL_INHERIT(vp, &va, default_acl); 351 error = _ACL_INHERIT(vp, &vattr, default_acl);
353 if (!error) 352 if (!error)
354 VMODIFY(vp); 353 VMODIFY(vp);
355 else 354 else
356 cleanup_inode(dvp, vp, dentry, mode); 355 xfs_cleanup_inode(dvp, vp, dentry, mode);
357 } 356 }
358 _ACL_FREE(default_acl); 357 _ACL_FREE(default_acl);
359 } 358 }
360 359
361 if (!error) { 360 if (likely(!error)) {
362 ASSERT(vp); 361 ASSERT(vp);
363 ip = LINVFS_GET_IP(vp); 362 ip = vn_to_inode(vp);
364 363
365 if (S_ISCHR(mode) || S_ISBLK(mode)) 364 if (S_ISCHR(mode) || S_ISBLK(mode))
366 ip->i_rdev = rdev; 365 ip->i_rdev = rdev;
367 else if (S_ISDIR(mode)) 366 else if (S_ISDIR(mode))
368 validate_fields(ip); 367 xfs_validate_fields(ip, &vattr);
369 d_instantiate(dentry, ip); 368 d_instantiate(dentry, ip);
370 validate_fields(dir); 369 xfs_validate_fields(dir, &vattr);
371 } 370 }
372 return -error; 371 return -error;
373} 372}
374 373
375STATIC int 374STATIC int
376linvfs_create( 375xfs_vn_create(
377 struct inode *dir, 376 struct inode *dir,
378 struct dentry *dentry, 377 struct dentry *dentry,
379 int mode, 378 int mode,
380 struct nameidata *nd) 379 struct nameidata *nd)
381{ 380{
382 return linvfs_mknod(dir, dentry, mode, 0); 381 return xfs_vn_mknod(dir, dentry, mode, 0);
383} 382}
384 383
385STATIC int 384STATIC int
386linvfs_mkdir( 385xfs_vn_mkdir(
387 struct inode *dir, 386 struct inode *dir,
388 struct dentry *dentry, 387 struct dentry *dentry,
389 int mode) 388 int mode)
390{ 389{
391 return linvfs_mknod(dir, dentry, mode|S_IFDIR, 0); 390 return xfs_vn_mknod(dir, dentry, mode|S_IFDIR, 0);
392} 391}
393 392
394STATIC struct dentry * 393STATIC struct dentry *
395linvfs_lookup( 394xfs_vn_lookup(
396 struct inode *dir, 395 struct inode *dir,
397 struct dentry *dentry, 396 struct dentry *dentry,
398 struct nameidata *nd) 397 struct nameidata *nd)
399{ 398{
400 struct vnode *vp = LINVFS_GET_VP(dir), *cvp; 399 struct vnode *vp = vn_from_inode(dir), *cvp;
401 int error; 400 int error;
402 401
403 if (dentry->d_name.len >= MAXNAMELEN) 402 if (dentry->d_name.len >= MAXNAMELEN)
@@ -411,11 +410,11 @@ linvfs_lookup(
411 return NULL; 410 return NULL;
412 } 411 }
413 412
414 return d_splice_alias(LINVFS_GET_IP(cvp), dentry); 413 return d_splice_alias(vn_to_inode(cvp), dentry);
415} 414}
416 415
417STATIC int 416STATIC int
418linvfs_link( 417xfs_vn_link(
419 struct dentry *old_dentry, 418 struct dentry *old_dentry,
420 struct inode *dir, 419 struct inode *dir,
421 struct dentry *dentry) 420 struct dentry *dentry)
@@ -423,99 +422,102 @@ linvfs_link(
423 struct inode *ip; /* inode of guy being linked to */ 422 struct inode *ip; /* inode of guy being linked to */
424 vnode_t *tdvp; /* target directory for new name/link */ 423 vnode_t *tdvp; /* target directory for new name/link */
425 vnode_t *vp; /* vp of name being linked */ 424 vnode_t *vp; /* vp of name being linked */
425 vattr_t vattr;
426 int error; 426 int error;
427 427
428 ip = old_dentry->d_inode; /* inode being linked to */ 428 ip = old_dentry->d_inode; /* inode being linked to */
429 if (S_ISDIR(ip->i_mode)) 429 if (S_ISDIR(ip->i_mode))
430 return -EPERM; 430 return -EPERM;
431 431
432 tdvp = LINVFS_GET_VP(dir); 432 tdvp = vn_from_inode(dir);
433 vp = LINVFS_GET_VP(ip); 433 vp = vn_from_inode(ip);
434 434
435 VOP_LINK(tdvp, vp, dentry, NULL, error); 435 VOP_LINK(tdvp, vp, dentry, NULL, error);
436 if (!error) { 436 if (likely(!error)) {
437 VMODIFY(tdvp); 437 VMODIFY(tdvp);
438 VN_HOLD(vp); 438 VN_HOLD(vp);
439 validate_fields(ip); 439 xfs_validate_fields(ip, &vattr);
440 d_instantiate(dentry, ip); 440 d_instantiate(dentry, ip);
441 } 441 }
442 return -error; 442 return -error;
443} 443}
444 444
445STATIC int 445STATIC int
446linvfs_unlink( 446xfs_vn_unlink(
447 struct inode *dir, 447 struct inode *dir,
448 struct dentry *dentry) 448 struct dentry *dentry)
449{ 449{
450 struct inode *inode; 450 struct inode *inode;
451 vnode_t *dvp; /* directory containing name to remove */ 451 vnode_t *dvp; /* directory containing name to remove */
452 vattr_t vattr;
452 int error; 453 int error;
453 454
454 inode = dentry->d_inode; 455 inode = dentry->d_inode;
455 dvp = LINVFS_GET_VP(dir); 456 dvp = vn_from_inode(dir);
456 457
457 VOP_REMOVE(dvp, dentry, NULL, error); 458 VOP_REMOVE(dvp, dentry, NULL, error);
458 if (!error) { 459 if (likely(!error)) {
459 validate_fields(dir); /* For size only */ 460 xfs_validate_fields(dir, &vattr); /* size needs update */
460 validate_fields(inode); 461 xfs_validate_fields(inode, &vattr);
461 } 462 }
462
463 return -error; 463 return -error;
464} 464}
465 465
466STATIC int 466STATIC int
467linvfs_symlink( 467xfs_vn_symlink(
468 struct inode *dir, 468 struct inode *dir,
469 struct dentry *dentry, 469 struct dentry *dentry,
470 const char *symname) 470 const char *symname)
471{ 471{
472 struct inode *ip; 472 struct inode *ip;
473 vattr_t va; 473 vattr_t vattr = { 0 };
474 vnode_t *dvp; /* directory containing name of symlink */ 474 vnode_t *dvp; /* directory containing name of symlink */
475 vnode_t *cvp; /* used to lookup symlink to put in dentry */ 475 vnode_t *cvp; /* used to lookup symlink to put in dentry */
476 int error; 476 int error;
477 477
478 dvp = LINVFS_GET_VP(dir); 478 dvp = vn_from_inode(dir);
479 cvp = NULL; 479 cvp = NULL;
480 480
481 memset(&va, 0, sizeof(va)); 481 vattr.va_mode = S_IFLNK |
482 va.va_mode = S_IFLNK |
483 (irix_symlink_mode ? 0777 & ~current->fs->umask : S_IRWXUGO); 482 (irix_symlink_mode ? 0777 & ~current->fs->umask : S_IRWXUGO);
484 va.va_mask = XFS_AT_TYPE|XFS_AT_MODE; 483 vattr.va_mask = XFS_AT_TYPE|XFS_AT_MODE;
485 484
486 error = 0; 485 error = 0;
487 VOP_SYMLINK(dvp, dentry, &va, (char *)symname, &cvp, NULL, error); 486 VOP_SYMLINK(dvp, dentry, &vattr, (char *)symname, &cvp, NULL, error);
488 if (likely(!error && cvp)) { 487 if (likely(!error && cvp)) {
489 error = linvfs_init_security(cvp, dir); 488 error = xfs_init_security(cvp, dir);
490 if (likely(!error)) { 489 if (likely(!error)) {
491 ip = LINVFS_GET_IP(cvp); 490 ip = vn_to_inode(cvp);
492 d_instantiate(dentry, ip); 491 d_instantiate(dentry, ip);
493 validate_fields(dir); 492 xfs_validate_fields(dir, &vattr);
494 validate_fields(ip); 493 xfs_validate_fields(ip, &vattr);
494 } else {
495 xfs_cleanup_inode(dvp, cvp, dentry, 0);
495 } 496 }
496 } 497 }
497 return -error; 498 return -error;
498} 499}
499 500
500STATIC int 501STATIC int
501linvfs_rmdir( 502xfs_vn_rmdir(
502 struct inode *dir, 503 struct inode *dir,
503 struct dentry *dentry) 504 struct dentry *dentry)
504{ 505{
505 struct inode *inode = dentry->d_inode; 506 struct inode *inode = dentry->d_inode;
506 vnode_t *dvp = LINVFS_GET_VP(dir); 507 vnode_t *dvp = vn_from_inode(dir);
508 vattr_t vattr;
507 int error; 509 int error;
508 510
509 VOP_RMDIR(dvp, dentry, NULL, error); 511 VOP_RMDIR(dvp, dentry, NULL, error);
510 if (!error) { 512 if (likely(!error)) {
511 validate_fields(inode); 513 xfs_validate_fields(inode, &vattr);
512 validate_fields(dir); 514 xfs_validate_fields(dir, &vattr);
513 } 515 }
514 return -error; 516 return -error;
515} 517}
516 518
517STATIC int 519STATIC int
518linvfs_rename( 520xfs_vn_rename(
519 struct inode *odir, 521 struct inode *odir,
520 struct dentry *odentry, 522 struct dentry *odentry,
521 struct inode *ndir, 523 struct inode *ndir,
@@ -524,22 +526,21 @@ linvfs_rename(
524 struct inode *new_inode = ndentry->d_inode; 526 struct inode *new_inode = ndentry->d_inode;
525 vnode_t *fvp; /* from directory */ 527 vnode_t *fvp; /* from directory */
526 vnode_t *tvp; /* target directory */ 528 vnode_t *tvp; /* target directory */
529 vattr_t vattr;
527 int error; 530 int error;
528 531
529 fvp = LINVFS_GET_VP(odir); 532 fvp = vn_from_inode(odir);
530 tvp = LINVFS_GET_VP(ndir); 533 tvp = vn_from_inode(ndir);
531 534
532 VOP_RENAME(fvp, odentry, tvp, ndentry, NULL, error); 535 VOP_RENAME(fvp, odentry, tvp, ndentry, NULL, error);
533 if (error) 536 if (likely(!error)) {
534 return -error; 537 if (new_inode)
535 538 xfs_validate_fields(new_inode, &vattr);
536 if (new_inode) 539 xfs_validate_fields(odir, &vattr);
537 validate_fields(new_inode); 540 if (ndir != odir)
538 541 xfs_validate_fields(ndir, &vattr);
539 validate_fields(odir); 542 }
540 if (ndir != odir) 543 return -error;
541 validate_fields(ndir);
542 return 0;
543} 544}
544 545
545/* 546/*
@@ -548,7 +549,7 @@ linvfs_rename(
548 * uio is kmalloced for this reason... 549 * uio is kmalloced for this reason...
549 */ 550 */
550STATIC void * 551STATIC void *
551linvfs_follow_link( 552xfs_vn_follow_link(
552 struct dentry *dentry, 553 struct dentry *dentry,
553 struct nameidata *nd) 554 struct nameidata *nd)
554{ 555{
@@ -574,7 +575,7 @@ linvfs_follow_link(
574 return NULL; 575 return NULL;
575 } 576 }
576 577
577 vp = LINVFS_GET_VP(dentry->d_inode); 578 vp = vn_from_inode(dentry->d_inode);
578 579
579 iov.iov_base = link; 580 iov.iov_base = link;
580 iov.iov_len = MAXPATHLEN; 581 iov.iov_len = MAXPATHLEN;
@@ -599,7 +600,7 @@ linvfs_follow_link(
599} 600}
600 601
601STATIC void 602STATIC void
602linvfs_put_link( 603xfs_vn_put_link(
603 struct dentry *dentry, 604 struct dentry *dentry,
604 struct nameidata *nd, 605 struct nameidata *nd,
605 void *p) 606 void *p)
@@ -612,12 +613,12 @@ linvfs_put_link(
612 613
613#ifdef CONFIG_XFS_POSIX_ACL 614#ifdef CONFIG_XFS_POSIX_ACL
614STATIC int 615STATIC int
615linvfs_permission( 616xfs_vn_permission(
616 struct inode *inode, 617 struct inode *inode,
617 int mode, 618 int mode,
618 struct nameidata *nd) 619 struct nameidata *nd)
619{ 620{
620 vnode_t *vp = LINVFS_GET_VP(inode); 621 vnode_t *vp = vn_from_inode(inode);
621 int error; 622 int error;
622 623
623 mode <<= 6; /* convert from linux to vnode access bits */ 624 mode <<= 6; /* convert from linux to vnode access bits */
@@ -625,17 +626,17 @@ linvfs_permission(
625 return -error; 626 return -error;
626} 627}
627#else 628#else
628#define linvfs_permission NULL 629#define xfs_vn_permission NULL
629#endif 630#endif
630 631
631STATIC int 632STATIC int
632linvfs_getattr( 633xfs_vn_getattr(
633 struct vfsmount *mnt, 634 struct vfsmount *mnt,
634 struct dentry *dentry, 635 struct dentry *dentry,
635 struct kstat *stat) 636 struct kstat *stat)
636{ 637{
637 struct inode *inode = dentry->d_inode; 638 struct inode *inode = dentry->d_inode;
638 vnode_t *vp = LINVFS_GET_VP(inode); 639 vnode_t *vp = vn_from_inode(inode);
639 int error = 0; 640 int error = 0;
640 641
641 if (unlikely(vp->v_flag & VMODIFIED)) 642 if (unlikely(vp->v_flag & VMODIFIED))
@@ -646,18 +647,17 @@ linvfs_getattr(
646} 647}
647 648
648STATIC int 649STATIC int
649linvfs_setattr( 650xfs_vn_setattr(
650 struct dentry *dentry, 651 struct dentry *dentry,
651 struct iattr *attr) 652 struct iattr *attr)
652{ 653{
653 struct inode *inode = dentry->d_inode; 654 struct inode *inode = dentry->d_inode;
654 unsigned int ia_valid = attr->ia_valid; 655 unsigned int ia_valid = attr->ia_valid;
655 vnode_t *vp = LINVFS_GET_VP(inode); 656 vnode_t *vp = vn_from_inode(inode);
656 vattr_t vattr; 657 vattr_t vattr = { 0 };
657 int flags = 0; 658 int flags = 0;
658 int error; 659 int error;
659 660
660 memset(&vattr, 0, sizeof(vattr_t));
661 if (ia_valid & ATTR_UID) { 661 if (ia_valid & ATTR_UID) {
662 vattr.va_mask |= XFS_AT_UID; 662 vattr.va_mask |= XFS_AT_UID;
663 vattr.va_uid = attr->ia_uid; 663 vattr.va_uid = attr->ia_uid;
@@ -699,28 +699,27 @@ linvfs_setattr(
699#endif 699#endif
700 700
701 VOP_SETATTR(vp, &vattr, flags, NULL, error); 701 VOP_SETATTR(vp, &vattr, flags, NULL, error);
702 if (error) 702 if (likely(!error))
703 return -error; 703 __vn_revalidate(vp, &vattr);
704 vn_revalidate(vp); 704 return -error;
705 return error;
706} 705}
707 706
708STATIC void 707STATIC void
709linvfs_truncate( 708xfs_vn_truncate(
710 struct inode *inode) 709 struct inode *inode)
711{ 710{
712 block_truncate_page(inode->i_mapping, inode->i_size, linvfs_get_block); 711 block_truncate_page(inode->i_mapping, inode->i_size, xfs_get_block);
713} 712}
714 713
715STATIC int 714STATIC int
716linvfs_setxattr( 715xfs_vn_setxattr(
717 struct dentry *dentry, 716 struct dentry *dentry,
718 const char *name, 717 const char *name,
719 const void *data, 718 const void *data,
720 size_t size, 719 size_t size,
721 int flags) 720 int flags)
722{ 721{
723 vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); 722 vnode_t *vp = vn_from_inode(dentry->d_inode);
724 char *attr = (char *)name; 723 char *attr = (char *)name;
725 attrnames_t *namesp; 724 attrnames_t *namesp;
726 int xflags = 0; 725 int xflags = 0;
@@ -744,13 +743,13 @@ linvfs_setxattr(
744} 743}
745 744
746STATIC ssize_t 745STATIC ssize_t
747linvfs_getxattr( 746xfs_vn_getxattr(
748 struct dentry *dentry, 747 struct dentry *dentry,
749 const char *name, 748 const char *name,
750 void *data, 749 void *data,
751 size_t size) 750 size_t size)
752{ 751{
753 vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); 752 vnode_t *vp = vn_from_inode(dentry->d_inode);
754 char *attr = (char *)name; 753 char *attr = (char *)name;
755 attrnames_t *namesp; 754 attrnames_t *namesp;
756 int xflags = 0; 755 int xflags = 0;
@@ -774,12 +773,12 @@ linvfs_getxattr(
774} 773}
775 774
776STATIC ssize_t 775STATIC ssize_t
777linvfs_listxattr( 776xfs_vn_listxattr(
778 struct dentry *dentry, 777 struct dentry *dentry,
779 char *data, 778 char *data,
780 size_t size) 779 size_t size)
781{ 780{
782 vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); 781 vnode_t *vp = vn_from_inode(dentry->d_inode);
783 int error, xflags = ATTR_KERNAMELS; 782 int error, xflags = ATTR_KERNAMELS;
784 ssize_t result; 783 ssize_t result;
785 784
@@ -794,11 +793,11 @@ linvfs_listxattr(
794} 793}
795 794
796STATIC int 795STATIC int
797linvfs_removexattr( 796xfs_vn_removexattr(
798 struct dentry *dentry, 797 struct dentry *dentry,
799 const char *name) 798 const char *name)
800{ 799{
801 vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); 800 vnode_t *vp = vn_from_inode(dentry->d_inode);
802 char *attr = (char *)name; 801 char *attr = (char *)name;
803 attrnames_t *namesp; 802 attrnames_t *namesp;
804 int xflags = 0; 803 int xflags = 0;
@@ -816,45 +815,45 @@ linvfs_removexattr(
816} 815}
817 816
818 817
819struct inode_operations linvfs_file_inode_operations = { 818struct inode_operations xfs_inode_operations = {
820 .permission = linvfs_permission, 819 .permission = xfs_vn_permission,
821 .truncate = linvfs_truncate, 820 .truncate = xfs_vn_truncate,
822 .getattr = linvfs_getattr, 821 .getattr = xfs_vn_getattr,
823 .setattr = linvfs_setattr, 822 .setattr = xfs_vn_setattr,
824 .setxattr = linvfs_setxattr, 823 .setxattr = xfs_vn_setxattr,
825 .getxattr = linvfs_getxattr, 824 .getxattr = xfs_vn_getxattr,
826 .listxattr = linvfs_listxattr, 825 .listxattr = xfs_vn_listxattr,
827 .removexattr = linvfs_removexattr, 826 .removexattr = xfs_vn_removexattr,
828}; 827};
829 828
830struct inode_operations linvfs_dir_inode_operations = { 829struct inode_operations xfs_dir_inode_operations = {
831 .create = linvfs_create, 830 .create = xfs_vn_create,
832 .lookup = linvfs_lookup, 831 .lookup = xfs_vn_lookup,
833 .link = linvfs_link, 832 .link = xfs_vn_link,
834 .unlink = linvfs_unlink, 833 .unlink = xfs_vn_unlink,
835 .symlink = linvfs_symlink, 834 .symlink = xfs_vn_symlink,
836 .mkdir = linvfs_mkdir, 835 .mkdir = xfs_vn_mkdir,
837 .rmdir = linvfs_rmdir, 836 .rmdir = xfs_vn_rmdir,
838 .mknod = linvfs_mknod, 837 .mknod = xfs_vn_mknod,
839 .rename = linvfs_rename, 838 .rename = xfs_vn_rename,
840 .permission = linvfs_permission, 839 .permission = xfs_vn_permission,
841 .getattr = linvfs_getattr, 840 .getattr = xfs_vn_getattr,
842 .setattr = linvfs_setattr, 841 .setattr = xfs_vn_setattr,
843 .setxattr = linvfs_setxattr, 842 .setxattr = xfs_vn_setxattr,
844 .getxattr = linvfs_getxattr, 843 .getxattr = xfs_vn_getxattr,
845 .listxattr = linvfs_listxattr, 844 .listxattr = xfs_vn_listxattr,
846 .removexattr = linvfs_removexattr, 845 .removexattr = xfs_vn_removexattr,
847}; 846};
848 847
849struct inode_operations linvfs_symlink_inode_operations = { 848struct inode_operations xfs_symlink_inode_operations = {
850 .readlink = generic_readlink, 849 .readlink = generic_readlink,
851 .follow_link = linvfs_follow_link, 850 .follow_link = xfs_vn_follow_link,
852 .put_link = linvfs_put_link, 851 .put_link = xfs_vn_put_link,
853 .permission = linvfs_permission, 852 .permission = xfs_vn_permission,
854 .getattr = linvfs_getattr, 853 .getattr = xfs_vn_getattr,
855 .setattr = linvfs_setattr, 854 .setattr = xfs_vn_setattr,
856 .setxattr = linvfs_setxattr, 855 .setxattr = xfs_vn_setxattr,
857 .getxattr = linvfs_getxattr, 856 .getxattr = xfs_vn_getxattr,
858 .listxattr = linvfs_listxattr, 857 .listxattr = xfs_vn_listxattr,
859 .removexattr = linvfs_removexattr, 858 .removexattr = xfs_vn_removexattr,
860}; 859};
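Throughout this file the LINVFS_GET_VP()/LINVFS_GET_IP() macros give way to vn_from_inode()/vn_to_inode(), whose names encode the direction of the conversion. XFS of this era embeds the Linux inode inside its vnode, so both directions are pure pointer arithmetic; a sketch of how such a pair can be written, assuming that embedded layout:

	#include <linux/fs.h>
	#include <linux/kernel.h>	/* container_of() */

	struct demo_vnode {
		unsigned int	v_flag;
		/* ... filesystem-private vnode state ... */
		struct inode	v_inode;	/* Linux inode lives inside the vnode */
	};

	static inline struct inode *demo_vn_to_inode(struct demo_vnode *vp)
	{
		return &vp->v_inode;
	}

	static inline struct demo_vnode *demo_vn_from_inode(struct inode *inode)
	{
		return container_of(inode, struct demo_vnode, v_inode);
	}

Because both objects come from one allocation (see xfs_fs_alloc_inode() further down, which carves the inode out of xfs_vnode_zone), neither helper can fail or touch memory beyond the object itself.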
diff --git a/fs/xfs/linux-2.6/xfs_iops.h b/fs/xfs/linux-2.6/xfs_iops.h
index 6899a6b4a50a..a8417d7af5f9 100644
--- a/fs/xfs/linux-2.6/xfs_iops.h
+++ b/fs/xfs/linux-2.6/xfs_iops.h
@@ -18,13 +18,13 @@
18#ifndef __XFS_IOPS_H__ 18#ifndef __XFS_IOPS_H__
19#define __XFS_IOPS_H__ 19#define __XFS_IOPS_H__
20 20
21extern struct inode_operations linvfs_file_inode_operations; 21extern struct inode_operations xfs_inode_operations;
22extern struct inode_operations linvfs_dir_inode_operations; 22extern struct inode_operations xfs_dir_inode_operations;
23extern struct inode_operations linvfs_symlink_inode_operations; 23extern struct inode_operations xfs_symlink_inode_operations;
24 24
25extern struct file_operations linvfs_file_operations; 25extern struct file_operations xfs_file_operations;
26extern struct file_operations linvfs_invis_file_operations; 26extern struct file_operations xfs_dir_file_operations;
27extern struct file_operations linvfs_dir_operations; 27extern struct file_operations xfs_invis_file_operations;
28 28
29extern int xfs_ioctl(struct bhv_desc *, struct inode *, struct file *, 29extern int xfs_ioctl(struct bhv_desc *, struct inode *, struct file *,
30 int, unsigned int, void __user *); 30 int, unsigned int, void __user *);
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 67389b745526..1fe09f2d6519 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -73,6 +73,9 @@
73#include <linux/list.h> 73#include <linux/list.h>
74#include <linux/proc_fs.h> 74#include <linux/proc_fs.h>
75#include <linux/sort.h> 75#include <linux/sort.h>
76#include <linux/cpu.h>
77#include <linux/notifier.h>
78#include <linux/delay.h>
76 79
77#include <asm/page.h> 80#include <asm/page.h>
78#include <asm/div64.h> 81#include <asm/div64.h>
@@ -100,6 +103,11 @@
100 */ 103 */
101#undef HAVE_REFCACHE /* reference cache not needed for NFS in 2.6 */ 104#undef HAVE_REFCACHE /* reference cache not needed for NFS in 2.6 */
102#define HAVE_SENDFILE /* sendfile(2) exists in 2.6, but not in 2.4 */ 105#define HAVE_SENDFILE /* sendfile(2) exists in 2.6, but not in 2.4 */
106#ifdef CONFIG_SMP
107#define HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */
108#else
109#undef HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */
110#endif
103 111
104/* 112/*
105 * State flag for unwritten extent buffers. 113 * State flag for unwritten extent buffers.
@@ -226,7 +234,7 @@ BUFFER_FNS(PrivateStart, unwritten);
226#define xfs_sort(a,n,s,fn) sort(a,n,s,fn,NULL) 234#define xfs_sort(a,n,s,fn) sort(a,n,s,fn,NULL)
227#define xfs_stack_trace() dump_stack() 235#define xfs_stack_trace() dump_stack()
228#define xfs_itruncate_data(ip, off) \ 236#define xfs_itruncate_data(ip, off) \
229 (-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off))) 237 (-vmtruncate(vn_to_inode(XFS_ITOV(ip)), (off)))
230#define xfs_statvfs_fsid(statp, mp) \ 238#define xfs_statvfs_fsid(statp, mp) \
231 ({ u64 id = huge_encode_dev((mp)->m_ddev_targp->bt_dev); \ 239 ({ u64 id = huge_encode_dev((mp)->m_ddev_targp->bt_dev); \
232 __kernel_fsid_t *fsid = &(statp)->f_fsid; \ 240 __kernel_fsid_t *fsid = &(statp)->f_fsid; \
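The new <linux/cpu.h> and <linux/notifier.h> includes, together with the HAVE_PERCPU_SB switch above, support per-cpu superblock counters: keeping distributed counters honest across CPU hotplug requires a notifier that folds a departing CPU's deltas back into the global totals. A minimal 2.6-era registration sketch, with the fold step left as a hypothetical helper:

	#include <linux/cpu.h>
	#include <linux/notifier.h>

	static int demo_cpu_notify(struct notifier_block *nb,
				   unsigned long action, void *hcpu)
	{
		switch (action) {
		case CPU_DEAD:
			/* fold the dead CPU's per-cpu deltas back into
			 * the global superblock counters (hypothetical) */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block demo_cpu_nb = {
		.notifier_call = demo_cpu_notify,
	};

	/* at mount or module init: register_cpu_notifier(&demo_cpu_nb); */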
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index e0ab45fbfebd..0169360475c4 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -83,7 +83,7 @@ xfs_rw_enter_trace(
83 (void *)((unsigned long)ioflags), 83 (void *)((unsigned long)ioflags),
84 (void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)), 84 (void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
85 (void *)((unsigned long)(io->io_new_size & 0xffffffff)), 85 (void *)((unsigned long)(io->io_new_size & 0xffffffff)),
86 (void *)NULL, 86 (void *)((unsigned long)current_pid()),
87 (void *)NULL, 87 (void *)NULL,
88 (void *)NULL, 88 (void *)NULL,
89 (void *)NULL, 89 (void *)NULL,
@@ -113,7 +113,7 @@ xfs_inval_cached_trace(
113 (void *)((unsigned long)(first & 0xffffffff)), 113 (void *)((unsigned long)(first & 0xffffffff)),
114 (void *)((unsigned long)((last >> 32) & 0xffffffff)), 114 (void *)((unsigned long)((last >> 32) & 0xffffffff)),
115 (void *)((unsigned long)(last & 0xffffffff)), 115 (void *)((unsigned long)(last & 0xffffffff)),
116 (void *)NULL, 116 (void *)((unsigned long)current_pid()),
117 (void *)NULL, 117 (void *)NULL,
118 (void *)NULL, 118 (void *)NULL,
119 (void *)NULL, 119 (void *)NULL,
@@ -249,9 +249,8 @@ xfs_read(
249 if (n < size) 249 if (n < size)
250 size = n; 250 size = n;
251 251
252 if (XFS_FORCED_SHUTDOWN(mp)) { 252 if (XFS_FORCED_SHUTDOWN(mp))
253 return -EIO; 253 return -EIO;
254 }
255 254
256 if (unlikely(ioflags & IO_ISDIRECT)) 255 if (unlikely(ioflags & IO_ISDIRECT))
257 mutex_lock(&inode->i_mutex); 256 mutex_lock(&inode->i_mutex);
@@ -267,10 +266,14 @@ xfs_read(
267 dmflags, &locktype); 266 dmflags, &locktype);
268 if (ret) { 267 if (ret) {
269 xfs_iunlock(ip, XFS_IOLOCK_SHARED); 268 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
270 goto unlock_isem; 269 goto unlock_mutex;
271 } 270 }
272 } 271 }
273 272
273 if (unlikely((ioflags & IO_ISDIRECT) && VN_CACHED(vp)))
274 VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(*offset)),
275 -1, FI_REMAPF_LOCKED);
276
274 xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore, 277 xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
275 (void *)iovp, segs, *offset, ioflags); 278 (void *)iovp, segs, *offset, ioflags);
276 ret = __generic_file_aio_read(iocb, iovp, segs, offset); 279 ret = __generic_file_aio_read(iocb, iovp, segs, offset);
@@ -281,7 +284,7 @@ xfs_read(
281 284
282 xfs_iunlock(ip, XFS_IOLOCK_SHARED); 285 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
283 286
284unlock_isem: 287unlock_mutex:
285 if (unlikely(ioflags & IO_ISDIRECT)) 288 if (unlikely(ioflags & IO_ISDIRECT))
286 mutex_unlock(&inode->i_mutex); 289 mutex_unlock(&inode->i_mutex);
287 return ret; 290 return ret;
@@ -432,7 +435,7 @@ xfs_zero_eof(
432 xfs_fsize_t isize, /* current inode size */ 435 xfs_fsize_t isize, /* current inode size */
433 xfs_fsize_t end_size) /* terminal inode size */ 436 xfs_fsize_t end_size) /* terminal inode size */
434{ 437{
435 struct inode *ip = LINVFS_GET_IP(vp); 438 struct inode *ip = vn_to_inode(vp);
436 xfs_fileoff_t start_zero_fsb; 439 xfs_fileoff_t start_zero_fsb;
437 xfs_fileoff_t end_zero_fsb; 440 xfs_fileoff_t end_zero_fsb;
438 xfs_fileoff_t zero_count_fsb; 441 xfs_fileoff_t zero_count_fsb;
@@ -573,7 +576,7 @@ xfs_write(
573 vrwlock_t locktype; 576 vrwlock_t locktype;
574 size_t ocount = 0, count; 577 size_t ocount = 0, count;
575 loff_t pos; 578 loff_t pos;
576 int need_isem = 1, need_flush = 0; 579 int need_i_mutex = 1, need_flush = 0;
577 580
578 XFS_STATS_INC(xs_write_calls); 581 XFS_STATS_INC(xs_write_calls);
579 582
@@ -622,14 +625,14 @@ xfs_write(
622 return XFS_ERROR(-EINVAL); 625 return XFS_ERROR(-EINVAL);
623 626
624 if (!VN_CACHED(vp) && pos < i_size_read(inode)) 627 if (!VN_CACHED(vp) && pos < i_size_read(inode))
625 need_isem = 0; 628 need_i_mutex = 0;
626 629
627 if (VN_CACHED(vp)) 630 if (VN_CACHED(vp))
628 need_flush = 1; 631 need_flush = 1;
629 } 632 }
630 633
631relock: 634relock:
632 if (need_isem) { 635 if (need_i_mutex) {
633 iolock = XFS_IOLOCK_EXCL; 636 iolock = XFS_IOLOCK_EXCL;
634 locktype = VRWLOCK_WRITE; 637 locktype = VRWLOCK_WRITE;
635 638
@@ -651,7 +654,7 @@ start:
651 S_ISBLK(inode->i_mode)); 654 S_ISBLK(inode->i_mode));
652 if (error) { 655 if (error) {
653 xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock); 656 xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
654 goto out_unlock_isem; 657 goto out_unlock_mutex;
655 } 658 }
656 659
657 new_size = pos + count; 660 new_size = pos + count;
@@ -663,7 +666,7 @@ start:
663 loff_t savedsize = pos; 666 loff_t savedsize = pos;
664 int dmflags = FILP_DELAY_FLAG(file); 667 int dmflags = FILP_DELAY_FLAG(file);
665 668
666 if (need_isem) 669 if (need_i_mutex)
667 dmflags |= DM_FLAGS_IMUX; 670 dmflags |= DM_FLAGS_IMUX;
668 671
669 xfs_iunlock(xip, XFS_ILOCK_EXCL); 672 xfs_iunlock(xip, XFS_ILOCK_EXCL);
@@ -672,7 +675,7 @@ start:
672 dmflags, &locktype); 675 dmflags, &locktype);
673 if (error) { 676 if (error) {
674 xfs_iunlock(xip, iolock); 677 xfs_iunlock(xip, iolock);
675 goto out_unlock_isem; 678 goto out_unlock_mutex;
676 } 679 }
677 xfs_ilock(xip, XFS_ILOCK_EXCL); 680 xfs_ilock(xip, XFS_ILOCK_EXCL);
678 eventsent = 1; 681 eventsent = 1;
@@ -710,7 +713,7 @@ start:
710 isize, pos + count); 713 isize, pos + count);
711 if (error) { 714 if (error) {
712 xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock); 715 xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
713 goto out_unlock_isem; 716 goto out_unlock_mutex;
714 } 717 }
715 } 718 }
716 xfs_iunlock(xip, XFS_ILOCK_EXCL); 719 xfs_iunlock(xip, XFS_ILOCK_EXCL);
@@ -731,7 +734,7 @@ start:
731 error = -remove_suid(file->f_dentry); 734 error = -remove_suid(file->f_dentry);
732 if (unlikely(error)) { 735 if (unlikely(error)) {
733 xfs_iunlock(xip, iolock); 736 xfs_iunlock(xip, iolock);
734 goto out_unlock_isem; 737 goto out_unlock_mutex;
735 } 738 }
736 } 739 }
737 740
@@ -747,14 +750,14 @@ retry:
747 -1, FI_REMAPF_LOCKED); 750 -1, FI_REMAPF_LOCKED);
748 } 751 }
749 752
750 if (need_isem) { 753 if (need_i_mutex) {
751 /* demote the lock now the cached pages are gone */ 754 /* demote the lock now the cached pages are gone */
752 XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL); 755 XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
753 mutex_unlock(&inode->i_mutex); 756 mutex_unlock(&inode->i_mutex);
754 757
755 iolock = XFS_IOLOCK_SHARED; 758 iolock = XFS_IOLOCK_SHARED;
756 locktype = VRWLOCK_WRITE_DIRECT; 759 locktype = VRWLOCK_WRITE_DIRECT;
757 need_isem = 0; 760 need_i_mutex = 0;
758 } 761 }
759 762
760 xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs, 763 xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs,
@@ -772,7 +775,7 @@ retry:
772 pos += ret; 775 pos += ret;
773 count -= ret; 776 count -= ret;
774 777
775 need_isem = 1; 778 need_i_mutex = 1;
776 ioflags &= ~IO_ISDIRECT; 779 ioflags &= ~IO_ISDIRECT;
777 xfs_iunlock(xip, iolock); 780 xfs_iunlock(xip, iolock);
778 goto relock; 781 goto relock;
@@ -794,14 +797,14 @@ retry:
794 !(ioflags & IO_INVIS)) { 797 !(ioflags & IO_INVIS)) {
795 798
796 xfs_rwunlock(bdp, locktype); 799 xfs_rwunlock(bdp, locktype);
797 if (need_isem) 800 if (need_i_mutex)
798 mutex_unlock(&inode->i_mutex); 801 mutex_unlock(&inode->i_mutex);
799 error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp, 802 error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
800 DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL, 803 DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
801 0, 0, 0); /* Delay flag intentionally unused */ 804 0, 0, 0); /* Delay flag intentionally unused */
802 if (error) 805 if (error)
803 goto out_nounlocks; 806 goto out_nounlocks;
804 if (need_isem) 807 if (need_i_mutex)
805 mutex_lock(&inode->i_mutex); 808 mutex_lock(&inode->i_mutex);
806 xfs_rwlock(bdp, locktype); 809 xfs_rwlock(bdp, locktype);
807 pos = xip->i_d.di_size; 810 pos = xip->i_d.di_size;
@@ -905,9 +908,9 @@ retry:
905 if (error) 908 if (error)
906 goto out_unlock_internal; 909 goto out_unlock_internal;
907 } 910 }
908 911
909 xfs_rwunlock(bdp, locktype); 912 xfs_rwunlock(bdp, locktype);
910 if (need_isem) 913 if (need_i_mutex)
911 mutex_unlock(&inode->i_mutex); 914 mutex_unlock(&inode->i_mutex);
912 915
913 error = sync_page_range(inode, mapping, pos, ret); 916 error = sync_page_range(inode, mapping, pos, ret);
@@ -918,8 +921,8 @@ retry:
918 921
919 out_unlock_internal: 922 out_unlock_internal:
920 xfs_rwunlock(bdp, locktype); 923 xfs_rwunlock(bdp, locktype);
921 out_unlock_isem: 924 out_unlock_mutex:
922 if (need_isem) 925 if (need_i_mutex)
923 mutex_unlock(&inode->i_mutex); 926 mutex_unlock(&inode->i_mutex);
924 out_nounlocks: 927 out_nounlocks:
925 return -error; 928 return -error;
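Amid the need_isem -> need_i_mutex renames, xfs_read() also gains real behaviour: a direct read that finds cached pages (VN_CACHED) now flushes and invalidates them first, so the DIO result reflects earlier buffered writes. Expressed against the generic 2.6 page-cache API rather than the XFS-specific VOP_FLUSHINVAL_PAGES, the idea looks roughly like this sketch:

	#include <linux/fs.h>
	#include <linux/pagemap.h>

	/* sketch: make a direct read coherent with the page cache */
	static int demo_flush_before_dio(struct inode *inode)
	{
		struct address_space *mapping = inode->i_mapping;
		int error = 0;

		if (mapping->nrpages) {
			/* write back any dirty cached pages... */
			error = filemap_write_and_wait(mapping);
			/* ...then drop the now-clean cached copies */
			if (!error)
				error = invalidate_inode_pages2(mapping);
		}
		return error;
	}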
diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/linux-2.6/xfs_stats.c
index 8955720a2c6b..713e6a7505d0 100644
--- a/fs/xfs/linux-2.6/xfs_stats.c
+++ b/fs/xfs/linux-2.6/xfs_stats.c
@@ -62,18 +62,15 @@ xfs_read_xfsstats(
62 while (j < xstats[i].endpoint) { 62 while (j < xstats[i].endpoint) {
63 val = 0; 63 val = 0;
64 /* sum over all cpus */ 64 /* sum over all cpus */
65 for (c = 0; c < NR_CPUS; c++) { 65 for_each_cpu(c)
66 if (!cpu_possible(c)) continue;
67 val += *(((__u32*)&per_cpu(xfsstats, c) + j)); 66 val += *(((__u32*)&per_cpu(xfsstats, c) + j));
68 }
69 len += sprintf(buffer + len, " %u", val); 67 len += sprintf(buffer + len, " %u", val);
70 j++; 68 j++;
71 } 69 }
72 buffer[len++] = '\n'; 70 buffer[len++] = '\n';
73 } 71 }
74 /* extra precision counters */ 72 /* extra precision counters */
75 for (i = 0; i < NR_CPUS; i++) { 73 for_each_cpu(i) {
76 if (!cpu_possible(i)) continue;
77 xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes; 74 xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes;
78 xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes; 75 xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes;
79 xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes; 76 xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes;
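The stats loops above replace open-coded NR_CPUS iteration plus cpu_possible() checks with for_each_cpu(), which in 2.6.16 walks exactly the possible CPUs (the iterator was later renamed for_each_possible_cpu()). The summation pattern in isolation, over a hypothetical per-cpu counter:

	#include <linux/types.h>
	#include <linux/percpu.h>

	DEFINE_PER_CPU(__u32, demo_stat);

	static __u32 demo_sum_stat(void)
	{
		__u32 val = 0;
		int c;

		for_each_cpu(c)		/* possible CPUs only, no manual check */
			val += per_cpu(demo_stat, c);
		return val;
	}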
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index f22e426d9e42..8355faf8ffde 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -59,8 +59,8 @@
59#include <linux/writeback.h> 59#include <linux/writeback.h>
60#include <linux/kthread.h> 60#include <linux/kthread.h>
61 61
62STATIC struct quotactl_ops linvfs_qops; 62STATIC struct quotactl_ops xfs_quotactl_operations;
63STATIC struct super_operations linvfs_sops; 63STATIC struct super_operations xfs_super_operations;
64STATIC kmem_zone_t *xfs_vnode_zone; 64STATIC kmem_zone_t *xfs_vnode_zone;
65STATIC kmem_zone_t *xfs_ioend_zone; 65STATIC kmem_zone_t *xfs_ioend_zone;
66mempool_t *xfs_ioend_pool; 66mempool_t *xfs_ioend_pool;
@@ -76,8 +76,6 @@ xfs_args_allocate(
76 strncpy(args->fsname, sb->s_id, MAXNAMELEN); 76 strncpy(args->fsname, sb->s_id, MAXNAMELEN);
77 77
78 /* Copy the already-parsed mount(2) flags we're interested in */ 78 /* Copy the already-parsed mount(2) flags we're interested in */
79 if (sb->s_flags & MS_NOATIME)
80 args->flags |= XFSMNT_NOATIME;
81 if (sb->s_flags & MS_DIRSYNC) 79 if (sb->s_flags & MS_DIRSYNC)
82 args->flags |= XFSMNT_DIRSYNC; 80 args->flags |= XFSMNT_DIRSYNC;
83 if (sb->s_flags & MS_SYNCHRONOUS) 81 if (sb->s_flags & MS_SYNCHRONOUS)
@@ -129,21 +127,21 @@ xfs_set_inodeops(
129{ 127{
130 switch (inode->i_mode & S_IFMT) { 128 switch (inode->i_mode & S_IFMT) {
131 case S_IFREG: 129 case S_IFREG:
132 inode->i_op = &linvfs_file_inode_operations; 130 inode->i_op = &xfs_inode_operations;
133 inode->i_fop = &linvfs_file_operations; 131 inode->i_fop = &xfs_file_operations;
134 inode->i_mapping->a_ops = &linvfs_aops; 132 inode->i_mapping->a_ops = &xfs_address_space_operations;
135 break; 133 break;
136 case S_IFDIR: 134 case S_IFDIR:
137 inode->i_op = &linvfs_dir_inode_operations; 135 inode->i_op = &xfs_dir_inode_operations;
138 inode->i_fop = &linvfs_dir_operations; 136 inode->i_fop = &xfs_dir_file_operations;
139 break; 137 break;
140 case S_IFLNK: 138 case S_IFLNK:
141 inode->i_op = &linvfs_symlink_inode_operations; 139 inode->i_op = &xfs_symlink_inode_operations;
142 if (inode->i_blocks) 140 if (inode->i_blocks)
143 inode->i_mapping->a_ops = &linvfs_aops; 141 inode->i_mapping->a_ops = &xfs_address_space_operations;
144 break; 142 break;
145 default: 143 default:
146 inode->i_op = &linvfs_file_inode_operations; 144 inode->i_op = &xfs_inode_operations;
147 init_special_inode(inode, inode->i_mode, inode->i_rdev); 145 init_special_inode(inode, inode->i_mode, inode->i_rdev);
148 break; 146 break;
149 } 147 }
@@ -155,7 +153,7 @@ xfs_revalidate_inode(
155 vnode_t *vp, 153 vnode_t *vp,
156 xfs_inode_t *ip) 154 xfs_inode_t *ip)
157{ 155{
158 struct inode *inode = LINVFS_GET_IP(vp); 156 struct inode *inode = vn_to_inode(vp);
159 157
160 inode->i_mode = ip->i_d.di_mode; 158 inode->i_mode = ip->i_d.di_mode;
161 inode->i_nlink = ip->i_d.di_nlink; 159 inode->i_nlink = ip->i_d.di_nlink;
@@ -212,7 +210,7 @@ xfs_initialize_vnode(
212 int unlock) 210 int unlock)
213{ 211{
214 xfs_inode_t *ip = XFS_BHVTOI(inode_bhv); 212 xfs_inode_t *ip = XFS_BHVTOI(inode_bhv);
215 struct inode *inode = LINVFS_GET_IP(vp); 213 struct inode *inode = vn_to_inode(vp);
216 214
217 if (!inode_bhv->bd_vobj) { 215 if (!inode_bhv->bd_vobj) {
218 vp->v_vfsp = bhvtovfs(bdp); 216 vp->v_vfsp = bhvtovfs(bdp);
@@ -230,7 +228,7 @@ xfs_initialize_vnode(
230 if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) { 228 if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) {
231 xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip); 229 xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
232 xfs_set_inodeops(inode); 230 xfs_set_inodeops(inode);
233 231
234 ip->i_flags &= ~XFS_INEW; 232 ip->i_flags &= ~XFS_INEW;
235 barrier(); 233 barrier();
236 234
@@ -334,43 +332,42 @@ xfs_blkdev_issue_flush(
334} 332}
335 333
336STATIC struct inode * 334STATIC struct inode *
337linvfs_alloc_inode( 335xfs_fs_alloc_inode(
338 struct super_block *sb) 336 struct super_block *sb)
339{ 337{
340 vnode_t *vp; 338 vnode_t *vp;
341 339
342 vp = kmem_cache_alloc(xfs_vnode_zone, kmem_flags_convert(KM_SLEEP)); 340 vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
343 if (!vp) 341 if (unlikely(!vp))
344 return NULL; 342 return NULL;
345 return LINVFS_GET_IP(vp); 343 return vn_to_inode(vp);
346} 344}
347 345
348STATIC void 346STATIC void
349linvfs_destroy_inode( 347xfs_fs_destroy_inode(
350 struct inode *inode) 348 struct inode *inode)
351{ 349{
352 kmem_zone_free(xfs_vnode_zone, LINVFS_GET_VP(inode)); 350 kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode));
353} 351}
354 352
355STATIC void 353STATIC void
356linvfs_inode_init_once( 354xfs_fs_inode_init_once(
357 void *data, 355 void *vnode,
358 kmem_cache_t *cachep, 356 kmem_zone_t *zonep,
359 unsigned long flags) 357 unsigned long flags)
360{ 358{
361 vnode_t *vp = (vnode_t *)data;
362
363 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == 359 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
364 SLAB_CTOR_CONSTRUCTOR) 360 SLAB_CTOR_CONSTRUCTOR)
365 inode_init_once(LINVFS_GET_IP(vp)); 361 inode_init_once(vn_to_inode((vnode_t *)vnode));
366} 362}
367 363
368STATIC int 364STATIC int
369linvfs_init_zones(void) 365xfs_init_zones(void)
370{ 366{
371 xfs_vnode_zone = kmem_cache_create("xfs_vnode", 367 xfs_vnode_zone = kmem_zone_init_flags(sizeof(vnode_t), "xfs_vnode_t",
372 sizeof(vnode_t), 0, SLAB_RECLAIM_ACCOUNT, 368 KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
373 linvfs_inode_init_once, NULL); 369 KM_ZONE_SPREAD,
370 xfs_fs_inode_init_once);
374 if (!xfs_vnode_zone) 371 if (!xfs_vnode_zone)
375 goto out; 372 goto out;
376 373
@@ -379,14 +376,12 @@ linvfs_init_zones(void)
379 goto out_destroy_vnode_zone; 376 goto out_destroy_vnode_zone;
380 377
381 xfs_ioend_pool = mempool_create(4 * MAX_BUF_PER_PAGE, 378 xfs_ioend_pool = mempool_create(4 * MAX_BUF_PER_PAGE,
382 mempool_alloc_slab, mempool_free_slab, 379 mempool_alloc_slab, mempool_free_slab,
383 xfs_ioend_zone); 380 xfs_ioend_zone);
384 if (!xfs_ioend_pool) 381 if (!xfs_ioend_pool)
385 goto out_free_ioend_zone; 382 goto out_free_ioend_zone;
386
387 return 0; 383 return 0;
388 384
389
390 out_free_ioend_zone: 385 out_free_ioend_zone:
391 kmem_zone_destroy(xfs_ioend_zone); 386 kmem_zone_destroy(xfs_ioend_zone);
392 out_destroy_vnode_zone: 387 out_destroy_vnode_zone:
@@ -396,7 +391,7 @@ linvfs_init_zones(void)
396} 391}
397 392
398STATIC void 393STATIC void
399linvfs_destroy_zones(void) 394xfs_destroy_zones(void)
400{ 395{
401 mempool_destroy(xfs_ioend_pool); 396 mempool_destroy(xfs_ioend_pool);
402 kmem_zone_destroy(xfs_vnode_zone); 397 kmem_zone_destroy(xfs_vnode_zone);
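Underneath the kmem_zone_init_flags() wrapper now used in xfs_init_zones() sits the standard slab idiom: a cache whose constructor runs once per object as slab pages are populated, so per-object setup such as inode_init_once() is not repeated on every allocation. The raw 2.6-era form, with hypothetical demo_ names:

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	struct demo_object {
		int	state;
	};

	static kmem_cache_t *demo_zone;

	/* invoked when the slab carves fresh objects out of a new page,
	 * not on every kmem_cache_alloc() */
	static void demo_init_once(void *obj, kmem_cache_t *cachep,
				   unsigned long flags)
	{
		if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
		    SLAB_CTOR_CONSTRUCTOR)
			memset(obj, 0, sizeof(struct demo_object));
	}

	static int demo_init(void)
	{
		demo_zone = kmem_cache_create("demo_zone",
				sizeof(struct demo_object), 0,
				SLAB_RECLAIM_ACCOUNT, demo_init_once, NULL);
		return demo_zone ? 0 : -ENOMEM;
	}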
@@ -407,14 +402,14 @@ linvfs_destroy_zones(void)
407 * Attempt to flush the inode, this will actually fail 402 * Attempt to flush the inode, this will actually fail
408 * if the inode is pinned, but we dirty the inode again 403 * if the inode is pinned, but we dirty the inode again
409 * at the point when it is unpinned after a log write, 404 * at the point when it is unpinned after a log write,
410 * since this is when the inode itself becomes flushable. 405 * since this is when the inode itself becomes flushable.
411 */ 406 */
412STATIC int 407STATIC int
413linvfs_write_inode( 408xfs_fs_write_inode(
414 struct inode *inode, 409 struct inode *inode,
415 int sync) 410 int sync)
416{ 411{
417 vnode_t *vp = LINVFS_GET_VP(inode); 412 vnode_t *vp = vn_from_inode(inode);
418 int error = 0, flags = FLUSH_INODE; 413 int error = 0, flags = FLUSH_INODE;
419 414
420 if (vp) { 415 if (vp) {
@@ -434,13 +429,13 @@ linvfs_write_inode(
434} 429}
435 430
436STATIC void 431STATIC void
437linvfs_clear_inode( 432xfs_fs_clear_inode(
438 struct inode *inode) 433 struct inode *inode)
439{ 434{
440 vnode_t *vp = LINVFS_GET_VP(inode); 435 vnode_t *vp = vn_from_inode(inode);
441 int error, cache; 436 int error, cache;
442 437
443 vn_trace_entry(vp, "clear_inode", (inst_t *)__return_address); 438 vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
444 439
445 XFS_STATS_INC(vn_rele); 440 XFS_STATS_INC(vn_rele);
446 XFS_STATS_INC(vn_remove); 441 XFS_STATS_INC(vn_remove);
@@ -516,7 +511,7 @@ void
516xfs_flush_inode( 511xfs_flush_inode(
517 xfs_inode_t *ip) 512 xfs_inode_t *ip)
518{ 513{
519 struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip)); 514 struct inode *inode = vn_to_inode(XFS_ITOV(ip));
520 struct vfs *vfs = XFS_MTOVFS(ip->i_mount); 515 struct vfs *vfs = XFS_MTOVFS(ip->i_mount);
521 516
522 igrab(inode); 517 igrab(inode);
@@ -541,7 +536,7 @@ void
541xfs_flush_device( 536xfs_flush_device(
542 xfs_inode_t *ip) 537 xfs_inode_t *ip)
543{ 538{
544 struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip)); 539 struct inode *inode = vn_to_inode(XFS_ITOV(ip));
545 struct vfs *vfs = XFS_MTOVFS(ip->i_mount); 540 struct vfs *vfs = XFS_MTOVFS(ip->i_mount);
546 541
547 igrab(inode); 542 igrab(inode);
@@ -550,7 +545,7 @@ xfs_flush_device(
550 xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC); 545 xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
551} 546}
552 547
553#define SYNCD_FLAGS (SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR) 548#define SYNCD_FLAGS (SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR|SYNC_REFCACHE)
554STATIC void 549STATIC void
555vfs_sync_worker( 550vfs_sync_worker(
556 vfs_t *vfsp, 551 vfs_t *vfsp,
@@ -613,7 +608,7 @@ xfssyncd(
613} 608}
614 609
615STATIC int 610STATIC int
616linvfs_start_syncd( 611xfs_fs_start_syncd(
617 vfs_t *vfsp) 612 vfs_t *vfsp)
618{ 613{
619 vfsp->vfs_sync_work.w_syncer = vfs_sync_worker; 614 vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
@@ -625,20 +620,20 @@ linvfs_start_syncd(
625} 620}
626 621
627STATIC void 622STATIC void
628linvfs_stop_syncd( 623xfs_fs_stop_syncd(
629 vfs_t *vfsp) 624 vfs_t *vfsp)
630{ 625{
631 kthread_stop(vfsp->vfs_sync_task); 626 kthread_stop(vfsp->vfs_sync_task);
632} 627}
633 628
634STATIC void 629STATIC void
635linvfs_put_super( 630xfs_fs_put_super(
636 struct super_block *sb) 631 struct super_block *sb)
637{ 632{
638 vfs_t *vfsp = LINVFS_GET_VFS(sb); 633 vfs_t *vfsp = vfs_from_sb(sb);
639 int error; 634 int error;
640 635
641 linvfs_stop_syncd(vfsp); 636 xfs_fs_stop_syncd(vfsp);
642 VFS_SYNC(vfsp, SYNC_ATTR|SYNC_DELWRI, NULL, error); 637 VFS_SYNC(vfsp, SYNC_ATTR|SYNC_DELWRI, NULL, error);
643 if (!error) 638 if (!error)
644 VFS_UNMOUNT(vfsp, 0, NULL, error); 639 VFS_UNMOUNT(vfsp, 0, NULL, error);
@@ -652,10 +647,10 @@ linvfs_put_super(
652} 647}
653 648
654STATIC void 649STATIC void
655linvfs_write_super( 650xfs_fs_write_super(
656 struct super_block *sb) 651 struct super_block *sb)
657{ 652{
658 vfs_t *vfsp = LINVFS_GET_VFS(sb); 653 vfs_t *vfsp = vfs_from_sb(sb);
659 int error; 654 int error;
660 655
661 if (sb->s_flags & MS_RDONLY) { 656 if (sb->s_flags & MS_RDONLY) {
@@ -668,11 +663,11 @@ linvfs_write_super(
668} 663}
669 664
670STATIC int 665STATIC int
671linvfs_sync_super( 666xfs_fs_sync_super(
672 struct super_block *sb, 667 struct super_block *sb,
 	int			wait)
 {
-	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
+	vfs_t			*vfsp = vfs_from_sb(sb);
 	int			error;
 	int			flags = SYNC_FSDATA;
 
@@ -707,11 +702,11 @@ linvfs_sync_super(
 }
 
 STATIC int
-linvfs_statfs(
+xfs_fs_statfs(
 	struct super_block	*sb,
 	struct kstatfs		*statp)
 {
-	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
+	vfs_t			*vfsp = vfs_from_sb(sb);
 	int			error;
 
 	VFS_STATVFS(vfsp, statp, NULL, error);
@@ -719,12 +714,12 @@ linvfs_statfs(
 }
 
 STATIC int
-linvfs_remount(
+xfs_fs_remount(
 	struct super_block	*sb,
 	int			*flags,
 	char			*options)
 {
-	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
+	vfs_t			*vfsp = vfs_from_sb(sb);
 	struct xfs_mount_args	*args = xfs_args_allocate(sb);
 	int			error;
 
@@ -736,18 +731,18 @@ linvfs_remount(
 }
 
 STATIC void
-linvfs_freeze_fs(
+xfs_fs_lockfs(
 	struct super_block	*sb)
 {
-	VFS_FREEZE(LINVFS_GET_VFS(sb));
+	VFS_FREEZE(vfs_from_sb(sb));
 }
 
 STATIC int
-linvfs_show_options(
+xfs_fs_show_options(
 	struct seq_file		*m,
 	struct vfsmount		*mnt)
 {
-	struct vfs		*vfsp = LINVFS_GET_VFS(mnt->mnt_sb);
+	struct vfs		*vfsp = vfs_from_sb(mnt->mnt_sb);
 	int			error;
 
 	VFS_SHOWARGS(vfsp, m, error);
@@ -755,11 +750,11 @@ linvfs_show_options(
 }
 
 STATIC int
-linvfs_quotasync(
+xfs_fs_quotasync(
 	struct super_block	*sb,
 	int			type)
 {
-	struct vfs		*vfsp = LINVFS_GET_VFS(sb);
+	struct vfs		*vfsp = vfs_from_sb(sb);
 	int			error;
 
 	VFS_QUOTACTL(vfsp, Q_XQUOTASYNC, 0, (caddr_t)NULL, error);
@@ -767,11 +762,11 @@ linvfs_quotasync(
 }
 
 STATIC int
-linvfs_getxstate(
+xfs_fs_getxstate(
 	struct super_block	*sb,
 	struct fs_quota_stat	*fqs)
 {
-	struct vfs		*vfsp = LINVFS_GET_VFS(sb);
+	struct vfs		*vfsp = vfs_from_sb(sb);
 	int			error;
 
 	VFS_QUOTACTL(vfsp, Q_XGETQSTAT, 0, (caddr_t)fqs, error);
@@ -779,12 +774,12 @@ linvfs_getxstate(
 }
 
 STATIC int
-linvfs_setxstate(
+xfs_fs_setxstate(
 	struct super_block	*sb,
 	unsigned int		flags,
 	int			op)
 {
-	struct vfs		*vfsp = LINVFS_GET_VFS(sb);
+	struct vfs		*vfsp = vfs_from_sb(sb);
 	int			error;
 
 	VFS_QUOTACTL(vfsp, op, 0, (caddr_t)&flags, error);
@@ -792,13 +787,13 @@ linvfs_setxstate(
 }
 
 STATIC int
-linvfs_getxquota(
+xfs_fs_getxquota(
 	struct super_block	*sb,
 	int			type,
 	qid_t			id,
 	struct fs_disk_quota	*fdq)
 {
-	struct vfs		*vfsp = LINVFS_GET_VFS(sb);
+	struct vfs		*vfsp = vfs_from_sb(sb);
 	int			error, getmode;
 
 	getmode = (type == USRQUOTA) ? Q_XGETQUOTA :
@@ -808,13 +803,13 @@ linvfs_getxquota(
 }
 
 STATIC int
-linvfs_setxquota(
+xfs_fs_setxquota(
 	struct super_block	*sb,
 	int			type,
 	qid_t			id,
 	struct fs_disk_quota	*fdq)
 {
-	struct vfs		*vfsp = LINVFS_GET_VFS(sb);
+	struct vfs		*vfsp = vfs_from_sb(sb);
 	int			error, setmode;
 
 	setmode = (type == USRQUOTA) ? Q_XSETQLIM :
@@ -824,21 +819,17 @@ linvfs_setxquota(
 }
 
 STATIC int
-linvfs_fill_super(
+xfs_fs_fill_super(
 	struct super_block	*sb,
 	void			*data,
 	int			silent)
 {
 	vnode_t			*rootvp;
-	struct vfs		*vfsp = vfs_allocate();
+	struct vfs		*vfsp = vfs_allocate(sb);
 	struct xfs_mount_args	*args = xfs_args_allocate(sb);
 	struct kstatfs		statvfs;
 	int			error, error2;
 
-	vfsp->vfs_super = sb;
-	LINVFS_SET_VFS(sb, vfsp);
-	if (sb->s_flags & MS_RDONLY)
-		vfsp->vfs_flag |= VFS_RDONLY;
 	bhv_insert_all_vfsops(vfsp);
 
 	VFS_PARSEARGS(vfsp, (char *)data, args, 0, error);
@@ -849,10 +840,10 @@ linvfs_fill_super(
 
 	sb_min_blocksize(sb, BBSIZE);
 #ifdef CONFIG_XFS_EXPORT
-	sb->s_export_op = &linvfs_export_ops;
+	sb->s_export_op = &xfs_export_operations;
 #endif
-	sb->s_qcop = &linvfs_qops;
-	sb->s_op = &linvfs_sops;
+	sb->s_qcop = &xfs_quotactl_operations;
+	sb->s_op = &xfs_super_operations;
 
 	VFS_MOUNT(vfsp, args, NULL, error);
 	if (error) {
@@ -876,7 +867,7 @@ linvfs_fill_super(
 	if (error)
 		goto fail_unmount;
 
-	sb->s_root = d_alloc_root(LINVFS_GET_IP(rootvp));
+	sb->s_root = d_alloc_root(vn_to_inode(rootvp));
 	if (!sb->s_root) {
 		error = ENOMEM;
 		goto fail_vnrele;
@@ -885,7 +876,7 @@ linvfs_fill_super(
 		error = EINVAL;
 		goto fail_vnrele;
 	}
-	if ((error = linvfs_start_syncd(vfsp)))
+	if ((error = xfs_fs_start_syncd(vfsp)))
 		goto fail_vnrele;
 	vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address);
 
@@ -910,41 +901,41 @@ fail_vfsop:
 }
 
 STATIC struct super_block *
-linvfs_get_sb(
+xfs_fs_get_sb(
	struct file_system_type	*fs_type,
 	int			flags,
 	const char		*dev_name,
 	void			*data)
 {
-	return get_sb_bdev(fs_type, flags, dev_name, data, linvfs_fill_super);
+	return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
 }
 
-STATIC struct super_operations linvfs_sops = {
-	.alloc_inode		= linvfs_alloc_inode,
-	.destroy_inode		= linvfs_destroy_inode,
-	.write_inode		= linvfs_write_inode,
-	.clear_inode		= linvfs_clear_inode,
-	.put_super		= linvfs_put_super,
-	.write_super		= linvfs_write_super,
-	.sync_fs		= linvfs_sync_super,
-	.write_super_lockfs	= linvfs_freeze_fs,
-	.statfs			= linvfs_statfs,
-	.remount_fs		= linvfs_remount,
-	.show_options		= linvfs_show_options,
+STATIC struct super_operations xfs_super_operations = {
+	.alloc_inode		= xfs_fs_alloc_inode,
+	.destroy_inode		= xfs_fs_destroy_inode,
+	.write_inode		= xfs_fs_write_inode,
+	.clear_inode		= xfs_fs_clear_inode,
+	.put_super		= xfs_fs_put_super,
+	.write_super		= xfs_fs_write_super,
+	.sync_fs		= xfs_fs_sync_super,
+	.write_super_lockfs	= xfs_fs_lockfs,
+	.statfs			= xfs_fs_statfs,
+	.remount_fs		= xfs_fs_remount,
+	.show_options		= xfs_fs_show_options,
 };
 
-STATIC struct quotactl_ops linvfs_qops = {
-	.quota_sync		= linvfs_quotasync,
-	.get_xstate		= linvfs_getxstate,
-	.set_xstate		= linvfs_setxstate,
-	.get_xquota		= linvfs_getxquota,
-	.set_xquota		= linvfs_setxquota,
+STATIC struct quotactl_ops xfs_quotactl_operations = {
+	.quota_sync		= xfs_fs_quotasync,
+	.get_xstate		= xfs_fs_getxstate,
+	.set_xstate		= xfs_fs_setxstate,
+	.get_xquota		= xfs_fs_getxquota,
+	.set_xquota		= xfs_fs_setxquota,
 };
 
 STATIC struct file_system_type xfs_fs_type = {
 	.owner			= THIS_MODULE,
 	.name			= "xfs",
-	.get_sb			= linvfs_get_sb,
+	.get_sb			= xfs_fs_get_sb,
 	.kill_sb		= kill_block_super,
 	.fs_flags		= FS_REQUIRES_DEV,
 };
@@ -965,7 +956,7 @@ init_xfs_fs( void )
 
 	ktrace_init(64);
 
-	error = linvfs_init_zones();
+	error = xfs_init_zones();
 	if (error < 0)
 		goto undo_zones;
 
@@ -981,14 +972,13 @@ init_xfs_fs( void )
 	error = register_filesystem(&xfs_fs_type);
 	if (error)
 		goto undo_register;
-	XFS_DM_INIT(&xfs_fs_type);
 	return 0;
 
undo_register:
 	xfs_buf_terminate();
 
undo_buffers:
-	linvfs_destroy_zones();
+	xfs_destroy_zones();
 
undo_zones:
 	return error;
@@ -998,11 +988,10 @@ STATIC void __exit
 exit_xfs_fs( void )
 {
 	vfs_exitquota();
-	XFS_DM_EXIT(&xfs_fs_type);
 	unregister_filesystem(&xfs_fs_type);
 	xfs_cleanup();
 	xfs_buf_terminate();
-	linvfs_destroy_zones();
+	xfs_destroy_zones();
 	ktrace_uninit();
 }
 
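Every renamed method above now follows one shape: recover the XFS vfs from the superblock with vfs_from_sb() (formerly the LINVFS_GET_VFS() macro) and dispatch through a VFS_* behavior macro that assigns "error" as an out-parameter. A minimal sketch, with a hypothetical operation name, mirroring the sync path above:

STATIC int
xfs_fs_example_op(			/* hypothetical op, for illustration */
	struct super_block	*sb)
{
	vfs_t			*vfsp = vfs_from_sb(sb);
	int			error;

	VFS_SYNC(vfsp, SYNC_FSDATA, NULL, error);	/* macro assigns error */
	return -error;		/* XFS errnos are positive internally */
}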
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index df59408dca06..376b96cb513a 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -98,11 +98,6 @@ extern void xfs_qm_exit(void);
 	XFS_DMAPI_STRING \
 	XFS_DBG_STRING /* DBG must be last */
 
-#define LINVFS_GET_VFS(s) \
-	(vfs_t *)((s)->s_fs_info)
-#define LINVFS_SET_VFS(s, vfsp) \
-	((s)->s_fs_info = vfsp)
-
 struct xfs_inode;
 struct xfs_mount;
 struct xfs_buftarg;
@@ -120,6 +115,6 @@ extern int xfs_blkdev_get(struct xfs_mount *, const char *,
 extern void xfs_blkdev_put(struct block_device *);
 extern void xfs_blkdev_issue_flush(struct xfs_buftarg *);
 
-extern struct export_operations linvfs_export_ops;
+extern struct export_operations xfs_export_operations;
 
 #endif	/* __XFS_SUPER_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c
index a02564972420..7079cc837210 100644
--- a/fs/xfs/linux-2.6/xfs_sysctl.c
+++ b/fs/xfs/linux-2.6/xfs_sysctl.c
@@ -38,8 +38,7 @@ xfs_stats_clear_proc_handler(
 
 	if (!ret && write && *valp) {
 		printk("XFS Clearing xfsstats\n");
-		for (c = 0; c < NR_CPUS; c++) {
-			if (!cpu_possible(c)) continue;
+		for_each_cpu(c) {
 			preempt_disable();
 			/* save vn_active, it's a universal truth! */
 			vn_active = per_cpu(xfsstats, c).vn_active;
diff --git a/fs/xfs/linux-2.6/xfs_vfs.c b/fs/xfs/linux-2.6/xfs_vfs.c
index c855d62e5344..6f7c9f7a8624 100644
--- a/fs/xfs/linux-2.6/xfs_vfs.c
+++ b/fs/xfs/linux-2.6/xfs_vfs.c
@@ -227,7 +227,8 @@ vfs_freeze(
 }
 
 vfs_t *
-vfs_allocate( void )
+vfs_allocate(
+	struct super_block	*sb)
 {
 	struct vfs		*vfsp;
 
@@ -236,9 +237,23 @@ vfs_allocate( void )
 	INIT_LIST_HEAD(&vfsp->vfs_sync_list);
 	spin_lock_init(&vfsp->vfs_sync_lock);
 	init_waitqueue_head(&vfsp->vfs_wait_single_sync_task);
+
+	vfsp->vfs_super = sb;
+	sb->s_fs_info = vfsp;
+
+	if (sb->s_flags & MS_RDONLY)
+		vfsp->vfs_flag |= VFS_RDONLY;
+
 	return vfsp;
 }
 
+vfs_t *
+vfs_from_sb(
+	struct super_block	*sb)
+{
+	return (vfs_t *)sb->s_fs_info;
+}
+
 void
 vfs_deallocate(
 	struct vfs		*vfsp)
@@ -295,7 +310,7 @@ bhv_remove_all_vfsops(
 	bhv_remove_vfsops(vfsp, VFS_POSITION_DM);
 	if (!freebase)
 		return;
-	mp = XFS_BHVTOM(bhv_lookup(VFS_BHVHEAD(vfsp), &xfs_vfsops));
+	mp = XFS_VFSTOM(vfsp);
 	VFS_REMOVEBHV(vfsp, &mp->m_bhv);
 	xfs_mount_free(mp, 0);
 }
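A minimal sketch of the s_fs_info round-trip the two functions above centralize: the vfs is published once per mount and recovered from the superblock everywhere else (the caller below is hypothetical, for illustration only):

static void
example_round_trip(
	struct super_block	*sb)
{
	vfs_t	*vfsp = vfs_allocate(sb);	/* stores vfsp in sb->s_fs_info */

	ASSERT(vfs_from_sb(sb) == vfsp);	/* every later lookup reads it back */
}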
diff --git a/fs/xfs/linux-2.6/xfs_vfs.h b/fs/xfs/linux-2.6/xfs_vfs.h
index 57caf9eddee0..8fed356db055 100644
--- a/fs/xfs/linux-2.6/xfs_vfs.h
+++ b/fs/xfs/linux-2.6/xfs_vfs.h
@@ -193,7 +193,8 @@ typedef struct bhv_vfsops {
 #define vfs_bhv_set_custom(b,o)	( (b)->bhv_custom = (void *)(o))
 #define vfs_bhv_clr_custom(b)	( (b)->bhv_custom = NULL )
 
-extern vfs_t *vfs_allocate(void);
+extern vfs_t *vfs_allocate(struct super_block *);
+extern vfs_t *vfs_from_sb(struct super_block *);
 extern void vfs_deallocate(vfs_t *);
 extern void vfs_insertops(vfs_t *, bhv_vfsops_t *);
 extern void vfs_insertbhv(vfs_t *, bhv_desc_t *, vfsops_t *, void *);
diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c
index 260dd8415dd7..d27c25b27ccd 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.c
+++ b/fs/xfs/linux-2.6/xfs_vnode.c
@@ -58,7 +58,7 @@ struct vnode *
 vn_initialize(
 	struct inode	*inode)
 {
-	struct vnode	*vp = LINVFS_GET_VP(inode);
+	struct vnode	*vp = vn_from_inode(inode);
 
 	XFS_STATS_INC(vn_active);
 	XFS_STATS_INC(vn_alloc);
@@ -83,7 +83,7 @@ vn_initialize(
 	vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP);
 #endif	/* XFS_VNODE_TRACE */
 
-	vn_trace_exit(vp, "vn_initialize", (inst_t *)__return_address);
+	vn_trace_exit(vp, __FUNCTION__, (inst_t *)__return_address);
 	return vp;
 }
 
@@ -97,7 +97,7 @@ vn_revalidate_core(
 	struct vnode	*vp,
 	vattr_t		*vap)
 {
-	struct inode	*inode = LINVFS_GET_IP(vp);
+	struct inode	*inode = vn_to_inode(vp);
 
 	inode->i_mode	= vap->va_mode;
 	inode->i_nlink	= vap->va_nlink;
@@ -129,24 +129,31 @@ vn_revalidate_core(
  * Revalidate the Linux inode from the vnode.
  */
 int
-vn_revalidate(
-	struct vnode	*vp)
+__vn_revalidate(
+	struct vnode	*vp,
+	struct vattr	*vattr)
 {
-	vattr_t		va;
 	int		error;
 
-	vn_trace_entry(vp, "vn_revalidate", (inst_t *)__return_address);
-	ASSERT(vp->v_fbhv != NULL);
-
-	va.va_mask = XFS_AT_STAT|XFS_AT_XFLAGS;
-	VOP_GETATTR(vp, &va, 0, NULL, error);
-	if (!error) {
-		vn_revalidate_core(vp, &va);
+	vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
+	vattr->va_mask = XFS_AT_STAT | XFS_AT_XFLAGS;
+	VOP_GETATTR(vp, vattr, 0, NULL, error);
+	if (likely(!error)) {
+		vn_revalidate_core(vp, vattr);
 		VUNMODIFY(vp);
 	}
 	return -error;
 }
 
+int
+vn_revalidate(
+	struct vnode	*vp)
+{
+	vattr_t		vattr;
+
+	return __vn_revalidate(vp, &vattr);
+}
+
 /*
  * Add a reference to a referenced vnode.
  */
@@ -159,7 +166,7 @@ vn_hold(
 	XFS_STATS_INC(vn_hold);
 
 	VN_LOCK(vp);
-	inode = igrab(LINVFS_GET_IP(vp));
+	inode = igrab(vn_to_inode(vp));
 	ASSERT(inode);
 	VN_UNLOCK(vp, 0);
 
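The split into __vn_revalidate() lets a caller that already needs the attribute data refresh the inode and inspect the result with a single VOP_GETATTR. A hypothetical caller, for illustration only (the reaction to the mode bit is invented):

STATIC int
example_refresh_and_check(
	struct vnode	*vp)
{
	vattr_t		vattr;
	int		error;

	error = __vn_revalidate(vp, &vattr);	/* fills vattr on success */
	if (!error && !(vattr.va_mode & S_IWUSR))
		return -EROFS;			/* illustrative reaction only */
	return error;
}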
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index 0fe2419461d6..06f5845e9568 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -116,8 +116,14 @@ typedef enum {
 /*
  * Vnode to Linux inode mapping.
  */
-#define LINVFS_GET_VP(inode)	((vnode_t *)list_entry(inode, vnode_t, v_inode))
-#define LINVFS_GET_IP(vp)	(&(vp)->v_inode)
+static inline struct vnode *vn_from_inode(struct inode *inode)
+{
+	return (vnode_t *)list_entry(inode, vnode_t, v_inode);
+}
+static inline struct inode *vn_to_inode(struct vnode *vnode)
+{
+	return &vnode->v_inode;
+}
 
 /*
  * Vnode flags.
@@ -490,6 +496,7 @@ typedef struct vnode_map {
 	(vmap).v_ino	 = (vp)->v_inode.i_ino; }
 
 extern int	vn_revalidate(struct vnode *);
+extern int	__vn_revalidate(struct vnode *, vattr_t *);
 extern void	vn_revalidate_core(struct vnode *, vattr_t *);
 
 extern void	vn_iowait(struct vnode *vp);
@@ -497,7 +504,7 @@ extern void vn_iowake(struct vnode *vp);
 
 static inline int vn_count(struct vnode *vp)
 {
-	return atomic_read(&LINVFS_GET_IP(vp)->i_count);
+	return atomic_read(&vn_to_inode(vp)->i_count);
 }
 
 /*
@@ -511,16 +518,16 @@ extern vnode_t *vn_hold(struct vnode *);
 	  vn_trace_hold(vp, __FILE__, __LINE__, (inst_t *)__return_address))
 #define VN_RELE(vp)		\
 	  (vn_trace_rele(vp, __FILE__, __LINE__, (inst_t *)__return_address), \
-	   iput(LINVFS_GET_IP(vp)))
+	   iput(vn_to_inode(vp)))
 #else
 #define VN_HOLD(vp)		((void)vn_hold(vp))
-#define VN_RELE(vp)		(iput(LINVFS_GET_IP(vp)))
+#define VN_RELE(vp)		(iput(vn_to_inode(vp)))
 #endif
 
 static inline struct vnode *vn_grab(struct vnode *vp)
 {
-	struct inode *inode = igrab(LINVFS_GET_IP(vp));
-	return inode ? LINVFS_GET_VP(inode) : NULL;
+	struct inode *inode = igrab(vn_to_inode(vp));
+	return inode ? vn_from_inode(inode) : NULL;
 }
 
 /*
@@ -528,7 +535,7 @@ static inline struct vnode *vn_grab(struct vnode *vp)
  */
 #define VNAME(dentry)		((char *) (dentry)->d_name.name)
 #define VNAMELEN(dentry)	((dentry)->d_name.len)
-#define VNAME_TO_VNODE(dentry)	(LINVFS_GET_VP((dentry)->d_inode))
+#define VNAME_TO_VNODE(dentry)	(vn_from_inode((dentry)->d_inode))
 
 /*
  * Vnode spinlock manipulation.
@@ -557,12 +564,12 @@ static __inline__ void vn_flagclr(struct vnode *vp, uint flag)
  */
 static inline void vn_mark_bad(struct vnode *vp)
 {
-	make_bad_inode(LINVFS_GET_IP(vp));
+	make_bad_inode(vn_to_inode(vp));
 }
 
 static inline int VN_BAD(struct vnode *vp)
 {
-	return is_bad_inode(LINVFS_GET_IP(vp));
+	return is_bad_inode(vn_to_inode(vp));
 }
 
 /*
@@ -587,9 +594,9 @@ static inline void vn_atime_to_time_t(struct vnode *vp, time_t *tt)
 /*
  * Some useful predicates.
  */
-#define VN_MAPPED(vp)	mapping_mapped(LINVFS_GET_IP(vp)->i_mapping)
-#define VN_CACHED(vp)	(LINVFS_GET_IP(vp)->i_mapping->nrpages)
-#define VN_DIRTY(vp)	mapping_tagged(LINVFS_GET_IP(vp)->i_mapping, \
+#define VN_MAPPED(vp)	mapping_mapped(vn_to_inode(vp)->i_mapping)
+#define VN_CACHED(vp)	(vn_to_inode(vp)->i_mapping->nrpages)
+#define VN_DIRTY(vp)	mapping_tagged(vn_to_inode(vp)->i_mapping, \
 			PAGECACHE_TAG_DIRTY)
 #define VMODIFY(vp)	VN_FLAGSET(vp, VMODIFIED)
 #define VUNMODIFY(vp)	VN_FLAGCLR(vp, VMODIFIED)
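The two inlines that replace the LINVFS_GET_VP/LINVFS_GET_IP macros are pure pointer arithmetic: the Linux inode is embedded inside the XFS vnode, so list_entry() is container_of() here. A sketch of the layout (fields other than v_inode are assumed):

typedef struct vnode_example {
	uint		v_flag;		/* assumed private vnode state */
	struct inode	v_inode;	/* embedded -- no separate allocation */
} vnode_example_t;
/*
 *   vn_from_inode(ip) == container_of(ip, vnode_t, v_inode)
 *   vn_to_inode(vp)   == &vp->v_inode
 */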
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index 2ec6b441849c..e4e5f05b841b 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -79,9 +79,11 @@ xfs_qm_dquot_logitem_format(
 
 	logvec->i_addr = (xfs_caddr_t)&logitem->qli_format;
 	logvec->i_len  = sizeof(xfs_dq_logformat_t);
+	XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_QFORMAT);
 	logvec++;
 	logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core;
 	logvec->i_len  = sizeof(xfs_disk_dquot_t);
+	XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_DQUOT);
 
 	ASSERT(2 == logitem->qli_item.li_desc->lid_size);
 	logitem->qli_format.qlf_size = 2;
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 7c0e39dc6189..1fb757ef3f41 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -1704,9 +1704,9 @@ xfs_qm_get_rtblks(
 	xfs_qcnt_t	*O_rtblks)
 {
 	xfs_filblks_t	rtblks;			/* total rt blks */
+	xfs_extnum_t	idx;			/* extent record index */
 	xfs_ifork_t	*ifp;			/* inode fork pointer */
 	xfs_extnum_t	nextents;		/* number of extent entries */
-	xfs_bmbt_rec_t	*base;			/* base of extent array */
 	xfs_bmbt_rec_t	*ep;			/* pointer to an extent entry */
 	int		error;
 
@@ -1717,10 +1717,11 @@ xfs_qm_get_rtblks(
 		return error;
 	}
 	rtblks = 0;
-	nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
-	base = &ifp->if_u1.if_extents[0];
-	for (ep = base; ep < &base[nextents]; ep++)
+	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	for (idx = 0; idx < nextents; idx++) {
+		ep = xfs_iext_get_ext(ifp, idx);
 		rtblks += xfs_bmbt_get_blockcount(ep);
+	}
 	*O_rtblks = (xfs_qcnt_t)rtblks;
 	return 0;
 }
@@ -2788,9 +2789,7 @@ xfs_qm_freelist_destroy(xfs_frlist_t *ql)
 		xfs_qm_dqdestroy(dqp);
 		dqp = nextdqp;
 	}
-	/*
-	 * Don't bother about unlocking.
-	 */
+	mutex_unlock(&ql->qh_lock);
 	mutex_destroy(&ql->qh_lock);
 
 	ASSERT(ql->qh_nelems == 0);
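The first hunk above switches from walking if_u1.if_extents[] as one contiguous array to fetching records through xfs_iext_get_ext(), which frees the fork to store extents indirectly. A hypothetical second user of the same idiom, to show its shape:

STATIC xfs_filblks_t
example_largest_extent(
	xfs_ifork_t	*ifp)
{
	xfs_extnum_t	idx;
	xfs_extnum_t	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	xfs_filblks_t	len, big = 0;

	for (idx = 0; idx < nextents; idx++) {
		len = xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
		if (len > big)
			big = len;
	}
	return big;
}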
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index 90402a1c3983..6838b36d95a9 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -374,7 +374,7 @@ xfs_qm_exit(void)
 	vfs_bhv_clr_custom(&xfs_qmops);
 	xfs_qm_cleanup_procfs();
 	if (qm_dqzone)
-		kmem_cache_destroy(qm_dqzone);
+		kmem_zone_destroy(qm_dqzone);
 	if (qm_dqtrxzone)
-		kmem_cache_destroy(qm_dqtrxzone);
+		kmem_zone_destroy(qm_dqtrxzone);
 }
diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c
index 841aa4c15b8a..addf5a7ea06c 100644
--- a/fs/xfs/support/ktrace.c
+++ b/fs/xfs/support/ktrace.c
@@ -39,8 +39,8 @@ ktrace_init(int zentries)
 void
 ktrace_uninit(void)
 {
-	kmem_cache_destroy(ktrace_hdr_zone);
-	kmem_cache_destroy(ktrace_ent_zone);
+	kmem_zone_destroy(ktrace_hdr_zone);
+	kmem_zone_destroy(ktrace_ent_zone);
 }
 
 /*
diff --git a/fs/xfs/support/uuid.c b/fs/xfs/support/uuid.c
index a3d565a67734..e157015c70ff 100644
--- a/fs/xfs/support/uuid.c
+++ b/fs/xfs/support/uuid.c
@@ -21,13 +21,6 @@ static mutex_t uuid_monitor;
 static int	uuid_table_size;
 static uuid_t	*uuid_table;
 
-void
-uuid_init(void)
-{
-	mutex_init(&uuid_monitor);
-}
-
-
 /* IRIX interpretation of an uuid_t */
 typedef struct {
 	__be32	uu_timelow;
@@ -50,7 +43,7 @@ uuid_getnodeuniq(uuid_t *uuid, int fsid [2])
 
 	fsid[0] = (be16_to_cpu(uup->uu_clockseq) << 16) |
 		   be16_to_cpu(uup->uu_timemid);
-	fsid[1] = be16_to_cpu(uup->uu_timelow);
+	fsid[1] = be32_to_cpu(uup->uu_timelow);
 }
 
 void
@@ -139,3 +132,9 @@ uuid_table_remove(uuid_t *uuid)
 	ASSERT(i < uuid_table_size);
 	mutex_unlock(&uuid_monitor);
 }
+
+void
+uuid_init(void)
+{
+	mutex_init(&uuid_monitor);
+}
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index f9315bc960cb..538d0d65b04c 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -55,8 +55,8 @@ struct xfs_inode;
 
 extern struct kmem_zone *xfs_acl_zone;
 #define xfs_acl_zone_init(zone, name)	\
-		(zone) = kmem_zone_init(sizeof(xfs_acl_t), name)
-#define xfs_acl_zone_destroy(zone)	kmem_cache_destroy(zone)
+		(zone) = kmem_zone_init(sizeof(xfs_acl_t), (name))
+#define xfs_acl_zone_destroy(zone)	kmem_zone_destroy(zone)
 
 extern int xfs_acl_inherit(struct vnode *, struct vattr *, xfs_acl_t *);
 extern int xfs_acl_iaccess(struct xfs_inode *, mode_t, cred_t *);
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index e5e91e9c7e89..093fac476bda 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -1127,8 +1127,7 @@ xfs_attr_leaf_list(xfs_attr_list_context_t *context)
 		return(error);
 	ASSERT(bp != NULL);
 	leaf = bp->data;
-	if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
-		     != XFS_ATTR_LEAF_MAGIC)) {
+	if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_ATTR_LEAF_MAGIC)) {
 		XFS_CORRUPTION_ERROR("xfs_attr_leaf_list", XFS_ERRLEVEL_LOW,
 				     context->dp->i_mount, leaf);
 		xfs_da_brelse(NULL, bp);
@@ -1541,8 +1540,8 @@ xfs_attr_node_removename(xfs_da_args_t *args)
 						     XFS_ATTR_FORK);
 		if (error)
 			goto out;
-		ASSERT(INT_GET(((xfs_attr_leafblock_t *)
-				      bp->data)->hdr.info.magic, ARCH_CONVERT)
+		ASSERT(be16_to_cpu(((xfs_attr_leafblock_t *)
+				      bp->data)->hdr.info.magic)
 		       == XFS_ATTR_LEAF_MAGIC);
 
 		if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
@@ -1763,7 +1762,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
 			return(error);
 		if (bp) {
 			node = bp->data;
-			switch (INT_GET(node->hdr.info.magic, ARCH_CONVERT)) {
+			switch (be16_to_cpu(node->hdr.info.magic)) {
 			case XFS_DA_NODE_MAGIC:
 				xfs_attr_trace_l_cn("wrong blk", context, node);
 				xfs_da_brelse(NULL, bp);
@@ -1771,18 +1770,14 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
 				break;
 			case XFS_ATTR_LEAF_MAGIC:
 				leaf = bp->data;
-				if (cursor->hashval >
-				    INT_GET(leaf->entries[
-					INT_GET(leaf->hdr.count,
-						ARCH_CONVERT)-1].hashval,
-					ARCH_CONVERT)) {
+				if (cursor->hashval > be32_to_cpu(leaf->entries[
+				    be16_to_cpu(leaf->hdr.count)-1].hashval)) {
 					xfs_attr_trace_l_cl("wrong blk",
 							   context, leaf);
 					xfs_da_brelse(NULL, bp);
 					bp = NULL;
 				} else if (cursor->hashval <=
-					   INT_GET(leaf->entries[0].hashval,
-						   ARCH_CONVERT)) {
+					   be32_to_cpu(leaf->entries[0].hashval)) {
 					xfs_attr_trace_l_cl("maybe wrong blk",
 							   context, leaf);
 					xfs_da_brelse(NULL, bp);
@@ -1817,10 +1812,10 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
 				return(XFS_ERROR(EFSCORRUPTED));
 			}
 			node = bp->data;
-			if (INT_GET(node->hdr.info.magic, ARCH_CONVERT)
+			if (be16_to_cpu(node->hdr.info.magic)
 			    == XFS_ATTR_LEAF_MAGIC)
 				break;
-			if (unlikely(INT_GET(node->hdr.info.magic, ARCH_CONVERT)
+			if (unlikely(be16_to_cpu(node->hdr.info.magic)
 				     != XFS_DA_NODE_MAGIC)) {
 				XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)",
 						     XFS_ERRLEVEL_LOW,
@@ -1830,19 +1825,17 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
 				return(XFS_ERROR(EFSCORRUPTED));
 			}
 			btree = node->btree;
-			for (i = 0;
-				i < INT_GET(node->hdr.count, ARCH_CONVERT);
+			for (i = 0; i < be16_to_cpu(node->hdr.count);
 							btree++, i++) {
 				if (cursor->hashval
-						<= INT_GET(btree->hashval,
-							    ARCH_CONVERT)) {
-					cursor->blkno = INT_GET(btree->before, ARCH_CONVERT);
+						<= be32_to_cpu(btree->hashval)) {
+					cursor->blkno = be32_to_cpu(btree->before);
 					xfs_attr_trace_l_cb("descending",
 							    context, btree);
 					break;
 				}
 			}
-			if (i == INT_GET(node->hdr.count, ARCH_CONVERT)) {
+			if (i == be16_to_cpu(node->hdr.count)) {
 				xfs_da_brelse(NULL, bp);
 				return(0);
 			}
@@ -1858,7 +1851,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
 	 */
 	for (;;) {
 		leaf = bp->data;
-		if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
+		if (unlikely(be16_to_cpu(leaf->hdr.info.magic)
 			     != XFS_ATTR_LEAF_MAGIC)) {
 			XFS_CORRUPTION_ERROR("xfs_attr_node_list(4)",
 					     XFS_ERRLEVEL_LOW,
@@ -1869,7 +1862,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
 		error = xfs_attr_leaf_list_int(bp, context);
 		if (error || !leaf->hdr.info.forw)
 			break;	/* not really an error, buffer full or EOF */
-		cursor->blkno = INT_GET(leaf->hdr.info.forw, ARCH_CONVERT);
+		cursor->blkno = be32_to_cpu(leaf->hdr.info.forw);
 		xfs_da_brelse(NULL, bp);
 		error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1,
 					      &bp, XFS_ATTR_FORK);
@@ -2232,9 +2225,10 @@ xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context,
 				: 0,
 		(__psunsigned_t)context->dupcnt,
 		(__psunsigned_t)context->flags,
-		(__psunsigned_t)INT_GET(node->hdr.count, ARCH_CONVERT),
-		(__psunsigned_t)INT_GET(node->btree[0].hashval, ARCH_CONVERT),
-		(__psunsigned_t)INT_GET(node->btree[INT_GET(node->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT));
+		(__psunsigned_t)be16_to_cpu(node->hdr.count),
+		(__psunsigned_t)be32_to_cpu(node->btree[0].hashval),
+		(__psunsigned_t)be32_to_cpu(node->btree[
+			be16_to_cpu(node->hdr.count)-1].hashval));
 }
 
 /*
@@ -2261,8 +2255,8 @@ xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context,
 				: 0,
 		(__psunsigned_t)context->dupcnt,
 		(__psunsigned_t)context->flags,
-		(__psunsigned_t)INT_GET(btree->hashval, ARCH_CONVERT),
-		(__psunsigned_t)INT_GET(btree->before, ARCH_CONVERT),
+		(__psunsigned_t)be32_to_cpu(btree->hashval),
+		(__psunsigned_t)be32_to_cpu(btree->before),
 		(__psunsigned_t)NULL);
 }
 
@@ -2290,9 +2284,10 @@ xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context,
 				: 0,
 		(__psunsigned_t)context->dupcnt,
 		(__psunsigned_t)context->flags,
-		(__psunsigned_t)INT_GET(leaf->hdr.count, ARCH_CONVERT),
-		(__psunsigned_t)INT_GET(leaf->entries[0].hashval, ARCH_CONVERT),
-		(__psunsigned_t)INT_GET(leaf->entries[INT_GET(leaf->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT));
+		(__psunsigned_t)be16_to_cpu(leaf->hdr.count),
+		(__psunsigned_t)be32_to_cpu(leaf->entries[0].hashval),
+		(__psunsigned_t)be32_to_cpu(leaf->entries[
+			be16_to_cpu(leaf->hdr.count)-1].hashval));
 }
 
 /*
@@ -2522,7 +2517,7 @@ attr_user_capable(
 	struct vnode	*vp,
 	cred_t		*cred)
 {
-	struct inode	*inode = LINVFS_GET_IP(vp);
+	struct inode	*inode = vn_to_inode(vp);
 
 	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
 		return -EPERM;
@@ -2540,7 +2535,7 @@ attr_trusted_capable(
 	struct vnode	*vp,
 	cred_t		*cred)
 {
-	struct inode	*inode = LINVFS_GET_IP(vp);
+	struct inode	*inode = vn_to_inode(vp);
 
 	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
 		return -EPERM;
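The conversion pattern throughout this file replaces INT_GET(x, ARCH_CONVERT) on big-endian on-disk fields with the standard Linux be16_to_cpu()/be32_to_cpu() helpers, which encode the field width in the name. A hypothetical helper condensing the recurring magic-number check:

static inline int
example_attr_leaf_ok(
	xfs_attr_leafblock_t	*leaf)
{
	return be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC;
}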
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index fe91eac4e2a7..717682747bd2 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -194,7 +194,7 @@ xfs_attr_shortform_create(xfs_da_args_t *args)
 	xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK);
 	hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data;
 	hdr->count = 0;
-	INT_SET(hdr->totsize, ARCH_CONVERT, sizeof(*hdr));
+	hdr->totsize = cpu_to_be16(sizeof(*hdr));
 	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);
 }
 
@@ -224,8 +224,7 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
 	ASSERT(ifp->if_flags & XFS_IFINLINE);
 	sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
 	sfe = &sf->list[0];
-	for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT);
-				sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
+	for (i = 0; i < sf->hdr.count; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
 #ifdef DEBUG
 		if (sfe->namelen != args->namelen)
 			continue;
@@ -248,13 +247,13 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
 	sfe = (xfs_attr_sf_entry_t *)((char *)sf + offset);
 
 	sfe->namelen = args->namelen;
-	INT_SET(sfe->valuelen, ARCH_CONVERT, args->valuelen);
+	sfe->valuelen = args->valuelen;
 	sfe->flags = (args->flags & ATTR_SECURE) ? XFS_ATTR_SECURE :
 			((args->flags & ATTR_ROOT) ? XFS_ATTR_ROOT : 0);
 	memcpy(sfe->nameval, args->name, args->namelen);
 	memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen);
-	INT_MOD(sf->hdr.count, ARCH_CONVERT, 1);
-	INT_MOD(sf->hdr.totsize, ARCH_CONVERT, size);
+	sf->hdr.count++;
+	be16_add(&sf->hdr.totsize, size);
 	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);
 
 	xfs_sbversion_add_attr2(mp, args->trans);
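INT_MOD() on big-endian fields becomes be16_add() above, a read-modify-write in native byte order. A sketch of what such a helper presumably does (the real be16_add lives in the XFS endian headers; this body is an assumption):

static inline void
example_be16_add(__be16 *field, int amount)
{
	/* convert to CPU order, adjust, store back big-endian */
	*field = cpu_to_be16(be16_to_cpu(*field) + amount);
}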
@@ -277,7 +276,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
277 base = sizeof(xfs_attr_sf_hdr_t); 276 base = sizeof(xfs_attr_sf_hdr_t);
278 sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data; 277 sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
279 sfe = &sf->list[0]; 278 sfe = &sf->list[0];
280 end = INT_GET(sf->hdr.count, ARCH_CONVERT); 279 end = sf->hdr.count;
281 for (i = 0; i < end; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), 280 for (i = 0; i < end; sfe = XFS_ATTR_SF_NEXTENTRY(sfe),
282 base += size, i++) { 281 base += size, i++) {
283 size = XFS_ATTR_SF_ENTSIZE(sfe); 282 size = XFS_ATTR_SF_ENTSIZE(sfe);
@@ -300,11 +299,11 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
300 * Fix up the attribute fork data, covering the hole 299 * Fix up the attribute fork data, covering the hole
301 */ 300 */
302 end = base + size; 301 end = base + size;
303 totsize = INT_GET(sf->hdr.totsize, ARCH_CONVERT); 302 totsize = be16_to_cpu(sf->hdr.totsize);
304 if (end != totsize) 303 if (end != totsize)
305 memmove(&((char *)sf)[base], &((char *)sf)[end], totsize - end); 304 memmove(&((char *)sf)[base], &((char *)sf)[end], totsize - end);
306 INT_MOD(sf->hdr.count, ARCH_CONVERT, -1); 305 sf->hdr.count--;
307 INT_MOD(sf->hdr.totsize, ARCH_CONVERT, -size); 306 be16_add(&sf->hdr.totsize, -size);
308 307
309 /* 308 /*
310 * Fix up the start offset of the attribute fork 309 * Fix up the start offset of the attribute fork
@@ -360,7 +359,7 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args)
360 ASSERT(ifp->if_flags & XFS_IFINLINE); 359 ASSERT(ifp->if_flags & XFS_IFINLINE);
361 sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; 360 sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
362 sfe = &sf->list[0]; 361 sfe = &sf->list[0];
363 for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); 362 for (i = 0; i < sf->hdr.count;
364 sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { 363 sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
365 if (sfe->namelen != args->namelen) 364 if (sfe->namelen != args->namelen)
366 continue; 365 continue;
@@ -391,7 +390,7 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args)
391 ASSERT(args->dp->i_d.di_aformat == XFS_IFINLINE); 390 ASSERT(args->dp->i_d.di_aformat == XFS_IFINLINE);
392 sf = (xfs_attr_shortform_t *)args->dp->i_afp->if_u1.if_data; 391 sf = (xfs_attr_shortform_t *)args->dp->i_afp->if_u1.if_data;
393 sfe = &sf->list[0]; 392 sfe = &sf->list[0];
394 for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); 393 for (i = 0; i < sf->hdr.count;
395 sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { 394 sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
396 if (sfe->namelen != args->namelen) 395 if (sfe->namelen != args->namelen)
397 continue; 396 continue;
@@ -404,14 +403,14 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args)
404 ((sfe->flags & XFS_ATTR_ROOT) != 0)) 403 ((sfe->flags & XFS_ATTR_ROOT) != 0))
405 continue; 404 continue;
406 if (args->flags & ATTR_KERNOVAL) { 405 if (args->flags & ATTR_KERNOVAL) {
407 args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); 406 args->valuelen = sfe->valuelen;
408 return(XFS_ERROR(EEXIST)); 407 return(XFS_ERROR(EEXIST));
409 } 408 }
410 if (args->valuelen < INT_GET(sfe->valuelen, ARCH_CONVERT)) { 409 if (args->valuelen < sfe->valuelen) {
411 args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); 410 args->valuelen = sfe->valuelen;
412 return(XFS_ERROR(ERANGE)); 411 return(XFS_ERROR(ERANGE));
413 } 412 }
414 args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); 413 args->valuelen = sfe->valuelen;
415 memcpy(args->value, &sfe->nameval[args->namelen], 414 memcpy(args->value, &sfe->nameval[args->namelen],
416 args->valuelen); 415 args->valuelen);
417 return(XFS_ERROR(EEXIST)); 416 return(XFS_ERROR(EEXIST));
@@ -438,7 +437,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
438 dp = args->dp; 437 dp = args->dp;
439 ifp = dp->i_afp; 438 ifp = dp->i_afp;
440 sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; 439 sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
441 size = INT_GET(sf->hdr.totsize, ARCH_CONVERT); 440 size = be16_to_cpu(sf->hdr.totsize);
442 tmpbuffer = kmem_alloc(size, KM_SLEEP); 441 tmpbuffer = kmem_alloc(size, KM_SLEEP);
443 ASSERT(tmpbuffer != NULL); 442 ASSERT(tmpbuffer != NULL);
444 memcpy(tmpbuffer, ifp->if_u1.if_data, size); 443 memcpy(tmpbuffer, ifp->if_u1.if_data, size);
@@ -481,11 +480,11 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
481 nargs.oknoent = 1; 480 nargs.oknoent = 1;
482 481
483 sfe = &sf->list[0]; 482 sfe = &sf->list[0];
484 for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { 483 for (i = 0; i < sf->hdr.count; i++) {
485 nargs.name = (char *)sfe->nameval; 484 nargs.name = (char *)sfe->nameval;
486 nargs.namelen = sfe->namelen; 485 nargs.namelen = sfe->namelen;
487 nargs.value = (char *)&sfe->nameval[nargs.namelen]; 486 nargs.value = (char *)&sfe->nameval[nargs.namelen];
488 nargs.valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); 487 nargs.valuelen = sfe->valuelen;
489 nargs.hashval = xfs_da_hashname((char *)sfe->nameval, 488 nargs.hashval = xfs_da_hashname((char *)sfe->nameval,
490 sfe->namelen); 489 sfe->namelen);
491 nargs.flags = (sfe->flags & XFS_ATTR_SECURE) ? ATTR_SECURE : 490 nargs.flags = (sfe->flags & XFS_ATTR_SECURE) ? ATTR_SECURE :
@@ -514,11 +513,9 @@ xfs_attr_shortform_compare(const void *a, const void *b)
514 513
515 sa = (xfs_attr_sf_sort_t *)a; 514 sa = (xfs_attr_sf_sort_t *)a;
516 sb = (xfs_attr_sf_sort_t *)b; 515 sb = (xfs_attr_sf_sort_t *)b;
517 if (INT_GET(sa->hash, ARCH_CONVERT) 516 if (sa->hash < sb->hash) {
518 < INT_GET(sb->hash, ARCH_CONVERT)) {
519 return(-1); 517 return(-1);
520 } else if (INT_GET(sa->hash, ARCH_CONVERT) 518 } else if (sa->hash > sb->hash) {
521 > INT_GET(sb->hash, ARCH_CONVERT)) {
522 return(1); 519 return(1);
523 } else { 520 } else {
524 return(sa->entno - sb->entno); 521 return(sa->entno - sb->entno);
@@ -560,10 +557,8 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
560 * If the buffer is large enough, do not bother with sorting. 557 * If the buffer is large enough, do not bother with sorting.
561 * Note the generous fudge factor of 16 overhead bytes per entry. 558 * Note the generous fudge factor of 16 overhead bytes per entry.
562 */ 559 */
563 if ((dp->i_afp->if_bytes + INT_GET(sf->hdr.count, ARCH_CONVERT) * 16) 560 if ((dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize) {
564 < context->bufsize) { 561 for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
565 for (i = 0, sfe = &sf->list[0];
566 i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) {
567 attrnames_t *namesp; 562 attrnames_t *namesp;
568 563
569 if (((context->flags & ATTR_SECURE) != 0) != 564 if (((context->flags & ATTR_SECURE) != 0) !=
@@ -584,14 +579,13 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
584 if (context->flags & ATTR_KERNOVAL) { 579 if (context->flags & ATTR_KERNOVAL) {
585 ASSERT(context->flags & ATTR_KERNAMELS); 580 ASSERT(context->flags & ATTR_KERNAMELS);
586 context->count += namesp->attr_namelen + 581 context->count += namesp->attr_namelen +
587 INT_GET(sfe->namelen, ARCH_CONVERT) + 1; 582 sfe->namelen + 1;
588 } 583 }
589 else { 584 else {
590 if (xfs_attr_put_listent(context, namesp, 585 if (xfs_attr_put_listent(context, namesp,
591 (char *)sfe->nameval, 586 (char *)sfe->nameval,
592 (int)sfe->namelen, 587 (int)sfe->namelen,
593 (int)INT_GET(sfe->valuelen, 588 (int)sfe->valuelen))
594 ARCH_CONVERT)))
595 break; 589 break;
596 } 590 }
597 sfe = XFS_ATTR_SF_NEXTENTRY(sfe); 591 sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
@@ -603,7 +597,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
603 /* 597 /*
604 * It didn't all fit, so we have to sort everything on hashval. 598 * It didn't all fit, so we have to sort everything on hashval.
605 */ 599 */
606 sbsize = INT_GET(sf->hdr.count, ARCH_CONVERT) * sizeof(*sbuf); 600 sbsize = sf->hdr.count * sizeof(*sbuf);
607 sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP); 601 sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP);
608 602
609 /* 603 /*
@@ -611,8 +605,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
611 * the relevant info from only those that match into a buffer. 605 * the relevant info from only those that match into a buffer.
612 */ 606 */
613 nsbuf = 0; 607 nsbuf = 0;
614 for (i = 0, sfe = &sf->list[0]; 608 for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
615 i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) {
616 if (unlikely( 609 if (unlikely(
617 ((char *)sfe < (char *)sf) || 610 ((char *)sfe < (char *)sf) ||
618 ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) { 611 ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) {
@@ -636,8 +629,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
636 continue; 629 continue;
637 } 630 }
638 sbp->entno = i; 631 sbp->entno = i;
639 INT_SET(sbp->hash, ARCH_CONVERT, 632 sbp->hash = xfs_da_hashname((char *)sfe->nameval, sfe->namelen);
640 xfs_da_hashname((char *)sfe->nameval, sfe->namelen));
641 sbp->name = (char *)sfe->nameval; 633 sbp->name = (char *)sfe->nameval;
642 sbp->namelen = sfe->namelen; 634 sbp->namelen = sfe->namelen;
643 /* These are bytes, and both on-disk, don't endian-flip */ 635 /* These are bytes, and both on-disk, don't endian-flip */
@@ -660,12 +652,12 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
660 cursor->initted = 1; 652 cursor->initted = 1;
661 cursor->blkno = 0; 653 cursor->blkno = 0;
662 for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) { 654 for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
663 if (INT_GET(sbp->hash, ARCH_CONVERT) == cursor->hashval) { 655 if (sbp->hash == cursor->hashval) {
664 if (cursor->offset == count) { 656 if (cursor->offset == count) {
665 break; 657 break;
666 } 658 }
667 count++; 659 count++;
668 } else if (INT_GET(sbp->hash, ARCH_CONVERT) > cursor->hashval) { 660 } else if (sbp->hash > cursor->hashval) {
669 break; 661 break;
670 } 662 }
671 } 663 }
@@ -685,8 +677,8 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
685 ((sbp->flags & XFS_ATTR_ROOT) ? &attr_trusted : 677 ((sbp->flags & XFS_ATTR_ROOT) ? &attr_trusted :
686 &attr_user); 678 &attr_user);
687 679
688 if (cursor->hashval != INT_GET(sbp->hash, ARCH_CONVERT)) { 680 if (cursor->hashval != sbp->hash) {
689 cursor->hashval = INT_GET(sbp->hash, ARCH_CONVERT); 681 cursor->hashval = sbp->hash;
690 cursor->offset = 0; 682 cursor->offset = 0;
691 } 683 }
692 if (context->flags & ATTR_KERNOVAL) { 684 if (context->flags & ATTR_KERNOVAL) {
@@ -696,7 +688,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
696 } else { 688 } else {
697 if (xfs_attr_put_listent(context, namesp, 689 if (xfs_attr_put_listent(context, namesp,
698 sbp->name, sbp->namelen, 690 sbp->name, sbp->namelen,
699 INT_GET(sbp->valuelen, ARCH_CONVERT))) 691 sbp->valuelen))
700 break; 692 break;
701 } 693 }
702 cursor->offset++; 694 cursor->offset++;
@@ -720,12 +712,11 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp)
720 int bytes, i; 712 int bytes, i;
721 713
722 leaf = bp->data; 714 leaf = bp->data;
723 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) 715 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
724 == XFS_ATTR_LEAF_MAGIC);
725 716
726 entry = &leaf->entries[0]; 717 entry = &leaf->entries[0];
727 bytes = sizeof(struct xfs_attr_sf_hdr); 718 bytes = sizeof(struct xfs_attr_sf_hdr);
728 for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { 719 for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
729 if (entry->flags & XFS_ATTR_INCOMPLETE) 720 if (entry->flags & XFS_ATTR_INCOMPLETE)
730 continue; /* don't copy partial entries */ 721 continue; /* don't copy partial entries */
731 if (!(entry->flags & XFS_ATTR_LOCAL)) 722 if (!(entry->flags & XFS_ATTR_LOCAL))
@@ -733,11 +724,11 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp)
733 name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i); 724 name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i);
734 if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX) 725 if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX)
735 return(0); 726 return(0);
736 if (INT_GET(name_loc->valuelen, ARCH_CONVERT) >= XFS_ATTR_SF_ENTSIZE_MAX) 727 if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX)
737 return(0); 728 return(0);
738 bytes += sizeof(struct xfs_attr_sf_entry)-1 729 bytes += sizeof(struct xfs_attr_sf_entry)-1
739 + name_loc->namelen 730 + name_loc->namelen
740 + INT_GET(name_loc->valuelen, ARCH_CONVERT); 731 + be16_to_cpu(name_loc->valuelen);
741 } 732 }
742 if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) && 733 if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) &&
743 (bytes == sizeof(struct xfs_attr_sf_hdr))) 734 (bytes == sizeof(struct xfs_attr_sf_hdr)))
@@ -766,8 +757,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
766 ASSERT(bp != NULL); 757 ASSERT(bp != NULL);
767 memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount)); 758 memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount));
768 leaf = (xfs_attr_leafblock_t *)tmpbuffer; 759 leaf = (xfs_attr_leafblock_t *)tmpbuffer;
769 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) 760 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
770 == XFS_ATTR_LEAF_MAGIC);
771 memset(bp->data, 0, XFS_LBSIZE(dp->i_mount)); 761 memset(bp->data, 0, XFS_LBSIZE(dp->i_mount));
772 762
773 /* 763 /*
@@ -810,7 +800,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
810 nargs.trans = args->trans; 800 nargs.trans = args->trans;
811 nargs.oknoent = 1; 801 nargs.oknoent = 1;
812 entry = &leaf->entries[0]; 802 entry = &leaf->entries[0];
813 for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { 803 for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
814 if (entry->flags & XFS_ATTR_INCOMPLETE) 804 if (entry->flags & XFS_ATTR_INCOMPLETE)
815 continue; /* don't copy partial entries */ 805 continue; /* don't copy partial entries */
816 if (!entry->nameidx) 806 if (!entry->nameidx)
@@ -820,8 +810,8 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
820 nargs.name = (char *)name_loc->nameval; 810 nargs.name = (char *)name_loc->nameval;
821 nargs.namelen = name_loc->namelen; 811 nargs.namelen = name_loc->namelen;
822 nargs.value = (char *)&name_loc->nameval[nargs.namelen]; 812 nargs.value = (char *)&name_loc->nameval[nargs.namelen];
823 nargs.valuelen = INT_GET(name_loc->valuelen, ARCH_CONVERT); 813 nargs.valuelen = be16_to_cpu(name_loc->valuelen);
824 nargs.hashval = INT_GET(entry->hashval, ARCH_CONVERT); 814 nargs.hashval = be32_to_cpu(entry->hashval);
825 nargs.flags = (entry->flags & XFS_ATTR_SECURE) ? ATTR_SECURE : 815 nargs.flags = (entry->flags & XFS_ATTR_SECURE) ? ATTR_SECURE :
826 ((entry->flags & XFS_ATTR_ROOT) ? ATTR_ROOT : 0); 816 ((entry->flags & XFS_ATTR_ROOT) ? ATTR_ROOT : 0);
827 xfs_attr_shortform_add(&nargs, forkoff); 817 xfs_attr_shortform_add(&nargs, forkoff);
@@ -875,13 +865,12 @@ xfs_attr_leaf_to_node(xfs_da_args_t *args)
875 goto out; 865 goto out;
876 node = bp1->data; 866 node = bp1->data;
877 leaf = bp2->data; 867 leaf = bp2->data;
878 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) 868 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
879 == XFS_ATTR_LEAF_MAGIC);
880 /* both on-disk, don't endian-flip twice */ 869 /* both on-disk, don't endian-flip twice */
881 node->btree[0].hashval = 870 node->btree[0].hashval =
882 leaf->entries[INT_GET(leaf->hdr.count, ARCH_CONVERT)-1 ].hashval; 871 leaf->entries[be16_to_cpu(leaf->hdr.count)-1 ].hashval;
883 INT_SET(node->btree[0].before, ARCH_CONVERT, blkno); 872 node->btree[0].before = cpu_to_be32(blkno);
884 INT_SET(node->hdr.count, ARCH_CONVERT, 1); 873 node->hdr.count = cpu_to_be16(1);
885 xfs_da_log_buf(args->trans, bp1, 0, XFS_LBSIZE(dp->i_mount) - 1); 874 xfs_da_log_buf(args->trans, bp1, 0, XFS_LBSIZE(dp->i_mount) - 1);
886 error = 0; 875 error = 0;
887out: 876out:
@@ -920,19 +909,16 @@ xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp)
920 leaf = bp->data; 909 leaf = bp->data;
921 memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount)); 910 memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount));
922 hdr = &leaf->hdr; 911 hdr = &leaf->hdr;
923 INT_SET(hdr->info.magic, ARCH_CONVERT, XFS_ATTR_LEAF_MAGIC); 912 hdr->info.magic = cpu_to_be16(XFS_ATTR_LEAF_MAGIC);
924 INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount)); 913 hdr->firstused = cpu_to_be16(XFS_LBSIZE(dp->i_mount));
925 if (!hdr->firstused) { 914 if (!hdr->firstused) {
926 INT_SET(hdr->firstused, ARCH_CONVERT, 915 hdr->firstused = cpu_to_be16(
927 XFS_LBSIZE(dp->i_mount) - XFS_ATTR_LEAF_NAME_ALIGN); 916 XFS_LBSIZE(dp->i_mount) - XFS_ATTR_LEAF_NAME_ALIGN);
928 } 917 }
929 918
930 INT_SET(hdr->freemap[0].base, ARCH_CONVERT, 919 hdr->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t));
931 sizeof(xfs_attr_leaf_hdr_t)); 920 hdr->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr->firstused) -
932 INT_SET(hdr->freemap[0].size, ARCH_CONVERT, 921 sizeof(xfs_attr_leaf_hdr_t));
933 INT_GET(hdr->firstused, ARCH_CONVERT)
934 - INT_GET(hdr->freemap[0].base,
935 ARCH_CONVERT));
936 922
937 xfs_da_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1); 923 xfs_da_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1);
938 924
@@ -1004,10 +990,9 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args)
1004 int tablesize, entsize, sum, tmp, i; 990 int tablesize, entsize, sum, tmp, i;
1005 991
1006 leaf = bp->data; 992 leaf = bp->data;
1007 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) 993 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
1008 == XFS_ATTR_LEAF_MAGIC);
1009 ASSERT((args->index >= 0) 994 ASSERT((args->index >= 0)
1010 && (args->index <= INT_GET(leaf->hdr.count, ARCH_CONVERT))); 995 && (args->index <= be16_to_cpu(leaf->hdr.count)));
1011 hdr = &leaf->hdr; 996 hdr = &leaf->hdr;
1012 entsize = xfs_attr_leaf_newentsize(args->namelen, args->valuelen, 997 entsize = xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
1013 args->trans->t_mountp->m_sb.sb_blocksize, NULL); 998 args->trans->t_mountp->m_sb.sb_blocksize, NULL);
@@ -1016,26 +1001,25 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args)
1016 * Search through freemap for first-fit on new name length. 1001 * Search through freemap for first-fit on new name length.
1017 * (may need to figure in size of entry struct too) 1002 * (may need to figure in size of entry struct too)
1018 */ 1003 */
1019 tablesize = (INT_GET(hdr->count, ARCH_CONVERT) + 1) 1004 tablesize = (be16_to_cpu(hdr->count) + 1)
1020 * sizeof(xfs_attr_leaf_entry_t) 1005 * sizeof(xfs_attr_leaf_entry_t)
1021 + sizeof(xfs_attr_leaf_hdr_t); 1006 + sizeof(xfs_attr_leaf_hdr_t);
1022 map = &hdr->freemap[XFS_ATTR_LEAF_MAPSIZE-1]; 1007 map = &hdr->freemap[XFS_ATTR_LEAF_MAPSIZE-1];
1023 for (sum = 0, i = XFS_ATTR_LEAF_MAPSIZE-1; i >= 0; map--, i--) { 1008 for (sum = 0, i = XFS_ATTR_LEAF_MAPSIZE-1; i >= 0; map--, i--) {
1024 if (tablesize > INT_GET(hdr->firstused, ARCH_CONVERT)) { 1009 if (tablesize > be16_to_cpu(hdr->firstused)) {
1025 sum += INT_GET(map->size, ARCH_CONVERT); 1010 sum += be16_to_cpu(map->size);
1026 continue; 1011 continue;
1027 } 1012 }
1028 if (!map->size) 1013 if (!map->size)
1029 continue; /* no space in this map */ 1014 continue; /* no space in this map */
1030 tmp = entsize; 1015 tmp = entsize;
1031 if (INT_GET(map->base, ARCH_CONVERT) 1016 if (be16_to_cpu(map->base) < be16_to_cpu(hdr->firstused))
1032 < INT_GET(hdr->firstused, ARCH_CONVERT))
1033 tmp += sizeof(xfs_attr_leaf_entry_t); 1017 tmp += sizeof(xfs_attr_leaf_entry_t);
1034 if (INT_GET(map->size, ARCH_CONVERT) >= tmp) { 1018 if (be16_to_cpu(map->size) >= tmp) {
1035 tmp = xfs_attr_leaf_add_work(bp, args, i); 1019 tmp = xfs_attr_leaf_add_work(bp, args, i);
1036 return(tmp); 1020 return(tmp);
1037 } 1021 }
1038 sum += INT_GET(map->size, ARCH_CONVERT); 1022 sum += be16_to_cpu(map->size);
1039 } 1023 }
1040 1024
1041 /* 1025 /*
@@ -1056,7 +1040,7 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args)
1056 * After compaction, the block is guaranteed to have only one 1040 * After compaction, the block is guaranteed to have only one
1057 * free region, in freemap[0]. If it is not big enough, give up. 1041 * free region, in freemap[0]. If it is not big enough, give up.
1058 */ 1042 */
1059 if (INT_GET(hdr->freemap[0].size, ARCH_CONVERT) 1043 if (be16_to_cpu(hdr->freemap[0].size)
1060 < (entsize + sizeof(xfs_attr_leaf_entry_t))) 1044 < (entsize + sizeof(xfs_attr_leaf_entry_t)))
1061 return(XFS_ERROR(ENOSPC)); 1045 return(XFS_ERROR(ENOSPC));
1062 1046
@@ -1079,45 +1063,42 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
 	int tmp, i;
 
 	leaf = bp->data;
-	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
-						== XFS_ATTR_LEAF_MAGIC);
+	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
 	hdr = &leaf->hdr;
 	ASSERT((mapindex >= 0) && (mapindex < XFS_ATTR_LEAF_MAPSIZE));
-	ASSERT((args->index >= 0)
-		&& (args->index <= INT_GET(hdr->count, ARCH_CONVERT)));
+	ASSERT((args->index >= 0) && (args->index <= be16_to_cpu(hdr->count)));
 
 	/*
 	 * Force open some space in the entry array and fill it in.
 	 */
 	entry = &leaf->entries[args->index];
-	if (args->index < INT_GET(hdr->count, ARCH_CONVERT)) {
-		tmp  = INT_GET(hdr->count, ARCH_CONVERT) - args->index;
+	if (args->index < be16_to_cpu(hdr->count)) {
+		tmp  = be16_to_cpu(hdr->count) - args->index;
 		tmp *= sizeof(xfs_attr_leaf_entry_t);
 		memmove((char *)(entry+1), (char *)entry, tmp);
 		xfs_da_log_buf(args->trans, bp,
 		    XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
 	}
-	INT_MOD(hdr->count, ARCH_CONVERT, 1);
+	be16_add(&hdr->count, 1);
 
 	/*
 	 * Allocate space for the new string (at the end of the run).
 	 */
 	map = &hdr->freemap[mapindex];
 	mp = args->trans->t_mountp;
-	ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp));
-	ASSERT((INT_GET(map->base, ARCH_CONVERT) & 0x3) == 0);
-	ASSERT(INT_GET(map->size, ARCH_CONVERT) >=
+	ASSERT(be16_to_cpu(map->base) < XFS_LBSIZE(mp));
+	ASSERT((be16_to_cpu(map->base) & 0x3) == 0);
+	ASSERT(be16_to_cpu(map->size) >=
 		xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
 					 mp->m_sb.sb_blocksize, NULL));
-	ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp));
-	ASSERT((INT_GET(map->size, ARCH_CONVERT) & 0x3) == 0);
-	INT_MOD(map->size, ARCH_CONVERT,
+	ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp));
+	ASSERT((be16_to_cpu(map->size) & 0x3) == 0);
+	be16_add(&map->size,
 		-xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
 					  mp->m_sb.sb_blocksize, &tmp));
-	INT_SET(entry->nameidx, ARCH_CONVERT,
-			INT_GET(map->base, ARCH_CONVERT)
-			+ INT_GET(map->size, ARCH_CONVERT));
-	INT_SET(entry->hashval, ARCH_CONVERT, args->hashval);
+	entry->nameidx = cpu_to_be16(be16_to_cpu(map->base) +
+				     be16_to_cpu(map->size));
+	entry->hashval = cpu_to_be32(args->hashval);
 	entry->flags = tmp ? XFS_ATTR_LOCAL : 0;
 	entry->flags |= (args->flags & ATTR_SECURE) ? XFS_ATTR_SECURE :
 			((args->flags & ATTR_ROOT) ? XFS_ATTR_ROOT : 0);
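This is the first hunk in the section to replace INT_MOD() with be16_add(). The helper's definition is not part of this diff; judging from the call sites (read the big-endian field, adjust it in host order, write it back big-endian), its assumed shape is:

	/* Assumed shape of the be16_add() helper used throughout this
	 * patch; the real declaration lives elsewhere in the XFS tree. */
	static inline void be16_add(__be16 *a, __s16 b)
	{
		*a = cpu_to_be16(be16_to_cpu(*a) + b);
	}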
@@ -1130,12 +1111,10 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
 	}
 	xfs_da_log_buf(args->trans, bp,
 			  XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
-	ASSERT((args->index == 0) || (INT_GET(entry->hashval, ARCH_CONVERT)
-						>= INT_GET((entry-1)->hashval,
-							    ARCH_CONVERT)));
-	ASSERT((args->index == INT_GET(hdr->count, ARCH_CONVERT)-1) ||
-		(INT_GET(entry->hashval, ARCH_CONVERT)
-			<= (INT_GET((entry+1)->hashval, ARCH_CONVERT))));
+	ASSERT((args->index == 0) ||
+	       (be32_to_cpu(entry->hashval) >= be32_to_cpu((entry-1)->hashval)));
+	ASSERT((args->index == be16_to_cpu(hdr->count)-1) ||
+	       (be32_to_cpu(entry->hashval) <= be32_to_cpu((entry+1)->hashval)));
 
 	/*
 	 * Copy the attribute name and value into the new space.
@@ -1149,10 +1128,10 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
 	if (entry->flags & XFS_ATTR_LOCAL) {
 		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index);
 		name_loc->namelen = args->namelen;
-		INT_SET(name_loc->valuelen, ARCH_CONVERT, args->valuelen);
+		name_loc->valuelen = cpu_to_be16(args->valuelen);
 		memcpy((char *)name_loc->nameval, args->name, args->namelen);
 		memcpy((char *)&name_loc->nameval[args->namelen], args->value,
-				   INT_GET(name_loc->valuelen, ARCH_CONVERT));
+				   be16_to_cpu(name_loc->valuelen));
 	} else {
 		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
 		name_rmt->namelen = args->namelen;
@@ -1171,28 +1150,23 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
 	/*
 	 * Update the control info for this leaf node
 	 */
-	if (INT_GET(entry->nameidx, ARCH_CONVERT)
-				< INT_GET(hdr->firstused, ARCH_CONVERT)) {
+	if (be16_to_cpu(entry->nameidx) < be16_to_cpu(hdr->firstused)) {
 		/* both on-disk, don't endian-flip twice */
 		hdr->firstused = entry->nameidx;
 	}
-	ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT)
-				>= ((INT_GET(hdr->count, ARCH_CONVERT)
-					* sizeof(*entry))+sizeof(*hdr)));
-	tmp = (INT_GET(hdr->count, ARCH_CONVERT)-1)
-					* sizeof(xfs_attr_leaf_entry_t)
+	ASSERT(be16_to_cpu(hdr->firstused) >=
+	       ((be16_to_cpu(hdr->count) * sizeof(*entry)) + sizeof(*hdr)));
+	tmp = (be16_to_cpu(hdr->count)-1) * sizeof(xfs_attr_leaf_entry_t)
 					+ sizeof(xfs_attr_leaf_hdr_t);
 	map = &hdr->freemap[0];
 	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) {
-		if (INT_GET(map->base, ARCH_CONVERT) == tmp) {
-			INT_MOD(map->base, ARCH_CONVERT,
-					sizeof(xfs_attr_leaf_entry_t));
-			INT_MOD(map->size, ARCH_CONVERT,
-					-sizeof(xfs_attr_leaf_entry_t));
+		if (be16_to_cpu(map->base) == tmp) {
+			be16_add(&map->base, sizeof(xfs_attr_leaf_entry_t));
+			be16_add(&map->size,
+				 -((int)sizeof(xfs_attr_leaf_entry_t)));
 		}
 	}
-	INT_MOD(hdr->usedbytes, ARCH_CONVERT,
-				xfs_attr_leaf_entsize(leaf, args->index));
+	be16_add(&hdr->usedbytes, xfs_attr_leaf_entsize(leaf, args->index));
 	xfs_da_log_buf(args->trans, bp,
 			  XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr)));
 	return(0);
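The conversion pattern throughout these hunks only type-checks under sparse if the on-disk fields themselves carry __be16/__be32 annotations. A sketch of the annotated leaf header implied by the code above; the field names follow this file, but the authoritative definitions live in fs/xfs/xfs_attr_leaf.h:

	/* Sketch of the endian-annotated on-disk structures this
	 * conversion assumes; see xfs_attr_leaf.h for the real ones. */
	#define XFS_ATTR_LEAF_MAPSIZE	3	/* assumed: regions per block */

	typedef struct xfs_attr_leaf_map {	/* RLE map of free bytes */
		__be16	base;			/* base of free region */
		__be16	size;			/* length of free region */
	} xfs_attr_leaf_map_t;

	typedef struct xfs_attr_leaf_hdr {	/* constant-structure header */
		xfs_da_blkinfo_t info;		/* magic, forw/back links */
		__be16	count;			/* count of active entries */
		__be16	usedbytes;		/* bytes of names/values stored */
		__be16	firstused;		/* first used byte in name area */
		__u8	holes;			/* != 0 if blk needs compaction */
		__u8	pad1;
		xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE];
	} xfs_attr_leaf_hdr_t;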
@@ -1223,28 +1197,25 @@ xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp)
 	hdr_s = &leaf_s->hdr;
 	hdr_d = &leaf_d->hdr;
 	hdr_d->info = hdr_s->info;	/* struct copy */
-	INT_SET(hdr_d->firstused, ARCH_CONVERT, XFS_LBSIZE(mp));
+	hdr_d->firstused = cpu_to_be16(XFS_LBSIZE(mp));
 	/* handle truncation gracefully */
 	if (!hdr_d->firstused) {
-		INT_SET(hdr_d->firstused, ARCH_CONVERT,
+		hdr_d->firstused = cpu_to_be16(
 				XFS_LBSIZE(mp) - XFS_ATTR_LEAF_NAME_ALIGN);
 	}
 	hdr_d->usedbytes = 0;
 	hdr_d->count = 0;
 	hdr_d->holes = 0;
-	INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT,
-					sizeof(xfs_attr_leaf_hdr_t));
-	INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT,
-				INT_GET(hdr_d->firstused, ARCH_CONVERT)
-			      - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT));
+	hdr_d->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t));
+	hdr_d->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr_d->firstused) -
+					     sizeof(xfs_attr_leaf_hdr_t));
 
 	/*
 	 * Copy all entry's in the same (sorted) order,
 	 * but allocate name/value pairs packed and in sequence.
 	 */
 	xfs_attr_leaf_moveents(leaf_s, 0, leaf_d, 0,
-				(int)INT_GET(hdr_s->count, ARCH_CONVERT), mp);
-
+				be16_to_cpu(hdr_s->count), mp);
 	xfs_da_log_buf(trans, bp, 0, XFS_LBSIZE(mp) - 1);
 
 	kmem_free(tmpbuffer, XFS_LBSIZE(mp));
@@ -1279,10 +1250,8 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 	ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC);
 	leaf1 = blk1->bp->data;
 	leaf2 = blk2->bp->data;
-	ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT)
-						== XFS_ATTR_LEAF_MAGIC);
-	ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT)
-						== XFS_ATTR_LEAF_MAGIC);
+	ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+	ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
 	args = state->args;
 
 	/*
@@ -1319,22 +1288,21 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 	/*
 	 * Move any entries required from leaf to leaf:
 	 */
-	if (count < INT_GET(hdr1->count, ARCH_CONVERT)) {
+	if (count < be16_to_cpu(hdr1->count)) {
 		/*
 		 * Figure the total bytes to be added to the destination leaf.
 		 */
 		/* number entries being moved */
-		count = INT_GET(hdr1->count, ARCH_CONVERT) - count;
-		space  = INT_GET(hdr1->usedbytes, ARCH_CONVERT) - totallen;
+		count = be16_to_cpu(hdr1->count) - count;
+		space  = be16_to_cpu(hdr1->usedbytes) - totallen;
 		space += count * sizeof(xfs_attr_leaf_entry_t);
 
 		/*
 		 * leaf2 is the destination, compact it if it looks tight.
 		 */
-		max  = INT_GET(hdr2->firstused, ARCH_CONVERT)
+		max  = be16_to_cpu(hdr2->firstused)
 						- sizeof(xfs_attr_leaf_hdr_t);
-		max -= INT_GET(hdr2->count, ARCH_CONVERT)
-					* sizeof(xfs_attr_leaf_entry_t);
+		max -= be16_to_cpu(hdr2->count) * sizeof(xfs_attr_leaf_entry_t);
 		if (space > max) {
 			xfs_attr_leaf_compact(args->trans, blk2->bp);
 		}
@@ -1342,13 +1310,12 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 		/*
 		 * Move high entries from leaf1 to low end of leaf2.
 		 */
-		xfs_attr_leaf_moveents(leaf1,
-				INT_GET(hdr1->count, ARCH_CONVERT)-count,
+		xfs_attr_leaf_moveents(leaf1, be16_to_cpu(hdr1->count) - count,
 				leaf2, 0, count, state->mp);
 
 		xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1);
 		xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1);
-	} else if (count > INT_GET(hdr1->count, ARCH_CONVERT)) {
+	} else if (count > be16_to_cpu(hdr1->count)) {
 		/*
 		 * I assert that since all callers pass in an empty
 		 * second buffer, this code should never execute.
@@ -1358,17 +1325,16 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 		 * Figure the total bytes to be added to the destination leaf.
 		 */
 		/* number entries being moved */
-		count -= INT_GET(hdr1->count, ARCH_CONVERT);
-		space  = totallen - INT_GET(hdr1->usedbytes, ARCH_CONVERT);
+		count -= be16_to_cpu(hdr1->count);
+		space  = totallen - be16_to_cpu(hdr1->usedbytes);
 		space += count * sizeof(xfs_attr_leaf_entry_t);
 
 		/*
 		 * leaf1 is the destination, compact it if it looks tight.
 		 */
-		max  = INT_GET(hdr1->firstused, ARCH_CONVERT)
+		max  = be16_to_cpu(hdr1->firstused)
 						- sizeof(xfs_attr_leaf_hdr_t);
-		max -= INT_GET(hdr1->count, ARCH_CONVERT)
-					* sizeof(xfs_attr_leaf_entry_t);
+		max -= be16_to_cpu(hdr1->count) * sizeof(xfs_attr_leaf_entry_t);
 		if (space > max) {
 			xfs_attr_leaf_compact(args->trans, blk1->bp);
 		}
@@ -1377,8 +1343,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 		 * Move low entries from leaf2 to high end of leaf1.
 		 */
 		xfs_attr_leaf_moveents(leaf2, 0, leaf1,
-				(int)INT_GET(hdr1->count, ARCH_CONVERT), count,
-				state->mp);
+				be16_to_cpu(hdr1->count), count, state->mp);
 
 		xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1);
 		xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1);
@@ -1387,12 +1352,10 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 	/*
 	 * Copy out last hashval in each block for B-tree code.
 	 */
-	blk1->hashval =
-		INT_GET(leaf1->entries[INT_GET(leaf1->hdr.count,
-				    ARCH_CONVERT)-1].hashval, ARCH_CONVERT);
-	blk2->hashval =
-		INT_GET(leaf2->entries[INT_GET(leaf2->hdr.count,
-				    ARCH_CONVERT)-1].hashval, ARCH_CONVERT);
+	blk1->hashval = be32_to_cpu(
+		leaf1->entries[be16_to_cpu(leaf1->hdr.count)-1].hashval);
+	blk2->hashval = be32_to_cpu(
+		leaf2->entries[be16_to_cpu(leaf2->hdr.count)-1].hashval);
 
 	/*
 	 * Adjust the expected index for insertion.
@@ -1406,13 +1369,12 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 	 * inserting.  The index/blkno fields refer to the "old" entry,
 	 * while the index2/blkno2 fields refer to the "new" entry.
 	 */
-	if (blk1->index > INT_GET(leaf1->hdr.count, ARCH_CONVERT)) {
+	if (blk1->index > be16_to_cpu(leaf1->hdr.count)) {
 		ASSERT(state->inleaf == 0);
-		blk2->index = blk1->index
-				- INT_GET(leaf1->hdr.count, ARCH_CONVERT);
+		blk2->index = blk1->index - be16_to_cpu(leaf1->hdr.count);
 		args->index = args->index2 = blk2->index;
 		args->blkno = args->blkno2 = blk2->blkno;
-	} else if (blk1->index == INT_GET(leaf1->hdr.count, ARCH_CONVERT)) {
+	} else if (blk1->index == be16_to_cpu(leaf1->hdr.count)) {
 		if (state->inleaf) {
 			args->index = blk1->index;
 			args->blkno = blk1->blkno;
@@ -1420,7 +1382,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 			args->blkno2 = blk2->blkno;
 		} else {
 			blk2->index = blk1->index
-					- INT_GET(leaf1->hdr.count, ARCH_CONVERT);
+					- be16_to_cpu(leaf1->hdr.count);
 			args->index = args->index2 = blk2->index;
 			args->blkno = args->blkno2 = blk2->blkno;
 		}
@@ -1464,15 +1426,14 @@ xfs_attr_leaf_figure_balance(xfs_da_state_t *state,
 	 * Examine entries until we reduce the absolute difference in
 	 * byte usage between the two blocks to a minimum.
 	 */
-	max = INT_GET(hdr1->count, ARCH_CONVERT)
-			+ INT_GET(hdr2->count, ARCH_CONVERT);
+	max = be16_to_cpu(hdr1->count) + be16_to_cpu(hdr2->count);
 	half  = (max+1) * sizeof(*entry);
-	half += INT_GET(hdr1->usedbytes, ARCH_CONVERT)
-				+ INT_GET(hdr2->usedbytes, ARCH_CONVERT)
-				+ xfs_attr_leaf_newentsize(
+	half += be16_to_cpu(hdr1->usedbytes) +
+		be16_to_cpu(hdr2->usedbytes) +
+		xfs_attr_leaf_newentsize(
 						state->args->namelen,
 						state->args->valuelen,
 						state->blocksize, NULL);
 	half /= 2;
 	lastdelta = state->blocksize;
 	entry = &leaf1->entries[0];
@@ -1498,7 +1459,7 @@ xfs_attr_leaf_figure_balance(xfs_da_state_t *state,
 		/*
 		 * Wrap around into the second block if necessary.
 		 */
-		if (count == INT_GET(hdr1->count, ARCH_CONVERT)) {
+		if (count == be16_to_cpu(hdr1->count)) {
 			leaf1 = leaf2;
 			entry = &leaf1->entries[0];
 			index = 0;
@@ -1566,12 +1527,12 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
 	 */
 	blk = &state->path.blk[ state->path.active-1 ];
 	info = blk->bp->data;
-	ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC);
+	ASSERT(be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC);
 	leaf = (xfs_attr_leafblock_t *)info;
-	count = INT_GET(leaf->hdr.count, ARCH_CONVERT);
+	count = be16_to_cpu(leaf->hdr.count);
 	bytes = sizeof(xfs_attr_leaf_hdr_t) +
 		count * sizeof(xfs_attr_leaf_entry_t) +
-		INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT);
+		be16_to_cpu(leaf->hdr.usedbytes);
 	if (bytes > (state->blocksize >> 1)) {
 		*action = 0;	/* blk over 50%, don't try to join */
 		return(0);
@@ -1588,7 +1549,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
 		 * Make altpath point to the block we want to keep and
 		 * path point to the block we want to drop (this one).
 		 */
-		forward = info->forw;
+		forward = (info->forw != 0);
 		memcpy(&state->altpath, &state->path, sizeof(state->path));
 		error = xfs_da_path_shift(state, &state->altpath, forward,
 						 0, &retval);
@@ -1610,13 +1571,12 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
 	 * to shrink an attribute list over time.
 	 */
 	/* start with smaller blk num */
-	forward = (INT_GET(info->forw, ARCH_CONVERT)
-					< INT_GET(info->back, ARCH_CONVERT));
+	forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
 	for (i = 0; i < 2; forward = !forward, i++) {
 		if (forward)
-			blkno = INT_GET(info->forw, ARCH_CONVERT);
+			blkno = be32_to_cpu(info->forw);
 		else
-			blkno = INT_GET(info->back, ARCH_CONVERT);
+			blkno = be32_to_cpu(info->back);
 		if (blkno == 0)
 			continue;
 		error = xfs_da_read_buf(state->args->trans, state->args->dp,
@@ -1626,14 +1586,13 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
 		ASSERT(bp != NULL);
 
 		leaf = (xfs_attr_leafblock_t *)info;
-		count = INT_GET(leaf->hdr.count, ARCH_CONVERT);
+		count = be16_to_cpu(leaf->hdr.count);
 		bytes = state->blocksize - (state->blocksize>>2);
-		bytes -= INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT);
+		bytes -= be16_to_cpu(leaf->hdr.usedbytes);
 		leaf = bp->data;
-		ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
-						== XFS_ATTR_LEAF_MAGIC);
-		count += INT_GET(leaf->hdr.count, ARCH_CONVERT);
-		bytes -= INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT);
+		ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+		count += be16_to_cpu(leaf->hdr.count);
+		bytes -= be16_to_cpu(leaf->hdr.usedbytes);
 		bytes -= count * sizeof(xfs_attr_leaf_entry_t);
 		bytes -= sizeof(xfs_attr_leaf_hdr_t);
 		xfs_da_brelse(state->args->trans, bp);
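The arithmetic in this hunk decides whether a leaf and one sibling can be merged: their combined header, entry table and name/value bytes must fit within 75% of a block (blocksize minus blocksize>>2), leaving slack for future inserts. The same test lifted out as a standalone host-endian sketch, with stand-in sizes:

	/* Sketch of the merge test; returns true when two leaves'
	 * contents fit in 75% of a single block. */
	#include <stdbool.h>

	static bool can_join(int blocksize, int hdr_size, int entry_size,
			     int count1, int used1, int count2, int used2)
	{
		int bytes = blocksize - (blocksize >> 2);	/* 75% target */

		bytes -= used1 + used2;				/* name/value bytes */
		bytes -= (count1 + count2) * entry_size;	/* entry table */
		bytes -= hdr_size;				/* one header remains */
		return bytes >= 0;
	}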
@@ -1685,21 +1644,18 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
 	xfs_mount_t *mp;
 
 	leaf = bp->data;
-	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
-						== XFS_ATTR_LEAF_MAGIC);
+	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
 	hdr = &leaf->hdr;
 	mp = args->trans->t_mountp;
-	ASSERT((INT_GET(hdr->count, ARCH_CONVERT) > 0)
-		&& (INT_GET(hdr->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8)));
+	ASSERT((be16_to_cpu(hdr->count) > 0)
+		&& (be16_to_cpu(hdr->count) < (XFS_LBSIZE(mp)/8)));
 	ASSERT((args->index >= 0)
-		&& (args->index < INT_GET(hdr->count, ARCH_CONVERT)));
-	ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT)
-			>= ((INT_GET(hdr->count, ARCH_CONVERT)
-					* sizeof(*entry))+sizeof(*hdr)));
+		&& (args->index < be16_to_cpu(hdr->count)));
+	ASSERT(be16_to_cpu(hdr->firstused) >=
+	       ((be16_to_cpu(hdr->count) * sizeof(*entry)) + sizeof(*hdr)));
 	entry = &leaf->entries[args->index];
-	ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT)
-			>= INT_GET(hdr->firstused, ARCH_CONVERT));
-	ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) < XFS_LBSIZE(mp));
+	ASSERT(be16_to_cpu(entry->nameidx) >= be16_to_cpu(hdr->firstused));
+	ASSERT(be16_to_cpu(entry->nameidx) < XFS_LBSIZE(mp));
 
 	/*
 	 * Scan through free region table:
@@ -1707,33 +1663,30 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
 	 *    find smallest free region in case we need to replace it,
 	 *    adjust any map that borders the entry table,
 	 */
-	tablesize = INT_GET(hdr->count, ARCH_CONVERT)
-					* sizeof(xfs_attr_leaf_entry_t)
+	tablesize = be16_to_cpu(hdr->count) * sizeof(xfs_attr_leaf_entry_t)
 					+ sizeof(xfs_attr_leaf_hdr_t);
 	map = &hdr->freemap[0];
-	tmp = INT_GET(map->size, ARCH_CONVERT);
+	tmp = be16_to_cpu(map->size);
 	before = after = -1;
 	smallest = XFS_ATTR_LEAF_MAPSIZE - 1;
 	entsize = xfs_attr_leaf_entsize(leaf, args->index);
 	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) {
-		ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp));
-		ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp));
-		if (INT_GET(map->base, ARCH_CONVERT) == tablesize) {
-			INT_MOD(map->base, ARCH_CONVERT,
-					-sizeof(xfs_attr_leaf_entry_t));
-			INT_MOD(map->size, ARCH_CONVERT,
-					sizeof(xfs_attr_leaf_entry_t));
+		ASSERT(be16_to_cpu(map->base) < XFS_LBSIZE(mp));
+		ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp));
+		if (be16_to_cpu(map->base) == tablesize) {
+			be16_add(&map->base,
+				 -((int)sizeof(xfs_attr_leaf_entry_t)));
+			be16_add(&map->size, sizeof(xfs_attr_leaf_entry_t));
 		}
 
-		if ((INT_GET(map->base, ARCH_CONVERT)
-				+ INT_GET(map->size, ARCH_CONVERT))
-				== INT_GET(entry->nameidx, ARCH_CONVERT)) {
+		if ((be16_to_cpu(map->base) + be16_to_cpu(map->size))
+				== be16_to_cpu(entry->nameidx)) {
 			before = i;
-		} else if (INT_GET(map->base, ARCH_CONVERT)
-			== (INT_GET(entry->nameidx, ARCH_CONVERT) + entsize)) {
+		} else if (be16_to_cpu(map->base)
+			== (be16_to_cpu(entry->nameidx) + entsize)) {
 			after = i;
-		} else if (INT_GET(map->size, ARCH_CONVERT) < tmp) {
-			tmp = INT_GET(map->size, ARCH_CONVERT);
+		} else if (be16_to_cpu(map->size) < tmp) {
+			tmp = be16_to_cpu(map->size);
 			smallest = i;
 		}
 	}
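The scan above classifies the free regions relative to the entry being removed: a region ending exactly at the entry's nameidx can coalesce from below ("before"), one starting exactly past the entry can coalesce from above ("after"), and otherwise the smallest region is remembered as a replacement candidate. The same classification as a standalone host-endian sketch:

	/* Host-endian stand-ins for the __be16 freemap fields. */
	#define MAPSIZE 3

	struct region { int base, size; };

	static void classify(const struct region *map, int nameidx, int entsize,
			     int *before, int *after, int *smallest)
	{
		int i, tmp = map[0].size;

		*before = *after = -1;
		*smallest = MAPSIZE - 1;
		for (i = 0; i < MAPSIZE; i++) {
			if (map[i].base + map[i].size == nameidx)
				*before = i;	/* region ends at the entry */
			else if (map[i].base == nameidx + entsize)
				*after = i;	/* region starts after the entry */
			else if (map[i].size < tmp) {
				tmp = map[i].size;
				*smallest = i;	/* replacement candidate */
			}
		}
	}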
@@ -1745,38 +1698,35 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
 	if ((before >= 0) || (after >= 0)) {
 		if ((before >= 0) && (after >= 0)) {
 			map = &hdr->freemap[before];
-			INT_MOD(map->size, ARCH_CONVERT, entsize);
-			INT_MOD(map->size, ARCH_CONVERT,
-					INT_GET(hdr->freemap[after].size,
-							ARCH_CONVERT));
+			be16_add(&map->size, entsize);
+			be16_add(&map->size,
+				 be16_to_cpu(hdr->freemap[after].size));
 			hdr->freemap[after].base = 0;
 			hdr->freemap[after].size = 0;
 		} else if (before >= 0) {
 			map = &hdr->freemap[before];
-			INT_MOD(map->size, ARCH_CONVERT, entsize);
+			be16_add(&map->size, entsize);
 		} else {
 			map = &hdr->freemap[after];
 			/* both on-disk, don't endian flip twice */
 			map->base = entry->nameidx;
-			INT_MOD(map->size, ARCH_CONVERT, entsize);
+			be16_add(&map->size, entsize);
 		}
 	} else {
 		/*
 		 * Replace smallest region (if it is smaller than free'd entry)
 		 */
 		map = &hdr->freemap[smallest];
-		if (INT_GET(map->size, ARCH_CONVERT) < entsize) {
-			INT_SET(map->base, ARCH_CONVERT,
-					INT_GET(entry->nameidx, ARCH_CONVERT));
-			INT_SET(map->size, ARCH_CONVERT, entsize);
+		if (be16_to_cpu(map->size) < entsize) {
+			map->base = cpu_to_be16(be16_to_cpu(entry->nameidx));
+			map->size = cpu_to_be16(entsize);
 		}
 	}
 
 	/*
 	 * Did we remove the first entry?
 	 */
-	if (INT_GET(entry->nameidx, ARCH_CONVERT)
-				== INT_GET(hdr->firstused, ARCH_CONVERT))
+	if (be16_to_cpu(entry->nameidx) == be16_to_cpu(hdr->firstused))
 		smallest = 1;
 	else
 		smallest = 0;
@@ -1785,18 +1735,18 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
 	 * Compress the remaining entries and zero out the removed stuff.
 	 */
 	memset(XFS_ATTR_LEAF_NAME(leaf, args->index), 0, entsize);
-	INT_MOD(hdr->usedbytes, ARCH_CONVERT, -entsize);
+	be16_add(&hdr->usedbytes, -entsize);
 	xfs_da_log_buf(args->trans, bp,
 	     XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index),
 				   entsize));
 
-	tmp = (INT_GET(hdr->count, ARCH_CONVERT) - args->index)
+	tmp = (be16_to_cpu(hdr->count) - args->index)
 					* sizeof(xfs_attr_leaf_entry_t);
 	memmove((char *)entry, (char *)(entry+1), tmp);
-	INT_MOD(hdr->count, ARCH_CONVERT, -1);
+	be16_add(&hdr->count, -1);
 	xfs_da_log_buf(args->trans, bp,
 	    XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
-	entry = &leaf->entries[INT_GET(hdr->count, ARCH_CONVERT)];
+	entry = &leaf->entries[be16_to_cpu(hdr->count)];
 	memset((char *)entry, 0, sizeof(xfs_attr_leaf_entry_t));
 
 	/*
@@ -1808,18 +1758,17 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
 	if (smallest) {
 		tmp = XFS_LBSIZE(mp);
 		entry = &leaf->entries[0];
-		for (i = INT_GET(hdr->count, ARCH_CONVERT)-1;
-						i >= 0; entry++, i--) {
-			ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT)
-					>= INT_GET(hdr->firstused, ARCH_CONVERT));
-			ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT)
-							< XFS_LBSIZE(mp));
-			if (INT_GET(entry->nameidx, ARCH_CONVERT) < tmp)
-				tmp = INT_GET(entry->nameidx, ARCH_CONVERT);
+		for (i = be16_to_cpu(hdr->count)-1; i >= 0; entry++, i--) {
+			ASSERT(be16_to_cpu(entry->nameidx) >=
+			       be16_to_cpu(hdr->firstused));
+			ASSERT(be16_to_cpu(entry->nameidx) < XFS_LBSIZE(mp));
+
+			if (be16_to_cpu(entry->nameidx) < tmp)
+				tmp = be16_to_cpu(entry->nameidx);
 		}
-		INT_SET(hdr->firstused, ARCH_CONVERT, tmp);
+		hdr->firstused = cpu_to_be16(tmp);
 		if (!hdr->firstused) {
-			INT_SET(hdr->firstused, ARCH_CONVERT,
+			hdr->firstused = cpu_to_be16(
 					tmp - XFS_ATTR_LEAF_NAME_ALIGN);
 		}
 	} else {
@@ -1833,9 +1782,8 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
 	 * "join" the leaf with a sibling if so.
 	 */
 	tmp  = sizeof(xfs_attr_leaf_hdr_t);
-	tmp += INT_GET(leaf->hdr.count, ARCH_CONVERT)
-					* sizeof(xfs_attr_leaf_entry_t);
-	tmp += INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT);
+	tmp += be16_to_cpu(leaf->hdr.count) * sizeof(xfs_attr_leaf_entry_t);
+	tmp += be16_to_cpu(leaf->hdr.usedbytes);
 	return(tmp < mp->m_attr_magicpct); /* leaf is < 37% full */
 }
 
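The final test compares the surviving header, entry table and name/value bytes against m_attr_magicpct, the per-mount byte threshold that the "< 37% full" comment refers to. Assuming that field is precomputed at mount time as 37% of the block size, the check reduces to the following sketch:

	/* Sketch of the "leaf is < 37% full" test; the 37% mount-time
	 * precomputation is an assumption based on the comment above. */
	static int leaf_wants_join(int blocksize, int hdr_size, int entry_size,
				   int count, int usedbytes)
	{
		int magicpct = (blocksize * 37) / 100;	/* assumed m_attr_magicpct */
		int tmp = hdr_size + count * entry_size + usedbytes;

		return tmp < magicpct;
	}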
@@ -1859,20 +1807,16 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
 	ASSERT(save_blk->magic == XFS_ATTR_LEAF_MAGIC);
 	drop_leaf = drop_blk->bp->data;
 	save_leaf = save_blk->bp->data;
-	ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT)
-						== XFS_ATTR_LEAF_MAGIC);
-	ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT)
-						== XFS_ATTR_LEAF_MAGIC);
+	ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+	ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
 	drop_hdr = &drop_leaf->hdr;
 	save_hdr = &save_leaf->hdr;
 
 	/*
 	 * Save last hashval from dying block for later Btree fixup.
 	 */
-	drop_blk->hashval =
-		INT_GET(drop_leaf->entries[INT_GET(drop_leaf->hdr.count,
-						ARCH_CONVERT)-1].hashval,
-								ARCH_CONVERT);
+	drop_blk->hashval = be32_to_cpu(
+		drop_leaf->entries[be16_to_cpu(drop_leaf->hdr.count)-1].hashval);
 
 	/*
 	 * Check if we need a temp buffer, or can we do it in place.
@@ -1886,12 +1830,11 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
 		 */
 		if (xfs_attr_leaf_order(save_blk->bp, drop_blk->bp)) {
 			xfs_attr_leaf_moveents(drop_leaf, 0, save_leaf, 0,
-				(int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp);
+				be16_to_cpu(drop_hdr->count), mp);
 		} else {
 			xfs_attr_leaf_moveents(drop_leaf, 0, save_leaf,
-				INT_GET(save_hdr->count, ARCH_CONVERT),
-				(int)INT_GET(drop_hdr->count, ARCH_CONVERT),
-				mp);
+				be16_to_cpu(save_hdr->count),
+				be16_to_cpu(drop_hdr->count), mp);
 		}
 	} else {
 		/*
@@ -1905,28 +1848,24 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
 		tmp_hdr = &tmp_leaf->hdr;
 		tmp_hdr->info = save_hdr->info;	/* struct copy */
 		tmp_hdr->count = 0;
-		INT_SET(tmp_hdr->firstused, ARCH_CONVERT, state->blocksize);
+		tmp_hdr->firstused = cpu_to_be16(state->blocksize);
 		if (!tmp_hdr->firstused) {
-			INT_SET(tmp_hdr->firstused, ARCH_CONVERT,
+			tmp_hdr->firstused = cpu_to_be16(
 				state->blocksize - XFS_ATTR_LEAF_NAME_ALIGN);
 		}
 		tmp_hdr->usedbytes = 0;
 		if (xfs_attr_leaf_order(save_blk->bp, drop_blk->bp)) {
 			xfs_attr_leaf_moveents(drop_leaf, 0, tmp_leaf, 0,
-				(int)INT_GET(drop_hdr->count, ARCH_CONVERT),
-				mp);
+				be16_to_cpu(drop_hdr->count), mp);
 			xfs_attr_leaf_moveents(save_leaf, 0, tmp_leaf,
-				INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT),
-				(int)INT_GET(save_hdr->count, ARCH_CONVERT),
-				mp);
+				be16_to_cpu(tmp_leaf->hdr.count),
+				be16_to_cpu(save_hdr->count), mp);
 		} else {
 			xfs_attr_leaf_moveents(save_leaf, 0, tmp_leaf, 0,
-				(int)INT_GET(save_hdr->count, ARCH_CONVERT),
-				mp);
+				be16_to_cpu(save_hdr->count), mp);
 			xfs_attr_leaf_moveents(drop_leaf, 0, tmp_leaf,
-				INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT),
-				(int)INT_GET(drop_hdr->count, ARCH_CONVERT),
-				mp);
+				be16_to_cpu(tmp_leaf->hdr.count),
+				be16_to_cpu(drop_hdr->count), mp);
 		}
 		memcpy((char *)save_leaf, (char *)tmp_leaf, state->blocksize);
 		kmem_free(tmpbuffer, state->blocksize);
@@ -1938,10 +1877,8 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
 	/*
 	 * Copy out last hashval in each block for B-tree code.
 	 */
-	save_blk->hashval =
-		INT_GET(save_leaf->entries[INT_GET(save_leaf->hdr.count,
-						ARCH_CONVERT)-1].hashval,
-								ARCH_CONVERT);
+	save_blk->hashval = be32_to_cpu(
+		save_leaf->entries[be16_to_cpu(save_leaf->hdr.count)-1].hashval);
 }
 
 /*========================================================================
@@ -1972,48 +1909,45 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
 	xfs_dahash_t hashval;
 
 	leaf = bp->data;
-	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
-						== XFS_ATTR_LEAF_MAGIC);
-	ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT)
+	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+	ASSERT(be16_to_cpu(leaf->hdr.count)
 					< (XFS_LBSIZE(args->dp->i_mount)/8));
 
 	/*
 	 * Binary search.  (note: small blocks will skip this loop)
 	 */
 	hashval = args->hashval;
-	probe = span = INT_GET(leaf->hdr.count, ARCH_CONVERT) / 2;
+	probe = span = be16_to_cpu(leaf->hdr.count) / 2;
 	for (entry = &leaf->entries[probe]; span > 4;
 		   entry = &leaf->entries[probe]) {
 		span /= 2;
-		if (INT_GET(entry->hashval, ARCH_CONVERT) < hashval)
+		if (be32_to_cpu(entry->hashval) < hashval)
 			probe += span;
-		else if (INT_GET(entry->hashval, ARCH_CONVERT) > hashval)
+		else if (be32_to_cpu(entry->hashval) > hashval)
 			probe -= span;
 		else
 			break;
 	}
 	ASSERT((probe >= 0) &&
 	       (!leaf->hdr.count
-	       || (probe < INT_GET(leaf->hdr.count, ARCH_CONVERT))));
-	ASSERT((span <= 4) || (INT_GET(entry->hashval, ARCH_CONVERT)
-							== hashval));
+	       || (probe < be16_to_cpu(leaf->hdr.count))));
+	ASSERT((span <= 4) || (be32_to_cpu(entry->hashval) == hashval));
 
 	/*
 	 * Since we may have duplicate hashval's, find the first matching
 	 * hashval in the leaf.
 	 */
-	while ((probe > 0) && (INT_GET(entry->hashval, ARCH_CONVERT)
-							>= hashval)) {
+	while ((probe > 0) && (be32_to_cpu(entry->hashval) >= hashval)) {
 		entry--;
 		probe--;
 	}
-	while ((probe < INT_GET(leaf->hdr.count, ARCH_CONVERT))
-		&& (INT_GET(entry->hashval, ARCH_CONVERT) < hashval)) {
+	while ((probe < be16_to_cpu(leaf->hdr.count)) &&
+	       (be32_to_cpu(entry->hashval) < hashval)) {
 		entry++;
 		probe++;
 	}
-	if ((probe == INT_GET(leaf->hdr.count, ARCH_CONVERT))
-		|| (INT_GET(entry->hashval, ARCH_CONVERT) != hashval)) {
+	if ((probe == be16_to_cpu(leaf->hdr.count)) ||
+	    (be32_to_cpu(entry->hashval) != hashval)) {
 		args->index = probe;
 		return(XFS_ERROR(ENOATTR));
 	}
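The lookup converted above is a binary search over entries sorted by 32-bit hash, deliberately stopped early (span <= 4) and then rewound to the first of any duplicate hashes, since equal hashes may repeat and the caller must scan all of them. The same algorithm on a plain sorted array, as a standalone sketch:

	/* Standalone restatement of the probe/span search above. */
	#include <stdint.h>

	static int find_first_hash(const uint32_t *hashes, int count,
				   uint32_t want)
	{
		int probe, span;

		probe = span = count / 2;
		while (span > 4) {		/* coarse binary search */
			span /= 2;
			if (hashes[probe] < want)
				probe += span;
			else if (hashes[probe] > want)
				probe -= span;
			else
				break;
		}
		while (probe > 0 && hashes[probe] >= want)
			probe--;		/* back up over duplicates */
		while (probe < count && hashes[probe] < want)
			probe++;		/* forward to first match */
		if (probe == count || hashes[probe] != want)
			return -1;		/* ENOATTR equivalent */
		return probe;
	}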
@@ -2021,8 +1955,8 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
 	/*
 	 * Duplicate keys may be present, so search all of them for a match.
 	 */
-	for (  ; (probe < INT_GET(leaf->hdr.count, ARCH_CONVERT))
-			&& (INT_GET(entry->hashval, ARCH_CONVERT) == hashval);
+	for (  ; (probe < be16_to_cpu(leaf->hdr.count)) &&
+			(be32_to_cpu(entry->hashval) == hashval);
 			entry++, probe++) {
 /*
  * GROT: Add code to remove incomplete entries.
@@ -2064,11 +1998,9 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
 			    ((entry->flags & XFS_ATTR_ROOT) != 0))
 				continue;
 			args->index = probe;
-			args->rmtblkno
-				= INT_GET(name_rmt->valueblk, ARCH_CONVERT);
+			args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
 			args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount,
-						INT_GET(name_rmt->valuelen,
-								ARCH_CONVERT));
+						be32_to_cpu(name_rmt->valuelen));
 			return(XFS_ERROR(EEXIST));
 		}
 	}
@@ -2090,18 +2022,17 @@ xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args)
 	xfs_attr_leaf_name_remote_t *name_rmt;
 
 	leaf = bp->data;
-	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
-						== XFS_ATTR_LEAF_MAGIC);
-	ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT)
+	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+	ASSERT(be16_to_cpu(leaf->hdr.count)
 					< (XFS_LBSIZE(args->dp->i_mount)/8));
-	ASSERT(args->index < ((int)INT_GET(leaf->hdr.count, ARCH_CONVERT)));
+	ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
 
 	entry = &leaf->entries[args->index];
 	if (entry->flags & XFS_ATTR_LOCAL) {
 		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index);
 		ASSERT(name_loc->namelen == args->namelen);
 		ASSERT(memcmp(args->name, name_loc->nameval, args->namelen) == 0);
-		valuelen = INT_GET(name_loc->valuelen, ARCH_CONVERT);
+		valuelen = be16_to_cpu(name_loc->valuelen);
 		if (args->flags & ATTR_KERNOVAL) {
 			args->valuelen = valuelen;
 			return(0);
@@ -2116,8 +2047,8 @@ xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args)
 	name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
 	ASSERT(name_rmt->namelen == args->namelen);
 	ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
-	valuelen = INT_GET(name_rmt->valuelen, ARCH_CONVERT);
-	args->rmtblkno = INT_GET(name_rmt->valueblk, ARCH_CONVERT);
+	valuelen = be32_to_cpu(name_rmt->valuelen);
+	args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
 	args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, valuelen);
 	if (args->flags & ATTR_KERNOVAL) {
 		args->valuelen = valuelen;
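The two arms read different widths because the on-disk formats differ: a local entry stores its value inline behind a 16-bit length, while a remote entry stores a 32-bit block number and a 32-bit length pointing at value blocks elsewhere, which is why args->rmtblkno and args->rmtblkcnt are only filled in on the remote path. A sketch of the two layouts implied here; the authoritative versions live in fs/xfs/xfs_attr_leaf.h:

	/* Sketch of the two on-disk name formats referenced above. */
	typedef struct xfs_attr_leaf_name_local {
		__be16	valuelen;	/* number of bytes in value */
		__u8	namelen;	/* length of name bytes */
		__u8	nameval[1];	/* name/value bytes, inline */
	} xfs_attr_leaf_name_local_t;

	typedef struct xfs_attr_leaf_name_remote {
		__be32	valueblk;	/* block number of value bytes */
		__be32	valuelen;	/* number of bytes in value */
		__u8	namelen;	/* length of name bytes */
		__u8	name[1];	/* name bytes */
	} xfs_attr_leaf_name_remote_t;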
@@ -2159,32 +2090,29 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
 	/*
 	 * Set up environment.
 	 */
-	ASSERT(INT_GET(leaf_s->hdr.info.magic, ARCH_CONVERT)
-						== XFS_ATTR_LEAF_MAGIC);
-	ASSERT(INT_GET(leaf_d->hdr.info.magic, ARCH_CONVERT)
-						== XFS_ATTR_LEAF_MAGIC);
+	ASSERT(be16_to_cpu(leaf_s->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+	ASSERT(be16_to_cpu(leaf_d->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
 	hdr_s = &leaf_s->hdr;
 	hdr_d = &leaf_d->hdr;
-	ASSERT((INT_GET(hdr_s->count, ARCH_CONVERT) > 0)
-				&& (INT_GET(hdr_s->count, ARCH_CONVERT)
-						< (XFS_LBSIZE(mp)/8)));
-	ASSERT(INT_GET(hdr_s->firstused, ARCH_CONVERT) >=
-		((INT_GET(hdr_s->count, ARCH_CONVERT)
+	ASSERT((be16_to_cpu(hdr_s->count) > 0) &&
+	       (be16_to_cpu(hdr_s->count) < (XFS_LBSIZE(mp)/8)));
+	ASSERT(be16_to_cpu(hdr_s->firstused) >=
+		((be16_to_cpu(hdr_s->count)
 					* sizeof(*entry_s))+sizeof(*hdr_s)));
-	ASSERT(INT_GET(hdr_d->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8));
-	ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >=
-		((INT_GET(hdr_d->count, ARCH_CONVERT)
+	ASSERT(be16_to_cpu(hdr_d->count) < (XFS_LBSIZE(mp)/8));
+	ASSERT(be16_to_cpu(hdr_d->firstused) >=
+		((be16_to_cpu(hdr_d->count)
 					* sizeof(*entry_d))+sizeof(*hdr_d)));
 
-	ASSERT(start_s < INT_GET(hdr_s->count, ARCH_CONVERT));
-	ASSERT(start_d <= INT_GET(hdr_d->count, ARCH_CONVERT));
-	ASSERT(count <= INT_GET(hdr_s->count, ARCH_CONVERT));
+	ASSERT(start_s < be16_to_cpu(hdr_s->count));
+	ASSERT(start_d <= be16_to_cpu(hdr_d->count));
+	ASSERT(count <= be16_to_cpu(hdr_s->count));
 
 	/*
 	 * Move the entries in the destination leaf up to make a hole?
 	 */
-	if (start_d < INT_GET(hdr_d->count, ARCH_CONVERT)) {
-		tmp  = INT_GET(hdr_d->count, ARCH_CONVERT) - start_d;
+	if (start_d < be16_to_cpu(hdr_d->count)) {
+		tmp  = be16_to_cpu(hdr_d->count) - start_d;
 		tmp *= sizeof(xfs_attr_leaf_entry_t);
 		entry_s = &leaf_d->entries[start_d];
 		entry_d = &leaf_d->entries[start_d + count];
@@ -2199,8 +2127,8 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
 	entry_d = &leaf_d->entries[start_d];
 	desti = start_d;
 	for (i = 0; i < count; entry_s++, entry_d++, desti++, i++) {
-		ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT)
-				>= INT_GET(hdr_s->firstused, ARCH_CONVERT));
+		ASSERT(be16_to_cpu(entry_s->nameidx)
+				>= be16_to_cpu(hdr_s->firstused));
 		tmp = xfs_attr_leaf_entsize(leaf_s, start_s + i);
 #ifdef GROT
 		/*
@@ -2210,35 +2138,35 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
 		 */
 		if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? */
 			memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp);
-			INT_MOD(hdr_s->usedbytes, ARCH_CONVERT, -tmp);
-			INT_MOD(hdr_s->count, ARCH_CONVERT, -1);
+			be16_add(&hdr_s->usedbytes, -tmp);
+			be16_add(&hdr_s->count, -1);
 			entry_d--;	/* to compensate for ++ in loop hdr */
 			desti--;
 			if ((start_s + i) < offset)
 				result++;	/* insertion index adjustment */
 		} else {
 #endif /* GROT */
-			INT_MOD(hdr_d->firstused, ARCH_CONVERT, -tmp);
+			be16_add(&hdr_d->firstused, -tmp);
 			/* both on-disk, don't endian flip twice */
 			entry_d->hashval = entry_s->hashval;
 			/* both on-disk, don't endian flip twice */
 			entry_d->nameidx = hdr_d->firstused;
 			entry_d->flags = entry_s->flags;
-			ASSERT(INT_GET(entry_d->nameidx, ARCH_CONVERT) + tmp
+			ASSERT(be16_to_cpu(entry_d->nameidx) + tmp
 							<= XFS_LBSIZE(mp));
 			memmove(XFS_ATTR_LEAF_NAME(leaf_d, desti),
 				XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), tmp);
-			ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) + tmp
+			ASSERT(be16_to_cpu(entry_s->nameidx) + tmp
 							<= XFS_LBSIZE(mp));
 			memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp);
-			INT_MOD(hdr_s->usedbytes, ARCH_CONVERT, -tmp);
-			INT_MOD(hdr_d->usedbytes, ARCH_CONVERT, tmp);
-			INT_MOD(hdr_s->count, ARCH_CONVERT, -1);
-			INT_MOD(hdr_d->count, ARCH_CONVERT, 1);
-			tmp = INT_GET(hdr_d->count, ARCH_CONVERT)
+			be16_add(&hdr_s->usedbytes, -tmp);
+			be16_add(&hdr_d->usedbytes, tmp);
+			be16_add(&hdr_s->count, -1);
+			be16_add(&hdr_d->count, 1);
+			tmp = be16_to_cpu(hdr_d->count)
 						* sizeof(xfs_attr_leaf_entry_t)
 						+ sizeof(xfs_attr_leaf_hdr_t);
-			ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >= tmp);
+			ASSERT(be16_to_cpu(hdr_d->firstused) >= tmp);
 #ifdef GROT
 		}
 #endif /* GROT */
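The two "both on-disk, don't endian flip twice" comments in this hunk mark the one case where no conversion helper is wanted: copying a big-endian field straight into another big-endian field. A minimal illustration of why inserting a conversion there would be a bug:

	/* Copying between two on-disk (big-endian) fields needs no helper. */
	static inline void copy_ondisk_be16(__be16 *dst, const __be16 *src)
	{
		*dst = *src;	/* correct: value stays big-endian */
		/*
		 * *dst = cpu_to_be16(*src) would byte-swap an already
		 * swapped value on little-endian hosts; once the fields
		 * are annotated __be16, sparse also flags it as a type
		 * error.
		 */
	}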
@@ -2247,7 +2175,7 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
 	/*
 	 * Zero out the entries we just copied.
 	 */
-	if (start_s == INT_GET(hdr_s->count, ARCH_CONVERT)) {
+	if (start_s == be16_to_cpu(hdr_s->count)) {
 		tmp = count * sizeof(xfs_attr_leaf_entry_t);
 		entry_s = &leaf_s->entries[start_s];
 		ASSERT(((char *)entry_s + tmp) <=
@@ -2258,15 +2186,14 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
 		 * Move the remaining entries down to fill the hole,
 		 * then zero the entries at the top.
 		 */
-		tmp  = INT_GET(hdr_s->count, ARCH_CONVERT) - count;
+		tmp  = be16_to_cpu(hdr_s->count) - count;
 		tmp *= sizeof(xfs_attr_leaf_entry_t);
 		entry_s = &leaf_s->entries[start_s + count];
 		entry_d = &leaf_s->entries[start_s];
 		memmove((char *)entry_d, (char *)entry_s, tmp);
 
 		tmp = count * sizeof(xfs_attr_leaf_entry_t);
-		entry_s = &leaf_s->entries[INT_GET(hdr_s->count,
-							ARCH_CONVERT)];
+		entry_s = &leaf_s->entries[be16_to_cpu(hdr_s->count)];
 		ASSERT(((char *)entry_s + tmp) <=
 		       ((char *)leaf_s + XFS_LBSIZE(mp)));
 		memset((char *)entry_s, 0, tmp);
@@ -2275,14 +2202,11 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
 	/*
 	 * Fill in the freemap information
 	 */
-	INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT,
-					sizeof(xfs_attr_leaf_hdr_t));
-	INT_MOD(hdr_d->freemap[0].base, ARCH_CONVERT,
-				INT_GET(hdr_d->count, ARCH_CONVERT)
-					* sizeof(xfs_attr_leaf_entry_t));
-	INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT,
-				INT_GET(hdr_d->firstused, ARCH_CONVERT)
-			      - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT));
+	hdr_d->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t));
+	be16_add(&hdr_d->freemap[0].base, be16_to_cpu(hdr_d->count) *
+			sizeof(xfs_attr_leaf_entry_t));
+	hdr_d->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr_d->firstused)
+			      - be16_to_cpu(hdr_d->freemap[0].base));
 	hdr_d->freemap[1].base = 0;
 	hdr_d->freemap[2].base = 0;
 	hdr_d->freemap[1].size = 0;
@@ -2301,18 +2225,16 @@ xfs_attr_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp)
 
 	leaf1 = leaf1_bp->data;
 	leaf2 = leaf2_bp->data;
-	ASSERT((INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT)
-						== XFS_ATTR_LEAF_MAGIC) &&
-	       (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT)
-						== XFS_ATTR_LEAF_MAGIC));
-	if (   (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0)
-	    && (INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0)
-	    && (   (INT_GET(leaf2->entries[ 0 ].hashval, ARCH_CONVERT) <
-		      INT_GET(leaf1->entries[ 0 ].hashval, ARCH_CONVERT))
-		|| (INT_GET(leaf2->entries[INT_GET(leaf2->hdr.count,
-				ARCH_CONVERT)-1].hashval, ARCH_CONVERT) <
-		      INT_GET(leaf1->entries[INT_GET(leaf1->hdr.count,
-				ARCH_CONVERT)-1].hashval, ARCH_CONVERT))) ) {
+	ASSERT((be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC) &&
+	       (be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC));
+	if ((be16_to_cpu(leaf1->hdr.count) > 0) &&
+	    (be16_to_cpu(leaf2->hdr.count) > 0) &&
+	    ((be32_to_cpu(leaf2->entries[0].hashval) <
+	      be32_to_cpu(leaf1->entries[0].hashval)) ||
+	     (be32_to_cpu(leaf2->entries[
+			be16_to_cpu(leaf2->hdr.count)-1].hashval) <
+	      be32_to_cpu(leaf1->entries[
+			be16_to_cpu(leaf1->hdr.count)-1].hashval)))) {
 		return(1);
 	}
 	return(0);
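The predicate above reports whether the two leaves are out of order by comparing both the first and the last hash in each block; either comparison firing means leaf2 sorts before leaf1, and empty leaves are treated as already ordered. As a standalone host-endian sketch:

	/* Restatement of the ordering test over plain hash arrays. */
	#include <stdint.h>

	static int leaves_swapped(const uint32_t *h1, int n1,
				  const uint32_t *h2, int n2)
	{
		if (n1 > 0 && n2 > 0 &&
		    (h2[0] < h1[0] || h2[n2 - 1] < h1[n1 - 1]))
			return 1;	/* leaf2 sorts before leaf1 */
		return 0;
	}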
@@ -2327,14 +2249,12 @@ xfs_attr_leaf_lasthash(xfs_dabuf_t *bp, int *count)
 	xfs_attr_leafblock_t *leaf;
 
 	leaf = bp->data;
-	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
-						== XFS_ATTR_LEAF_MAGIC);
+	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
 	if (count)
-		*count = INT_GET(leaf->hdr.count, ARCH_CONVERT);
+		*count = be16_to_cpu(leaf->hdr.count);
 	if (!leaf->hdr.count)
 		return(0);
-	return(INT_GET(leaf->entries[INT_GET(leaf->hdr.count,
-				ARCH_CONVERT)-1].hashval, ARCH_CONVERT));
+	return be32_to_cpu(leaf->entries[be16_to_cpu(leaf->hdr.count)-1].hashval);
 }
 
 /*
@@ -2348,13 +2268,11 @@ xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index)
 	xfs_attr_leaf_name_remote_t *name_rmt;
 	int size;
 
-	ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT)
-						== XFS_ATTR_LEAF_MAGIC);
+	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
 	if (leaf->entries[index].flags & XFS_ATTR_LOCAL) {
 		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, index);
 		size = XFS_ATTR_LEAF_ENTSIZE_LOCAL(name_loc->namelen,
-						   INT_GET(name_loc->valuelen,
-								ARCH_CONVERT));
+						   be16_to_cpu(name_loc->valuelen));
 	} else {
 		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, index);
 		size = XFS_ATTR_LEAF_ENTSIZE_REMOTE(name_rmt->namelen);
@@ -2412,22 +2330,20 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
 	 */
 	if (context->resynch) {
 		entry = &leaf->entries[0];
-		for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT);
-							entry++, i++) {
-			if (INT_GET(entry->hashval, ARCH_CONVERT)
-							== cursor->hashval) {
+		for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
+			if (be32_to_cpu(entry->hashval) == cursor->hashval) {
 				if (cursor->offset == context->dupcnt) {
 					context->dupcnt = 0;
 					break;
 				}
 				context->dupcnt++;
-			} else if (INT_GET(entry->hashval, ARCH_CONVERT)
-							> cursor->hashval) {
+			} else if (be32_to_cpu(entry->hashval) >
+					cursor->hashval) {
 				context->dupcnt = 0;
 				break;
 			}
 		}
-		if (i == INT_GET(leaf->hdr.count, ARCH_CONVERT)) {
+		if (i == be16_to_cpu(leaf->hdr.count)) {
 			xfs_attr_trace_l_c("not found", context);
 			return(0);
 		}
@@ -2441,12 +2357,12 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
2441 * We have found our place, start copying out the new attributes. 2357 * We have found our place, start copying out the new attributes.
2442 */ 2358 */
2443 retval = 0; 2359 retval = 0;
2444 for ( ; (i < INT_GET(leaf->hdr.count, ARCH_CONVERT)) 2360 for ( ; (i < be16_to_cpu(leaf->hdr.count))
2445 && (retval == 0); entry++, i++) { 2361 && (retval == 0); entry++, i++) {
2446 attrnames_t *namesp; 2362 attrnames_t *namesp;
2447 2363
2448 if (INT_GET(entry->hashval, ARCH_CONVERT) != cursor->hashval) { 2364 if (be32_to_cpu(entry->hashval) != cursor->hashval) {
2449 cursor->hashval = INT_GET(entry->hashval, ARCH_CONVERT); 2365 cursor->hashval = be32_to_cpu(entry->hashval);
2450 cursor->offset = 0; 2366 cursor->offset = 0;
2451 } 2367 }
2452 2368
@@ -2475,8 +2391,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
2475 retval = xfs_attr_put_listent(context, namesp, 2391 retval = xfs_attr_put_listent(context, namesp,
2476 (char *)name_loc->nameval, 2392 (char *)name_loc->nameval,
2477 (int)name_loc->namelen, 2393 (int)name_loc->namelen,
2478 (int)INT_GET(name_loc->valuelen, 2394 be16_to_cpu(name_loc->valuelen));
2479 ARCH_CONVERT));
2480 } 2395 }
2481 } else { 2396 } else {
2482 name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); 2397 name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i);
@@ -2488,8 +2403,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
2488 retval = xfs_attr_put_listent(context, namesp, 2403 retval = xfs_attr_put_listent(context, namesp,
2489 (char *)name_rmt->name, 2404 (char *)name_rmt->name,
2490 (int)name_rmt->namelen, 2405 (int)name_rmt->namelen,
2491 (int)INT_GET(name_rmt->valuelen, 2406 be32_to_cpu(name_rmt->valuelen));
2492 ARCH_CONVERT));
2493 } 2407 }
2494 } 2408 }
2495 if (retval == 0) { 2409 if (retval == 0) {
@@ -2596,9 +2510,8 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args)
2596 ASSERT(bp != NULL); 2510 ASSERT(bp != NULL);
2597 2511
2598 leaf = bp->data; 2512 leaf = bp->data;
2599 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) 2513 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
2600 == XFS_ATTR_LEAF_MAGIC); 2514 ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
2601 ASSERT(args->index < INT_GET(leaf->hdr.count, ARCH_CONVERT));
2602 ASSERT(args->index >= 0); 2515 ASSERT(args->index >= 0);
2603 entry = &leaf->entries[ args->index ]; 2516 entry = &leaf->entries[ args->index ];
2604 ASSERT(entry->flags & XFS_ATTR_INCOMPLETE); 2517 ASSERT(entry->flags & XFS_ATTR_INCOMPLETE);
@@ -2613,7 +2526,7 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args)
2613 namelen = name_rmt->namelen; 2526 namelen = name_rmt->namelen;
2614 name = (char *)name_rmt->name; 2527 name = (char *)name_rmt->name;
2615 } 2528 }
2616 ASSERT(INT_GET(entry->hashval, ARCH_CONVERT) == args->hashval); 2529 ASSERT(be32_to_cpu(entry->hashval) == args->hashval);
2617 ASSERT(namelen == args->namelen); 2530 ASSERT(namelen == args->namelen);
2618 ASSERT(memcmp(name, args->name, namelen) == 0); 2531 ASSERT(memcmp(name, args->name, namelen) == 0);
2619#endif /* DEBUG */ 2532#endif /* DEBUG */
@@ -2625,8 +2538,8 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args)
2625 if (args->rmtblkno) { 2538 if (args->rmtblkno) {
2626 ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0); 2539 ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0);
2627 name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); 2540 name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
2628 INT_SET(name_rmt->valueblk, ARCH_CONVERT, args->rmtblkno); 2541 name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
2629 INT_SET(name_rmt->valuelen, ARCH_CONVERT, args->valuelen); 2542 name_rmt->valuelen = cpu_to_be32(args->valuelen);
2630 xfs_da_log_buf(args->trans, bp, 2543 xfs_da_log_buf(args->trans, bp,
2631 XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt))); 2544 XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
2632 } 2545 }
@@ -2663,9 +2576,8 @@ xfs_attr_leaf_setflag(xfs_da_args_t *args)
2663 ASSERT(bp != NULL); 2576 ASSERT(bp != NULL);
2664 2577
2665 leaf = bp->data; 2578 leaf = bp->data;
2666 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) 2579 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
2667 == XFS_ATTR_LEAF_MAGIC); 2580 ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
2668 ASSERT(args->index < INT_GET(leaf->hdr.count, ARCH_CONVERT));
2669 ASSERT(args->index >= 0); 2581 ASSERT(args->index >= 0);
2670 entry = &leaf->entries[ args->index ]; 2582 entry = &leaf->entries[ args->index ];
2671 2583
@@ -2736,16 +2648,14 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args)
2736 } 2648 }
2737 2649
2738 leaf1 = bp1->data; 2650 leaf1 = bp1->data;
2739 ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) 2651 ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
2740 == XFS_ATTR_LEAF_MAGIC); 2652 ASSERT(args->index < be16_to_cpu(leaf1->hdr.count));
2741 ASSERT(args->index < INT_GET(leaf1->hdr.count, ARCH_CONVERT));
2742 ASSERT(args->index >= 0); 2653 ASSERT(args->index >= 0);
2743 entry1 = &leaf1->entries[ args->index ]; 2654 entry1 = &leaf1->entries[ args->index ];
2744 2655
2745 leaf2 = bp2->data; 2656 leaf2 = bp2->data;
2746 ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) 2657 ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
2747 == XFS_ATTR_LEAF_MAGIC); 2658 ASSERT(args->index2 < be16_to_cpu(leaf2->hdr.count));
2748 ASSERT(args->index2 < INT_GET(leaf2->hdr.count, ARCH_CONVERT));
2749 ASSERT(args->index2 >= 0); 2659 ASSERT(args->index2 >= 0);
2750 entry2 = &leaf2->entries[ args->index2 ]; 2660 entry2 = &leaf2->entries[ args->index2 ];
2751 2661
@@ -2768,7 +2678,7 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args)
2768 namelen2 = name_rmt->namelen; 2678 namelen2 = name_rmt->namelen;
2769 name2 = (char *)name_rmt->name; 2679 name2 = (char *)name_rmt->name;
2770 } 2680 }
2771 ASSERT(INT_GET(entry1->hashval, ARCH_CONVERT) == INT_GET(entry2->hashval, ARCH_CONVERT)); 2681 ASSERT(be32_to_cpu(entry1->hashval) == be32_to_cpu(entry2->hashval));
2772 ASSERT(namelen1 == namelen2); 2682 ASSERT(namelen1 == namelen2);
2773 ASSERT(memcmp(name1, name2, namelen1) == 0); 2683 ASSERT(memcmp(name1, name2, namelen1) == 0);
2774#endif /* DEBUG */ 2684#endif /* DEBUG */
@@ -2782,8 +2692,8 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args)
2782 if (args->rmtblkno) { 2692 if (args->rmtblkno) {
2783 ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0); 2693 ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0);
2784 name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf1, args->index); 2694 name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf1, args->index);
2785 INT_SET(name_rmt->valueblk, ARCH_CONVERT, args->rmtblkno); 2695 name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
2786 INT_SET(name_rmt->valuelen, ARCH_CONVERT, args->valuelen); 2696 name_rmt->valuelen = cpu_to_be32(args->valuelen);
2787 xfs_da_log_buf(args->trans, bp1, 2697 xfs_da_log_buf(args->trans, bp1,
2788 XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt))); 2698 XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt)));
2789 } 2699 }
@@ -2842,9 +2752,9 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp)
2842 * This is a depth-first traversal! 2752 * This is a depth-first traversal!
2843 */ 2753 */
2844 info = bp->data; 2754 info = bp->data;
2845 if (INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { 2755 if (be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC) {
2846 error = xfs_attr_node_inactive(trans, dp, bp, 1); 2756 error = xfs_attr_node_inactive(trans, dp, bp, 1);
2847 } else if (INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC) { 2757 } else if (be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC) {
2848 error = xfs_attr_leaf_inactive(trans, dp, bp); 2758 error = xfs_attr_leaf_inactive(trans, dp, bp);
2849 } else { 2759 } else {
2850 error = XFS_ERROR(EIO); 2760 error = XFS_ERROR(EIO);
@@ -2892,15 +2802,14 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
2892 } 2802 }
2893 2803
2894 node = bp->data; 2804 node = bp->data;
2895 ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) 2805 ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
2896 == XFS_DA_NODE_MAGIC);
2897 parent_blkno = xfs_da_blkno(bp); /* save for re-read later */ 2806 parent_blkno = xfs_da_blkno(bp); /* save for re-read later */
2898 count = INT_GET(node->hdr.count, ARCH_CONVERT); 2807 count = be16_to_cpu(node->hdr.count);
2899 if (!count) { 2808 if (!count) {
2900 xfs_da_brelse(*trans, bp); 2809 xfs_da_brelse(*trans, bp);
2901 return(0); 2810 return(0);
2902 } 2811 }
2903 child_fsb = INT_GET(node->btree[0].before, ARCH_CONVERT); 2812 child_fsb = be32_to_cpu(node->btree[0].before);
2904 xfs_da_brelse(*trans, bp); /* no locks for later trans */ 2813 xfs_da_brelse(*trans, bp); /* no locks for later trans */
2905 2814
2906 /* 2815 /*
@@ -2927,12 +2836,10 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
2927 * Invalidate the subtree, however we have to. 2836 * Invalidate the subtree, however we have to.
2928 */ 2837 */
2929 info = child_bp->data; 2838 info = child_bp->data;
2930 if (INT_GET(info->magic, ARCH_CONVERT) 2839 if (be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC) {
2931 == XFS_DA_NODE_MAGIC) {
2932 error = xfs_attr_node_inactive(trans, dp, 2840 error = xfs_attr_node_inactive(trans, dp,
2933 child_bp, level+1); 2841 child_bp, level+1);
2934 } else if (INT_GET(info->magic, ARCH_CONVERT) 2842 } else if (be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC) {
2935 == XFS_ATTR_LEAF_MAGIC) {
2936 error = xfs_attr_leaf_inactive(trans, dp, 2843 error = xfs_attr_leaf_inactive(trans, dp,
2937 child_bp); 2844 child_bp);
2938 } else { 2845 } else {
@@ -2962,7 +2869,7 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
2962 &bp, XFS_ATTR_FORK); 2869 &bp, XFS_ATTR_FORK);
2963 if (error) 2870 if (error)
2964 return(error); 2871 return(error);
2965 child_fsb = INT_GET(node->btree[i+1].before, ARCH_CONVERT); 2872 child_fsb = be32_to_cpu(node->btree[i+1].before);
2966 xfs_da_brelse(*trans, bp); 2873 xfs_da_brelse(*trans, bp);
2967 } 2874 }
2968 /* 2875 /*
@@ -2991,17 +2898,16 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp)
2991 int error, count, size, tmp, i; 2898 int error, count, size, tmp, i;
2992 2899
2993 leaf = bp->data; 2900 leaf = bp->data;
2994 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) 2901 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
2995 == XFS_ATTR_LEAF_MAGIC);
2996 2902
2997 /* 2903 /*
2998 * Count the number of "remote" value extents. 2904 * Count the number of "remote" value extents.
2999 */ 2905 */
3000 count = 0; 2906 count = 0;
3001 entry = &leaf->entries[0]; 2907 entry = &leaf->entries[0];
3002 for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { 2908 for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
3003 if ( INT_GET(entry->nameidx, ARCH_CONVERT) 2909 if (be16_to_cpu(entry->nameidx) &&
3004 && ((entry->flags & XFS_ATTR_LOCAL) == 0)) { 2910 ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
3005 name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); 2911 name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i);
3006 if (name_rmt->valueblk) 2912 if (name_rmt->valueblk)
3007 count++; 2913 count++;
@@ -3027,17 +2933,14 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp)
3027 */ 2933 */
3028 lp = list; 2934 lp = list;
3029 entry = &leaf->entries[0]; 2935 entry = &leaf->entries[0];
3030 for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { 2936 for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
3031 if ( INT_GET(entry->nameidx, ARCH_CONVERT) 2937 if (be16_to_cpu(entry->nameidx) &&
3032 && ((entry->flags & XFS_ATTR_LOCAL) == 0)) { 2938 ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
3033 name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); 2939 name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i);
3034 if (name_rmt->valueblk) { 2940 if (name_rmt->valueblk) {
3035 /* both on-disk, don't endian flip twice */ 2941 lp->valueblk = be32_to_cpu(name_rmt->valueblk);
3036 lp->valueblk = name_rmt->valueblk; 2942 lp->valuelen = XFS_B_TO_FSB(dp->i_mount,
3037 INT_SET(lp->valuelen, ARCH_CONVERT, 2943 be32_to_cpu(name_rmt->valuelen));
3038 XFS_B_TO_FSB(dp->i_mount,
3039 INT_GET(name_rmt->valuelen,
3040 ARCH_CONVERT)));
3041 lp++; 2944 lp++;
3042 } 2945 }
3043 } 2946 }
@@ -3050,10 +2953,8 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp)
3050 error = 0; 2953 error = 0;
3051 for (lp = list, i = 0; i < count; i++, lp++) { 2954 for (lp = list, i = 0; i < count; i++, lp++) {
3052 tmp = xfs_attr_leaf_freextent(trans, dp, 2955 tmp = xfs_attr_leaf_freextent(trans, dp,
3053 INT_GET(lp->valueblk, 2956 lp->valueblk, lp->valuelen);
3054 ARCH_CONVERT), 2957
3055 INT_GET(lp->valuelen,
3056 ARCH_CONVERT));
3057 if (error == 0) 2958 if (error == 0)
3058 error = tmp; /* save only the 1st errno */ 2959 error = tmp; /* save only the 1st errno */
3059 } 2960 }
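
The xfs_attr_leaf.c conversion above is mechanical: every INT_GET(x, ARCH_CONVERT) becomes be16_to_cpu()/be32_to_cpu() according to the field's width, and every INT_SET() becomes an assignment of a cpu_to_be16()/cpu_to_be32() value. As a rough sketch of what those standard helpers do on a little-endian host (the real definitions live under include/linux/byteorder/ and usually compile down to a single byte-swap instruction; the example_ names here are illustrative, not kernel API):

	#include <stdint.h>

	/* be16_to_cpu() on a little-endian CPU: swap the two bytes. */
	static inline uint16_t example_be16_to_cpu(uint16_t x)
	{
		return (uint16_t)((x >> 8) | (x << 8));
	}

	/* cpu_to_be32() on a little-endian CPU: reverse all four bytes. */
	static inline uint32_t example_cpu_to_be32(uint32_t x)
	{
		return ((x & 0xff000000u) >> 24) | ((x & 0x00ff0000u) >>  8) |
		       ((x & 0x0000ff00u) <<  8) | ((x & 0x000000ffu) << 24);
	}

On a big-endian host both helpers are the identity, which is why the old ARCH_CONVERT machinery could compile to nothing there.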
diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h
index 541e34109bb9..51c3ee156b2f 100644
--- a/fs/xfs/xfs_attr_leaf.h
+++ b/fs/xfs/xfs_attr_leaf.h
@@ -73,39 +73,39 @@ struct xfs_trans;
73#define XFS_ATTR_LEAF_MAPSIZE 3 /* how many freespace slots */ 73#define XFS_ATTR_LEAF_MAPSIZE 3 /* how many freespace slots */
74 74
75typedef struct xfs_attr_leaf_map { /* RLE map of free bytes */ 75typedef struct xfs_attr_leaf_map { /* RLE map of free bytes */
76 __uint16_t base; /* base of free region */ 76 __be16 base; /* base of free region */
77 __uint16_t size; /* length of free region */ 77 __be16 size; /* length of free region */
78} xfs_attr_leaf_map_t; 78} xfs_attr_leaf_map_t;
79 79
80typedef struct xfs_attr_leaf_hdr { /* constant-structure header block */ 80typedef struct xfs_attr_leaf_hdr { /* constant-structure header block */
81 xfs_da_blkinfo_t info; /* block type, links, etc. */ 81 xfs_da_blkinfo_t info; /* block type, links, etc. */
82 __uint16_t count; /* count of active leaf_entry's */ 82 __be16 count; /* count of active leaf_entry's */
83 __uint16_t usedbytes; /* num bytes of names/values stored */ 83 __be16 usedbytes; /* num bytes of names/values stored */
84 __uint16_t firstused; /* first used byte in name area */ 84 __be16 firstused; /* first used byte in name area */
85 __uint8_t holes; /* != 0 if blk needs compaction */ 85 __u8 holes; /* != 0 if blk needs compaction */
86 __uint8_t pad1; 86 __u8 pad1;
87 xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE]; 87 xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE];
88 /* N largest free regions */ 88 /* N largest free regions */
89} xfs_attr_leaf_hdr_t; 89} xfs_attr_leaf_hdr_t;
90 90
91typedef struct xfs_attr_leaf_entry { /* sorted on key, not name */ 91typedef struct xfs_attr_leaf_entry { /* sorted on key, not name */
92 xfs_dahash_t hashval; /* hash value of name */ 92 __be32 hashval; /* hash value of name */
93 __uint16_t nameidx; /* index into buffer of name/value */ 93 __be16 nameidx; /* index into buffer of name/value */
94 __uint8_t flags; /* LOCAL/ROOT/SECURE/INCOMPLETE flag */ 94 __u8 flags; /* LOCAL/ROOT/SECURE/INCOMPLETE flag */
95 __uint8_t pad2; /* unused pad byte */ 95 __u8 pad2; /* unused pad byte */
96} xfs_attr_leaf_entry_t; 96} xfs_attr_leaf_entry_t;
97 97
98typedef struct xfs_attr_leaf_name_local { 98typedef struct xfs_attr_leaf_name_local {
99 __uint16_t valuelen; /* number of bytes in value */ 99 __be16 valuelen; /* number of bytes in value */
100 __uint8_t namelen; /* length of name bytes */ 100 __u8 namelen; /* length of name bytes */
101 __uint8_t nameval[1]; /* name/value bytes */ 101 __u8 nameval[1]; /* name/value bytes */
102} xfs_attr_leaf_name_local_t; 102} xfs_attr_leaf_name_local_t;
103 103
104typedef struct xfs_attr_leaf_name_remote { 104typedef struct xfs_attr_leaf_name_remote {
105 xfs_dablk_t valueblk; /* block number of value bytes */ 105 __be32 valueblk; /* block number of value bytes */
106 __uint32_t valuelen; /* number of bytes in value */ 106 __be32 valuelen; /* number of bytes in value */
107 __uint8_t namelen; /* length of name bytes */ 107 __u8 namelen; /* length of name bytes */
108 __uint8_t name[1]; /* name bytes */ 108 __u8 name[1]; /* name bytes */
109} xfs_attr_leaf_name_remote_t; 109} xfs_attr_leaf_name_remote_t;
110 110
111typedef struct xfs_attr_leafblock { 111typedef struct xfs_attr_leafblock {
@@ -143,8 +143,8 @@ typedef struct xfs_attr_leafblock {
143static inline xfs_attr_leaf_name_remote_t * 143static inline xfs_attr_leaf_name_remote_t *
144xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx) 144xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx)
145{ 145{
146 return (xfs_attr_leaf_name_remote_t *) &((char *) 146 return (xfs_attr_leaf_name_remote_t *)
147 (leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)]; 147 &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)];
148} 148}
149 149
150#define XFS_ATTR_LEAF_NAME_LOCAL(leafp,idx) \ 150#define XFS_ATTR_LEAF_NAME_LOCAL(leafp,idx) \
@@ -152,16 +152,15 @@ xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx)
152static inline xfs_attr_leaf_name_local_t * 152static inline xfs_attr_leaf_name_local_t *
153xfs_attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx) 153xfs_attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)
154{ 154{
155 return (xfs_attr_leaf_name_local_t *) &((char *) 155 return (xfs_attr_leaf_name_local_t *)
156 (leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)]; 156 &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)];
157} 157}
158 158
159#define XFS_ATTR_LEAF_NAME(leafp,idx) \ 159#define XFS_ATTR_LEAF_NAME(leafp,idx) \
160 xfs_attr_leaf_name(leafp,idx) 160 xfs_attr_leaf_name(leafp,idx)
161static inline char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx) 161static inline char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx)
162{ 162{
163 return (&((char *) 163 return &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)];
164 (leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)]);
165} 164}
166 165
167/* 166/*
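
The payoff of the header change is type safety rather than behaviour: __be16/__be32 are __bitwise-annotated integer types, so sparse can flag any access that forgets the conversion helper. A minimal sketch of the pattern (struct and field names shortened for illustration):

	struct example_entry {
		__be32	hashval;	/* big-endian on disk */
		__be16	nameidx;
		__u8	flags;
		__u8	pad;
	};

	/* Wrong: compares raw big-endian bytes against a CPU-order value,
	 * and sparse warns about the bare use of a __be32:
	 *	if (entry->hashval == args->hashval)
	 * Right: convert exactly at the point of use:
	 *	if (be32_to_cpu(entry->hashval) == args->hashval)
	 */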
diff --git a/fs/xfs/xfs_attr_sf.h b/fs/xfs/xfs_attr_sf.h
index ffed6ca81a52..f67f917803b1 100644
--- a/fs/xfs/xfs_attr_sf.h
+++ b/fs/xfs/xfs_attr_sf.h
@@ -32,8 +32,8 @@ struct xfs_inode;
32 */ 32 */
33typedef struct xfs_attr_shortform { 33typedef struct xfs_attr_shortform {
34 struct xfs_attr_sf_hdr { /* constant-structure header block */ 34 struct xfs_attr_sf_hdr { /* constant-structure header block */
35 __uint16_t totsize; /* total bytes in shortform list */ 35 __be16 totsize; /* total bytes in shortform list */
36 __uint8_t count; /* count of active entries */ 36 __u8 count; /* count of active entries */
37 } hdr; 37 } hdr;
38 struct xfs_attr_sf_entry { 38 struct xfs_attr_sf_entry {
39 __uint8_t namelen; /* actual length of name (no NULL) */ 39 __uint8_t namelen; /* actual length of name (no NULL) */
@@ -66,8 +66,8 @@ typedef struct xfs_attr_sf_sort {
66#define XFS_ATTR_SF_NEXTENTRY(sfep) /* next entry in struct */ \ 66#define XFS_ATTR_SF_NEXTENTRY(sfep) /* next entry in struct */ \
67 ((xfs_attr_sf_entry_t *)((char *)(sfep) + XFS_ATTR_SF_ENTSIZE(sfep))) 67 ((xfs_attr_sf_entry_t *)((char *)(sfep) + XFS_ATTR_SF_ENTSIZE(sfep)))
68#define XFS_ATTR_SF_TOTSIZE(dp) /* total space in use */ \ 68#define XFS_ATTR_SF_TOTSIZE(dp) /* total space in use */ \
69 (INT_GET(((xfs_attr_shortform_t *) \ 69 (be16_to_cpu(((xfs_attr_shortform_t *) \
70 ((dp)->i_afp->if_u1.if_data))->hdr.totsize, ARCH_CONVERT)) 70 ((dp)->i_afp->if_u1.if_data))->hdr.totsize))
71 71
72#if defined(XFS_ATTR_TRACE) 72#if defined(XFS_ATTR_TRACE)
73/* 73/*
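
XFS_ATTR_SF_TOTSIZE above hides a cast-and-convert pair; expanded by hand for a hypothetical inode pointer dp it reads roughly as:

	xfs_attr_shortform_t	*sf;
	int			totsize;

	sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
	totsize = be16_to_cpu(sf->hdr.totsize);	/* total bytes in shortform list */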
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 70625e577c70..2d702e4a74a3 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -89,7 +89,7 @@ xfs_bmap_add_attrfork_local(
89 int *flags); /* inode logging flags */ 89 int *flags); /* inode logging flags */
90 90
91/* 91/*
92 * Called by xfs_bmapi to update extent list structure and the btree 92 * Called by xfs_bmapi to update file extent records and the btree
93 * after allocating space (or doing a delayed allocation). 93 * after allocating space (or doing a delayed allocation).
94 */ 94 */
95STATIC int /* error */ 95STATIC int /* error */
@@ -97,7 +97,7 @@ xfs_bmap_add_extent(
97 xfs_inode_t *ip, /* incore inode pointer */ 97 xfs_inode_t *ip, /* incore inode pointer */
98 xfs_extnum_t idx, /* extent number to update/insert */ 98 xfs_extnum_t idx, /* extent number to update/insert */
99 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 99 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
100 xfs_bmbt_irec_t *new, /* new data to put in extent list */ 100 xfs_bmbt_irec_t *new, /* new data to add to file extents */
101 xfs_fsblock_t *first, /* pointer to firstblock variable */ 101 xfs_fsblock_t *first, /* pointer to firstblock variable */
102 xfs_bmap_free_t *flist, /* list of extents to be freed */ 102 xfs_bmap_free_t *flist, /* list of extents to be freed */
103 int *logflagsp, /* inode logging flags */ 103 int *logflagsp, /* inode logging flags */
@@ -113,7 +113,7 @@ xfs_bmap_add_extent_delay_real(
113 xfs_inode_t *ip, /* incore inode pointer */ 113 xfs_inode_t *ip, /* incore inode pointer */
114 xfs_extnum_t idx, /* extent number to update/insert */ 114 xfs_extnum_t idx, /* extent number to update/insert */
115 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 115 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
116 xfs_bmbt_irec_t *new, /* new data to put in extent list */ 116 xfs_bmbt_irec_t *new, /* new data to add to file extents */
117 xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */ 117 xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */
118 xfs_fsblock_t *first, /* pointer to firstblock variable */ 118 xfs_fsblock_t *first, /* pointer to firstblock variable */
119 xfs_bmap_free_t *flist, /* list of extents to be freed */ 119 xfs_bmap_free_t *flist, /* list of extents to be freed */
@@ -129,7 +129,7 @@ xfs_bmap_add_extent_hole_delay(
129 xfs_inode_t *ip, /* incore inode pointer */ 129 xfs_inode_t *ip, /* incore inode pointer */
130 xfs_extnum_t idx, /* extent number to update/insert */ 130 xfs_extnum_t idx, /* extent number to update/insert */
131 xfs_btree_cur_t *cur, /* if null, not a btree */ 131 xfs_btree_cur_t *cur, /* if null, not a btree */
132 xfs_bmbt_irec_t *new, /* new data to put in extent list */ 132 xfs_bmbt_irec_t *new, /* new data to add to file extents */
133 int *logflagsp,/* inode logging flags */ 133 int *logflagsp,/* inode logging flags */
134 int rsvd); /* OK to allocate reserved blocks */ 134 int rsvd); /* OK to allocate reserved blocks */
135 135
@@ -142,7 +142,7 @@ xfs_bmap_add_extent_hole_real(
142 xfs_inode_t *ip, /* incore inode pointer */ 142 xfs_inode_t *ip, /* incore inode pointer */
143 xfs_extnum_t idx, /* extent number to update/insert */ 143 xfs_extnum_t idx, /* extent number to update/insert */
144 xfs_btree_cur_t *cur, /* if null, not a btree */ 144 xfs_btree_cur_t *cur, /* if null, not a btree */
145 xfs_bmbt_irec_t *new, /* new data to put in extent list */ 145 xfs_bmbt_irec_t *new, /* new data to add to file extents */
146 int *logflagsp, /* inode logging flags */ 146 int *logflagsp, /* inode logging flags */
147 int whichfork); /* data or attr fork */ 147 int whichfork); /* data or attr fork */
148 148
@@ -155,7 +155,7 @@ xfs_bmap_add_extent_unwritten_real(
155 xfs_inode_t *ip, /* incore inode pointer */ 155 xfs_inode_t *ip, /* incore inode pointer */
156 xfs_extnum_t idx, /* extent number to update/insert */ 156 xfs_extnum_t idx, /* extent number to update/insert */
157 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 157 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
158 xfs_bmbt_irec_t *new, /* new data to put in extent list */ 158 xfs_bmbt_irec_t *new, /* new data to add to file extents */
159 int *logflagsp); /* inode logging flags */ 159 int *logflagsp); /* inode logging flags */
160 160
161/* 161/*
@@ -169,7 +169,7 @@ xfs_bmap_alloc(
169/* 169/*
170 * Transform a btree format file with only one leaf node, where the 170 * Transform a btree format file with only one leaf node, where the
171 * extents list will fit in the inode, into an extents format file. 171 * extents list will fit in the inode, into an extents format file.
172 * Since the extent list is already in-core, all we have to do is 172 * Since the file extents are already in-core, all we have to do is
173 * give up the space for the btree root and pitch the leaf block. 173 * give up the space for the btree root and pitch the leaf block.
174 */ 174 */
175STATIC int /* error */ 175STATIC int /* error */
@@ -191,7 +191,7 @@ xfs_bmap_check_extents(
191#endif 191#endif
192 192
193/* 193/*
194 * Called by xfs_bmapi to update extent list structure and the btree 194 * Called by xfs_bmapi to update file extent records and the btree
195 * after removing space (or undoing a delayed allocation). 195 * after removing space (or undoing a delayed allocation).
196 */ 196 */
197STATIC int /* error */ 197STATIC int /* error */
@@ -201,7 +201,7 @@ xfs_bmap_del_extent(
201 xfs_extnum_t idx, /* extent number to update/insert */ 201 xfs_extnum_t idx, /* extent number to update/insert */
202 xfs_bmap_free_t *flist, /* list of extents to be freed */ 202 xfs_bmap_free_t *flist, /* list of extents to be freed */
203 xfs_btree_cur_t *cur, /* if null, not a btree */ 203 xfs_btree_cur_t *cur, /* if null, not a btree */
204 xfs_bmbt_irec_t *new, /* new data to put in extent list */ 204 xfs_bmbt_irec_t *new, /* new data to add to file extents */
205 int *logflagsp,/* inode logging flags */ 205 int *logflagsp,/* inode logging flags */
206 int whichfork, /* data or attr fork */ 206 int whichfork, /* data or attr fork */
207 int rsvd); /* OK to allocate reserved blocks */ 207 int rsvd); /* OK to allocate reserved blocks */
@@ -217,18 +217,6 @@ xfs_bmap_del_free(
217 xfs_bmap_free_item_t *free); /* list item to be freed */ 217 xfs_bmap_free_item_t *free); /* list item to be freed */
218 218
219/* 219/*
220 * Remove count entries from the extents array for inode "ip", starting
221 * at index "idx". Copies the remaining items down over the deleted ones,
222 * and gives back the excess memory.
223 */
224STATIC void
225xfs_bmap_delete_exlist(
226 xfs_inode_t *ip, /* incore inode pointer */
227 xfs_extnum_t idx, /* starting delete index */
228 xfs_extnum_t count, /* count of items to delete */
229 int whichfork); /* data or attr fork */
230
231/*
232 * Convert an extents-format file into a btree-format file. 220 * Convert an extents-format file into a btree-format file.
233 * The new file will have a root block (in the inode) and a single child block. 221 * The new file will have a root block (in the inode) and a single child block.
234 */ 222 */
@@ -244,18 +232,6 @@ xfs_bmap_extents_to_btree(
244 int whichfork); /* data or attr fork */ 232 int whichfork); /* data or attr fork */
245 233
246/* 234/*
247 * Insert new item(s) in the extent list for inode "ip".
248 * Count new items are inserted at offset idx.
249 */
250STATIC void
251xfs_bmap_insert_exlist(
252 xfs_inode_t *ip, /* incore inode pointer */
253 xfs_extnum_t idx, /* starting index of new items */
254 xfs_extnum_t count, /* number of inserted items */
255 xfs_bmbt_irec_t *new, /* items to insert */
256 int whichfork); /* data or attr fork */
257
258/*
259 * Convert a local file to an extents file. 235 * Convert a local file to an extents file.
260 * This code is sort of bogus, since the file data needs to get 236 * This code is sort of bogus, since the file data needs to get
261 * logged so it won't be lost. The bmap-level manipulations are ok, though. 237 * logged so it won't be lost. The bmap-level manipulations are ok, though.
@@ -316,7 +292,7 @@ xfs_bmap_trace_addentry(
316 int whichfork); /* data or attr fork */ 292 int whichfork); /* data or attr fork */
317 293
318/* 294/*
319 * Add bmap trace entry prior to a call to xfs_bmap_delete_exlist. 295 * Add bmap trace entry prior to a call to xfs_iext_remove.
320 */ 296 */
321STATIC void 297STATIC void
322xfs_bmap_trace_delete( 298xfs_bmap_trace_delete(
@@ -328,7 +304,7 @@ xfs_bmap_trace_delete(
328 int whichfork); /* data or attr fork */ 304 int whichfork); /* data or attr fork */
329 305
330/* 306/*
331 * Add bmap trace entry prior to a call to xfs_bmap_insert_exlist, or 307 * Add bmap trace entry prior to a call to xfs_iext_insert, or
332 * reading in the extents list from the disk (in the btree). 308 * reading in the extents list from the disk (in the btree).
333 */ 309 */
334STATIC void 310STATIC void
@@ -343,7 +319,7 @@ xfs_bmap_trace_insert(
343 int whichfork); /* data or attr fork */ 319 int whichfork); /* data or attr fork */
344 320
345/* 321/*
346 * Add bmap trace entry after updating an extent list entry in place. 322 * Add bmap trace entry after updating an extent record in place.
347 */ 323 */
348STATIC void 324STATIC void
349xfs_bmap_trace_post_update( 325xfs_bmap_trace_post_update(
@@ -354,7 +330,7 @@ xfs_bmap_trace_post_update(
354 int whichfork); /* data or attr fork */ 330 int whichfork); /* data or attr fork */
355 331
356/* 332/*
357 * Add bmap trace entry prior to updating an extent list entry in place. 333 * Add bmap trace entry prior to updating an extent record in place.
358 */ 334 */
359STATIC void 335STATIC void
360xfs_bmap_trace_pre_update( 336xfs_bmap_trace_pre_update(
@@ -413,19 +389,24 @@ STATIC int
413xfs_bmap_count_tree( 389xfs_bmap_count_tree(
414 xfs_mount_t *mp, 390 xfs_mount_t *mp,
415 xfs_trans_t *tp, 391 xfs_trans_t *tp,
392 xfs_ifork_t *ifp,
416 xfs_fsblock_t blockno, 393 xfs_fsblock_t blockno,
417 int levelin, 394 int levelin,
418 int *count); 395 int *count);
419 396
420STATIC int 397STATIC int
421xfs_bmap_count_leaves( 398xfs_bmap_count_leaves(
422 xfs_bmbt_rec_t *frp, 399 xfs_ifork_t *ifp,
400 xfs_extnum_t idx,
423 int numrecs, 401 int numrecs,
424 int *count); 402 int *count);
425 403
426STATIC int 404STATIC int
427xfs_bmap_disk_count_leaves( 405xfs_bmap_disk_count_leaves(
428 xfs_bmbt_rec_t *frp, 406 xfs_ifork_t *ifp,
407 xfs_mount_t *mp,
408 xfs_extnum_t idx,
409 xfs_bmbt_block_t *block,
429 int numrecs, 410 int numrecs,
430 int *count); 411 int *count);
431 412
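
The other half of this patch shows up in the deleted declarations above: the file-private xfs_bmap_delete_exlist()/xfs_bmap_insert_exlist() helpers go away in favour of the generic incore-extent API, and xfs_bmap_count_leaves()/xfs_bmap_disk_count_leaves() now take an inode fork instead of a bare record pointer. A sketch of the access pattern the rest of the diff converts to (every call here appears in the hunks below; irec is an illustrative local):

	xfs_ifork_t	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_bmbt_rec_t	*ep;
	xfs_bmbt_irec_t	irec;

	/* Old style: index the flat if_extents array directly. */
	ep = &ifp->if_u1.if_extents[idx];

	/* New style: go through accessors, so the incore extent list
	 * is free to change representation behind them. */
	ep = xfs_iext_get_ext(ifp, idx);
	xfs_bmbt_get_all(ep, &irec);		/* decode record idx into irec */
	xfs_iext_insert(ifp, idx, 1, &irec);	/* insert 1 record at idx */
	xfs_iext_remove(ifp, idx, 1);		/* remove 1 record at idx */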
@@ -537,7 +518,7 @@ xfs_bmap_add_attrfork_local(
537} 518}
538 519
539/* 520/*
540 * Called by xfs_bmapi to update extent list structure and the btree 521 * Called by xfs_bmapi to update file extent records and the btree
541 * after allocating space (or doing a delayed allocation). 522 * after allocating space (or doing a delayed allocation).
542 */ 523 */
543STATIC int /* error */ 524STATIC int /* error */
@@ -545,7 +526,7 @@ xfs_bmap_add_extent(
545 xfs_inode_t *ip, /* incore inode pointer */ 526 xfs_inode_t *ip, /* incore inode pointer */
546 xfs_extnum_t idx, /* extent number to update/insert */ 527 xfs_extnum_t idx, /* extent number to update/insert */
547 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 528 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
548 xfs_bmbt_irec_t *new, /* new data to put in extent list */ 529 xfs_bmbt_irec_t *new, /* new data to add to file extents */
549 xfs_fsblock_t *first, /* pointer to firstblock variable */ 530 xfs_fsblock_t *first, /* pointer to firstblock variable */
550 xfs_bmap_free_t *flist, /* list of extents to be freed */ 531 xfs_bmap_free_t *flist, /* list of extents to be freed */
551 int *logflagsp, /* inode logging flags */ 532 int *logflagsp, /* inode logging flags */
@@ -578,7 +559,7 @@ xfs_bmap_add_extent(
578 if (nextents == 0) { 559 if (nextents == 0) {
579 xfs_bmap_trace_insert(fname, "insert empty", ip, 0, 1, new, 560 xfs_bmap_trace_insert(fname, "insert empty", ip, 0, 1, new,
580 NULL, whichfork); 561 NULL, whichfork);
581 xfs_bmap_insert_exlist(ip, 0, 1, new, whichfork); 562 xfs_iext_insert(ifp, 0, 1, new);
582 ASSERT(cur == NULL); 563 ASSERT(cur == NULL);
583 ifp->if_lastex = 0; 564 ifp->if_lastex = 0;
584 if (!ISNULLSTARTBLOCK(new->br_startblock)) { 565 if (!ISNULLSTARTBLOCK(new->br_startblock)) {
@@ -614,7 +595,7 @@ xfs_bmap_add_extent(
614 /* 595 /*
615 * Get the record referred to by idx. 596 * Get the record referred to by idx.
616 */ 597 */
617 xfs_bmbt_get_all(&ifp->if_u1.if_extents[idx], &prev); 598 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &prev);
618 /* 599 /*
619 * If it's a real allocation record, and the new allocation ends 600 * If it's a real allocation record, and the new allocation ends
620 * after the start of the referred to record, then we're filling 601 * after the start of the referred to record, then we're filling
@@ -714,14 +695,13 @@ xfs_bmap_add_extent_delay_real(
714 xfs_inode_t *ip, /* incore inode pointer */ 695 xfs_inode_t *ip, /* incore inode pointer */
715 xfs_extnum_t idx, /* extent number to update/insert */ 696 xfs_extnum_t idx, /* extent number to update/insert */
716 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 697 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
717 xfs_bmbt_irec_t *new, /* new data to put in extent list */ 698 xfs_bmbt_irec_t *new, /* new data to add to file extents */
718 xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */ 699 xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */
719 xfs_fsblock_t *first, /* pointer to firstblock variable */ 700 xfs_fsblock_t *first, /* pointer to firstblock variable */
720 xfs_bmap_free_t *flist, /* list of extents to be freed */ 701 xfs_bmap_free_t *flist, /* list of extents to be freed */
721 int *logflagsp, /* inode logging flags */ 702 int *logflagsp, /* inode logging flags */
722 int rsvd) /* OK to use reserved data block allocation */ 703 int rsvd) /* OK to use reserved data block allocation */
723{ 704{
724 xfs_bmbt_rec_t *base; /* base of extent entry list */
725 xfs_btree_cur_t *cur; /* btree cursor */ 705 xfs_btree_cur_t *cur; /* btree cursor */
726 int diff; /* temp value */ 706 int diff; /* temp value */
727 xfs_bmbt_rec_t *ep; /* extent entry for idx */ 707 xfs_bmbt_rec_t *ep; /* extent entry for idx */
@@ -730,6 +710,7 @@ xfs_bmap_add_extent_delay_real(
730 static char fname[] = "xfs_bmap_add_extent_delay_real"; 710 static char fname[] = "xfs_bmap_add_extent_delay_real";
731#endif 711#endif
732 int i; /* temp state */ 712 int i; /* temp state */
713 xfs_ifork_t *ifp; /* inode fork pointer */
733 xfs_fileoff_t new_endoff; /* end offset of new entry */ 714 xfs_fileoff_t new_endoff; /* end offset of new entry */
734 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ 715 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
735 /* left is 0, right is 1, prev is 2 */ 716 /* left is 0, right is 1, prev is 2 */
@@ -763,8 +744,8 @@ xfs_bmap_add_extent_delay_real(
763 * Set up a bunch of variables to make the tests simpler. 744 * Set up a bunch of variables to make the tests simpler.
764 */ 745 */
765 cur = *curp; 746 cur = *curp;
766 base = ip->i_df.if_u1.if_extents; 747 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
767 ep = &base[idx]; 748 ep = xfs_iext_get_ext(ifp, idx);
768 xfs_bmbt_get_all(ep, &PREV); 749 xfs_bmbt_get_all(ep, &PREV);
769 new_endoff = new->br_startoff + new->br_blockcount; 750 new_endoff = new->br_startoff + new->br_blockcount;
770 ASSERT(PREV.br_startoff <= new->br_startoff); 751 ASSERT(PREV.br_startoff <= new->br_startoff);
@@ -781,7 +762,7 @@ xfs_bmap_add_extent_delay_real(
781 * Don't set contiguous if the combined extent would be too large. 762 * Don't set contiguous if the combined extent would be too large.
782 */ 763 */
783 if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { 764 if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
784 xfs_bmbt_get_all(ep - 1, &LEFT); 765 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
785 STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock)); 766 STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock));
786 } 767 }
787 STATE_SET(LEFT_CONTIG, 768 STATE_SET(LEFT_CONTIG,
@@ -798,7 +779,7 @@ xfs_bmap_add_extent_delay_real(
798 if (STATE_SET_TEST(RIGHT_VALID, 779 if (STATE_SET_TEST(RIGHT_VALID,
799 idx < 780 idx <
800 ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) { 781 ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
801 xfs_bmbt_get_all(ep + 1, &RIGHT); 782 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
802 STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock)); 783 STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock));
803 } 784 }
804 STATE_SET(RIGHT_CONTIG, 785 STATE_SET(RIGHT_CONTIG,
@@ -825,14 +806,14 @@ xfs_bmap_add_extent_delay_real(
825 */ 806 */
826 xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1, 807 xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1,
827 XFS_DATA_FORK); 808 XFS_DATA_FORK);
828 xfs_bmbt_set_blockcount(ep - 1, 809 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
829 LEFT.br_blockcount + PREV.br_blockcount + 810 LEFT.br_blockcount + PREV.br_blockcount +
830 RIGHT.br_blockcount); 811 RIGHT.br_blockcount);
831 xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1, 812 xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1,
832 XFS_DATA_FORK); 813 XFS_DATA_FORK);
833 xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2, 814 xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2,
834 XFS_DATA_FORK); 815 XFS_DATA_FORK);
835 xfs_bmap_delete_exlist(ip, idx, 2, XFS_DATA_FORK); 816 xfs_iext_remove(ifp, idx, 2);
836 ip->i_df.if_lastex = idx - 1; 817 ip->i_df.if_lastex = idx - 1;
837 ip->i_d.di_nextents--; 818 ip->i_d.di_nextents--;
838 if (cur == NULL) 819 if (cur == NULL)
@@ -867,14 +848,14 @@ xfs_bmap_add_extent_delay_real(
867 */ 848 */
868 xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1, 849 xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1,
869 XFS_DATA_FORK); 850 XFS_DATA_FORK);
870 xfs_bmbt_set_blockcount(ep - 1, 851 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
871 LEFT.br_blockcount + PREV.br_blockcount); 852 LEFT.br_blockcount + PREV.br_blockcount);
872 xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1, 853 xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1,
873 XFS_DATA_FORK); 854 XFS_DATA_FORK);
874 ip->i_df.if_lastex = idx - 1; 855 ip->i_df.if_lastex = idx - 1;
875 xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1, 856 xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1,
876 XFS_DATA_FORK); 857 XFS_DATA_FORK);
877 xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK); 858 xfs_iext_remove(ifp, idx, 1);
878 if (cur == NULL) 859 if (cur == NULL)
879 rval = XFS_ILOG_DEXT; 860 rval = XFS_ILOG_DEXT;
880 else { 861 else {
@@ -908,7 +889,7 @@ xfs_bmap_add_extent_delay_real(
908 ip->i_df.if_lastex = idx; 889 ip->i_df.if_lastex = idx;
909 xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1, 890 xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1,
910 XFS_DATA_FORK); 891 XFS_DATA_FORK);
911 xfs_bmap_delete_exlist(ip, idx + 1, 1, XFS_DATA_FORK); 892 xfs_iext_remove(ifp, idx + 1, 1);
912 if (cur == NULL) 893 if (cur == NULL)
913 rval = XFS_ILOG_DEXT; 894 rval = XFS_ILOG_DEXT;
914 else { 895 else {
@@ -964,7 +945,7 @@ xfs_bmap_add_extent_delay_real(
964 */ 945 */
965 xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1, 946 xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1,
966 XFS_DATA_FORK); 947 XFS_DATA_FORK);
967 xfs_bmbt_set_blockcount(ep - 1, 948 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
968 LEFT.br_blockcount + new->br_blockcount); 949 LEFT.br_blockcount + new->br_blockcount);
969 xfs_bmbt_set_startoff(ep, 950 xfs_bmbt_set_startoff(ep,
970 PREV.br_startoff + new->br_blockcount); 951 PREV.br_startoff + new->br_blockcount);
@@ -1010,7 +991,7 @@ xfs_bmap_add_extent_delay_real(
1010 xfs_bmbt_set_blockcount(ep, temp); 991 xfs_bmbt_set_blockcount(ep, temp);
1011 xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL, 992 xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL,
1012 XFS_DATA_FORK); 993 XFS_DATA_FORK);
1013 xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK); 994 xfs_iext_insert(ifp, idx, 1, new);
1014 ip->i_df.if_lastex = idx; 995 ip->i_df.if_lastex = idx;
1015 ip->i_d.di_nextents++; 996 ip->i_d.di_nextents++;
1016 if (cur == NULL) 997 if (cur == NULL)
@@ -1039,8 +1020,7 @@ xfs_bmap_add_extent_delay_real(
1039 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 1020 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1040 STARTBLOCKVAL(PREV.br_startblock) - 1021 STARTBLOCKVAL(PREV.br_startblock) -
1041 (cur ? cur->bc_private.b.allocated : 0)); 1022 (cur ? cur->bc_private.b.allocated : 0));
1042 base = ip->i_df.if_u1.if_extents; 1023 ep = xfs_iext_get_ext(ifp, idx + 1);
1043 ep = &base[idx + 1];
1044 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); 1024 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
1045 xfs_bmap_trace_post_update(fname, "LF", ip, idx + 1, 1025 xfs_bmap_trace_post_update(fname, "LF", ip, idx + 1,
1046 XFS_DATA_FORK); 1026 XFS_DATA_FORK);
@@ -1058,7 +1038,8 @@ xfs_bmap_add_extent_delay_real(
1058 xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1, 1038 xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1,
1059 XFS_DATA_FORK); 1039 XFS_DATA_FORK);
1060 xfs_bmbt_set_blockcount(ep, temp); 1040 xfs_bmbt_set_blockcount(ep, temp);
1061 xfs_bmbt_set_allf(ep + 1, new->br_startoff, new->br_startblock, 1041 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
1042 new->br_startoff, new->br_startblock,
1062 new->br_blockcount + RIGHT.br_blockcount, 1043 new->br_blockcount + RIGHT.br_blockcount,
1063 RIGHT.br_state); 1044 RIGHT.br_state);
1064 xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1, 1045 xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1,
@@ -1098,7 +1079,7 @@ xfs_bmap_add_extent_delay_real(
1098 xfs_bmbt_set_blockcount(ep, temp); 1079 xfs_bmbt_set_blockcount(ep, temp);
1099 xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1, 1080 xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1,
1100 new, NULL, XFS_DATA_FORK); 1081 new, NULL, XFS_DATA_FORK);
1101 xfs_bmap_insert_exlist(ip, idx + 1, 1, new, XFS_DATA_FORK); 1082 xfs_iext_insert(ifp, idx + 1, 1, new);
1102 ip->i_df.if_lastex = idx + 1; 1083 ip->i_df.if_lastex = idx + 1;
1103 ip->i_d.di_nextents++; 1084 ip->i_d.di_nextents++;
1104 if (cur == NULL) 1085 if (cur == NULL)
@@ -1127,8 +1108,7 @@ xfs_bmap_add_extent_delay_real(
1127 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 1108 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1128 STARTBLOCKVAL(PREV.br_startblock) - 1109 STARTBLOCKVAL(PREV.br_startblock) -
1129 (cur ? cur->bc_private.b.allocated : 0)); 1110 (cur ? cur->bc_private.b.allocated : 0));
1130 base = ip->i_df.if_u1.if_extents; 1111 ep = xfs_iext_get_ext(ifp, idx);
1131 ep = &base[idx];
1132 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); 1112 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
1133 xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK); 1113 xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK);
1134 *dnew = temp; 1114 *dnew = temp;
@@ -1149,7 +1129,7 @@ xfs_bmap_add_extent_delay_real(
1149 r[1].br_blockcount = temp2; 1129 r[1].br_blockcount = temp2;
1150 xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1], 1130 xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1],
1151 XFS_DATA_FORK); 1131 XFS_DATA_FORK);
1152 xfs_bmap_insert_exlist(ip, idx + 1, 2, &r[0], XFS_DATA_FORK); 1132 xfs_iext_insert(ifp, idx + 1, 2, &r[0]);
1153 ip->i_df.if_lastex = idx + 1; 1133 ip->i_df.if_lastex = idx + 1;
1154 ip->i_d.di_nextents++; 1134 ip->i_d.di_nextents++;
1155 if (cur == NULL) 1135 if (cur == NULL)
@@ -1204,13 +1184,13 @@ xfs_bmap_add_extent_delay_real(
1204 } 1184 }
1205 } 1185 }
1206 } 1186 }
1207 base = ip->i_df.if_u1.if_extents; 1187 ep = xfs_iext_get_ext(ifp, idx);
1208 ep = &base[idx];
1209 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); 1188 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
1210 xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK); 1189 xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK);
1211 xfs_bmap_trace_pre_update(fname, "0", ip, idx + 2, 1190 xfs_bmap_trace_pre_update(fname, "0", ip, idx + 2,
1212 XFS_DATA_FORK); 1191 XFS_DATA_FORK);
1213 xfs_bmbt_set_startblock(ep + 2, NULLSTARTBLOCK((int)temp2)); 1192 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2),
1193 NULLSTARTBLOCK((int)temp2));
1214 xfs_bmap_trace_post_update(fname, "0", ip, idx + 2, 1194 xfs_bmap_trace_post_update(fname, "0", ip, idx + 2,
1215 XFS_DATA_FORK); 1195 XFS_DATA_FORK);
1216 *dnew = temp + temp2; 1196 *dnew = temp + temp2;
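
Throughout these delayed-allocation cases the startblock field is overloaded: NULLSTARTBLOCK(n) marks a record that owns no disk blocks yet while remembering n blocks reserved for a future bmap btree, and STARTBLOCKVAL() recovers n. The recurring idiom, taken from the hunks above, recomputes the reservation after a record has been trimmed:

	temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
			       STARTBLOCKVAL(PREV.br_startblock) -
			       (cur ? cur->bc_private.b.allocated : 0));
	ep = xfs_iext_get_ext(ifp, idx + 1);
	xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));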
@@ -1254,10 +1234,9 @@ xfs_bmap_add_extent_unwritten_real(
1254 xfs_inode_t *ip, /* incore inode pointer */ 1234 xfs_inode_t *ip, /* incore inode pointer */
1255 xfs_extnum_t idx, /* extent number to update/insert */ 1235 xfs_extnum_t idx, /* extent number to update/insert */
1256 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 1236 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
1257 xfs_bmbt_irec_t *new, /* new data to put in extent list */ 1237 xfs_bmbt_irec_t *new, /* new data to add to file extents */
1258 int *logflagsp) /* inode logging flags */ 1238 int *logflagsp) /* inode logging flags */
1259{ 1239{
1260 xfs_bmbt_rec_t *base; /* base of extent entry list */
1261 xfs_btree_cur_t *cur; /* btree cursor */ 1240 xfs_btree_cur_t *cur; /* btree cursor */
1262 xfs_bmbt_rec_t *ep; /* extent entry for idx */ 1241 xfs_bmbt_rec_t *ep; /* extent entry for idx */
1263 int error; /* error return value */ 1242 int error; /* error return value */
@@ -1265,6 +1244,7 @@ xfs_bmap_add_extent_unwritten_real(
1265 static char fname[] = "xfs_bmap_add_extent_unwritten_real"; 1244 static char fname[] = "xfs_bmap_add_extent_unwritten_real";
1266#endif 1245#endif
1267 int i; /* temp state */ 1246 int i; /* temp state */
1247 xfs_ifork_t *ifp; /* inode fork pointer */
1268 xfs_fileoff_t new_endoff; /* end offset of new entry */ 1248 xfs_fileoff_t new_endoff; /* end offset of new entry */
1269 xfs_exntst_t newext; /* new extent state */ 1249 xfs_exntst_t newext; /* new extent state */
1270 xfs_exntst_t oldext; /* old extent state */ 1250 xfs_exntst_t oldext; /* old extent state */
@@ -1298,8 +1278,8 @@ xfs_bmap_add_extent_unwritten_real(
1298 */ 1278 */
1299 error = 0; 1279 error = 0;
1300 cur = *curp; 1280 cur = *curp;
1301 base = ip->i_df.if_u1.if_extents; 1281 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1302 ep = &base[idx]; 1282 ep = xfs_iext_get_ext(ifp, idx);
1303 xfs_bmbt_get_all(ep, &PREV); 1283 xfs_bmbt_get_all(ep, &PREV);
1304 newext = new->br_state; 1284 newext = new->br_state;
1305 oldext = (newext == XFS_EXT_UNWRITTEN) ? 1285 oldext = (newext == XFS_EXT_UNWRITTEN) ?
@@ -1320,7 +1300,7 @@ xfs_bmap_add_extent_unwritten_real(
1320 * Don't set contiguous if the combined extent would be too large. 1300 * Don't set contiguous if the combined extent would be too large.
1321 */ 1301 */
1322 if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { 1302 if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
1323 xfs_bmbt_get_all(ep - 1, &LEFT); 1303 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
1324 STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock)); 1304 STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock));
1325 } 1305 }
1326 STATE_SET(LEFT_CONTIG, 1306 STATE_SET(LEFT_CONTIG,
@@ -1337,7 +1317,7 @@ xfs_bmap_add_extent_unwritten_real(
1337 if (STATE_SET_TEST(RIGHT_VALID, 1317 if (STATE_SET_TEST(RIGHT_VALID,
1338 idx < 1318 idx <
1339 ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) { 1319 ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
1340 xfs_bmbt_get_all(ep + 1, &RIGHT); 1320 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
1341 STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock)); 1321 STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock));
1342 } 1322 }
1343 STATE_SET(RIGHT_CONTIG, 1323 STATE_SET(RIGHT_CONTIG,
@@ -1363,14 +1343,14 @@ xfs_bmap_add_extent_unwritten_real(
1363 */ 1343 */
1364 xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1, 1344 xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1,
1365 XFS_DATA_FORK); 1345 XFS_DATA_FORK);
1366 xfs_bmbt_set_blockcount(ep - 1, 1346 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1367 LEFT.br_blockcount + PREV.br_blockcount + 1347 LEFT.br_blockcount + PREV.br_blockcount +
1368 RIGHT.br_blockcount); 1348 RIGHT.br_blockcount);
1369 xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1, 1349 xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1,
1370 XFS_DATA_FORK); 1350 XFS_DATA_FORK);
1371 xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2, 1351 xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2,
1372 XFS_DATA_FORK); 1352 XFS_DATA_FORK);
1373 xfs_bmap_delete_exlist(ip, idx, 2, XFS_DATA_FORK); 1353 xfs_iext_remove(ifp, idx, 2);
1374 ip->i_df.if_lastex = idx - 1; 1354 ip->i_df.if_lastex = idx - 1;
1375 ip->i_d.di_nextents -= 2; 1355 ip->i_d.di_nextents -= 2;
1376 if (cur == NULL) 1356 if (cur == NULL)
@@ -1409,14 +1389,14 @@ xfs_bmap_add_extent_unwritten_real(
1409 */ 1389 */
1410 xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1, 1390 xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1,
1411 XFS_DATA_FORK); 1391 XFS_DATA_FORK);
1412 xfs_bmbt_set_blockcount(ep - 1, 1392 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1413 LEFT.br_blockcount + PREV.br_blockcount); 1393 LEFT.br_blockcount + PREV.br_blockcount);
1414 xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1, 1394 xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1,
1415 XFS_DATA_FORK); 1395 XFS_DATA_FORK);
1416 ip->i_df.if_lastex = idx - 1; 1396 ip->i_df.if_lastex = idx - 1;
1417 xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1, 1397 xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1,
1418 XFS_DATA_FORK); 1398 XFS_DATA_FORK);
1419 xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK); 1399 xfs_iext_remove(ifp, idx, 1);
1420 ip->i_d.di_nextents--; 1400 ip->i_d.di_nextents--;
1421 if (cur == NULL) 1401 if (cur == NULL)
1422 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1402 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1456,7 +1436,7 @@ xfs_bmap_add_extent_unwritten_real(
1456 ip->i_df.if_lastex = idx; 1436 ip->i_df.if_lastex = idx;
1457 xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1, 1437 xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1,
1458 XFS_DATA_FORK); 1438 XFS_DATA_FORK);
1459 xfs_bmap_delete_exlist(ip, idx + 1, 1, XFS_DATA_FORK); 1439 xfs_iext_remove(ifp, idx + 1, 1);
1460 ip->i_d.di_nextents--; 1440 ip->i_d.di_nextents--;
1461 if (cur == NULL) 1441 if (cur == NULL)
1462 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1442 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1516,7 +1496,7 @@ xfs_bmap_add_extent_unwritten_real(
1516 */ 1496 */
1517 xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1, 1497 xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1,
1518 XFS_DATA_FORK); 1498 XFS_DATA_FORK);
1519 xfs_bmbt_set_blockcount(ep - 1, 1499 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1520 LEFT.br_blockcount + new->br_blockcount); 1500 LEFT.br_blockcount + new->br_blockcount);
1521 xfs_bmbt_set_startoff(ep, 1501 xfs_bmbt_set_startoff(ep,
1522 PREV.br_startoff + new->br_blockcount); 1502 PREV.br_startoff + new->br_blockcount);
@@ -1571,7 +1551,7 @@ xfs_bmap_add_extent_unwritten_real(
1571 xfs_bmap_trace_post_update(fname, "LF", ip, idx, XFS_DATA_FORK); 1551 xfs_bmap_trace_post_update(fname, "LF", ip, idx, XFS_DATA_FORK);
1572 xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL, 1552 xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL,
1573 XFS_DATA_FORK); 1553 XFS_DATA_FORK);
1574 xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK); 1554 xfs_iext_insert(ifp, idx, 1, new);
1575 ip->i_df.if_lastex = idx; 1555 ip->i_df.if_lastex = idx;
1576 ip->i_d.di_nextents++; 1556 ip->i_d.di_nextents++;
1577 if (cur == NULL) 1557 if (cur == NULL)
@@ -1609,7 +1589,8 @@ xfs_bmap_add_extent_unwritten_real(
1609 PREV.br_blockcount - new->br_blockcount); 1589 PREV.br_blockcount - new->br_blockcount);
1610 xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx, 1590 xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx,
1611 XFS_DATA_FORK); 1591 XFS_DATA_FORK);
1612 xfs_bmbt_set_allf(ep + 1, new->br_startoff, new->br_startblock, 1592 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
1593 new->br_startoff, new->br_startblock,
1613 new->br_blockcount + RIGHT.br_blockcount, newext); 1594 new->br_blockcount + RIGHT.br_blockcount, newext);
1614 xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1, 1595 xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1,
1615 XFS_DATA_FORK); 1596 XFS_DATA_FORK);
@@ -1649,7 +1630,7 @@ xfs_bmap_add_extent_unwritten_real(
1649 xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK); 1630 xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK);
1650 xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1, 1631 xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1,
1651 new, NULL, XFS_DATA_FORK); 1632 new, NULL, XFS_DATA_FORK);
1652 xfs_bmap_insert_exlist(ip, idx + 1, 1, new, XFS_DATA_FORK); 1633 xfs_iext_insert(ifp, idx + 1, 1, new);
1653 ip->i_df.if_lastex = idx + 1; 1634 ip->i_df.if_lastex = idx + 1;
1654 ip->i_d.di_nextents++; 1635 ip->i_d.di_nextents++;
1655 if (cur == NULL) 1636 if (cur == NULL)
@@ -1696,7 +1677,7 @@ xfs_bmap_add_extent_unwritten_real(
1696 r[1].br_state = oldext; 1677 r[1].br_state = oldext;
1697 xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1], 1678 xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1],
1698 XFS_DATA_FORK); 1679 XFS_DATA_FORK);
1699 xfs_bmap_insert_exlist(ip, idx + 1, 2, &r[0], XFS_DATA_FORK); 1680 xfs_iext_insert(ifp, idx + 1, 2, &r[0]);
1700 ip->i_df.if_lastex = idx + 1; 1681 ip->i_df.if_lastex = idx + 1;
1701 ip->i_d.di_nextents += 2; 1682 ip->i_d.di_nextents += 2;
1702 if (cur == NULL) 1683 if (cur == NULL)
@@ -1770,15 +1751,15 @@ xfs_bmap_add_extent_hole_delay(
1770 xfs_inode_t *ip, /* incore inode pointer */ 1751 xfs_inode_t *ip, /* incore inode pointer */
1771 xfs_extnum_t idx, /* extent number to update/insert */ 1752 xfs_extnum_t idx, /* extent number to update/insert */
1772 xfs_btree_cur_t *cur, /* if null, not a btree */ 1753 xfs_btree_cur_t *cur, /* if null, not a btree */
1773 xfs_bmbt_irec_t *new, /* new data to put in extent list */ 1754 xfs_bmbt_irec_t *new, /* new data to add to file extents */
1774 int *logflagsp, /* inode logging flags */ 1755 int *logflagsp, /* inode logging flags */
1775 int rsvd) /* OK to allocate reserved blocks */ 1756 int rsvd) /* OK to allocate reserved blocks */
1776{ 1757{
1777 xfs_bmbt_rec_t *base; /* base of extent entry list */ 1758 xfs_bmbt_rec_t *ep; /* extent record for idx */
1778 xfs_bmbt_rec_t *ep; /* extent list entry for idx */
1779#ifdef XFS_BMAP_TRACE 1759#ifdef XFS_BMAP_TRACE
1780 static char fname[] = "xfs_bmap_add_extent_hole_delay"; 1760 static char fname[] = "xfs_bmap_add_extent_hole_delay";
1781#endif 1761#endif
1762 xfs_ifork_t *ifp; /* inode fork pointer */
1782 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 1763 xfs_bmbt_irec_t left; /* left neighbor extent entry */
1783 xfs_filblks_t newlen=0; /* new indirect size */ 1764 xfs_filblks_t newlen=0; /* new indirect size */
1784 xfs_filblks_t oldlen=0; /* old indirect size */ 1765 xfs_filblks_t oldlen=0; /* old indirect size */
@@ -1799,15 +1780,15 @@ xfs_bmap_add_extent_hole_delay(
1799 ((state &= ~MASK(b)), 0)) 1780 ((state &= ~MASK(b)), 0))
1800#define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG)) 1781#define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG))
1801 1782
1802 base = ip->i_df.if_u1.if_extents; 1783 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1803 ep = &base[idx]; 1784 ep = xfs_iext_get_ext(ifp, idx);
1804 state = 0; 1785 state = 0;
1805 ASSERT(ISNULLSTARTBLOCK(new->br_startblock)); 1786 ASSERT(ISNULLSTARTBLOCK(new->br_startblock));
1806 /* 1787 /*
1807 * Check and set flags if this segment has a left neighbor 1788 * Check and set flags if this segment has a left neighbor
1808 */ 1789 */
1809 if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { 1790 if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
1810 xfs_bmbt_get_all(ep - 1, &left); 1791 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
1811 STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock)); 1792 STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock));
1812 } 1793 }
1813 /* 1794 /*
@@ -1844,23 +1825,24 @@ xfs_bmap_add_extent_hole_delay(
1844 /* 1825 /*
1845 * New allocation is contiguous with delayed allocations 1826 * New allocation is contiguous with delayed allocations
1846 * on the left and on the right. 1827 * on the left and on the right.
1847 * Merge all three into a single extent list entry. 1828 * Merge all three into a single extent record.
1848 */ 1829 */
1849 temp = left.br_blockcount + new->br_blockcount + 1830 temp = left.br_blockcount + new->br_blockcount +
1850 right.br_blockcount; 1831 right.br_blockcount;
1851 xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1, 1832 xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1,
1852 XFS_DATA_FORK); 1833 XFS_DATA_FORK);
1853 xfs_bmbt_set_blockcount(ep - 1, temp); 1834 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
1854 oldlen = STARTBLOCKVAL(left.br_startblock) + 1835 oldlen = STARTBLOCKVAL(left.br_startblock) +
1855 STARTBLOCKVAL(new->br_startblock) + 1836 STARTBLOCKVAL(new->br_startblock) +
1856 STARTBLOCKVAL(right.br_startblock); 1837 STARTBLOCKVAL(right.br_startblock);
1857 newlen = xfs_bmap_worst_indlen(ip, temp); 1838 newlen = xfs_bmap_worst_indlen(ip, temp);
1858 xfs_bmbt_set_startblock(ep - 1, NULLSTARTBLOCK((int)newlen)); 1839 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
1840 NULLSTARTBLOCK((int)newlen));
1859 xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1, 1841 xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1,
1860 XFS_DATA_FORK); 1842 XFS_DATA_FORK);
1861 xfs_bmap_trace_delete(fname, "LC|RC", ip, idx, 1, 1843 xfs_bmap_trace_delete(fname, "LC|RC", ip, idx, 1,
1862 XFS_DATA_FORK); 1844 XFS_DATA_FORK);
1863 xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK); 1845 xfs_iext_remove(ifp, idx, 1);
1864 ip->i_df.if_lastex = idx - 1; 1846 ip->i_df.if_lastex = idx - 1;
1865 break; 1847 break;
1866 1848
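In the delayed-allocation merge above, br_startblock holds no real block number; NULLSTARTBLOCK() encodes the worst-case count of indirect (bmap btree) blocks reserved for the extent, which is why the merge recomputes oldlen and newlen through xfs_bmap_worst_indlen(). A hedged standalone sketch of that accounting; the divisor in worst_indlen() is an assumption for illustration, not the kernel's formula:

typedef unsigned long long fsblks_t;

/* Stand-in for xfs_bmap_worst_indlen(): assume one reserved indirect
 * block per 1024 data blocks, rounded up (illustrative only). */
static fsblks_t worst_indlen(fsblks_t len)
{
	return (len + 1023) / 1024;
}

/* Three delayed extents merge: reserve for the combined length and
 * report the change against the reserves the pieces already held. */
static long long merged_reserve_delta(fsblks_t left, fsblks_t mid,
				      fsblks_t right, fsblks_t oldres)
{
	fsblks_t newres = worst_indlen(left + mid + right);

	return (long long)newres - (long long)oldres;
}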
@@ -1873,11 +1855,12 @@ xfs_bmap_add_extent_hole_delay(
1873 temp = left.br_blockcount + new->br_blockcount; 1855 temp = left.br_blockcount + new->br_blockcount;
1874 xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1, 1856 xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1,
1875 XFS_DATA_FORK); 1857 XFS_DATA_FORK);
1876 xfs_bmbt_set_blockcount(ep - 1, temp); 1858 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
1877 oldlen = STARTBLOCKVAL(left.br_startblock) + 1859 oldlen = STARTBLOCKVAL(left.br_startblock) +
1878 STARTBLOCKVAL(new->br_startblock); 1860 STARTBLOCKVAL(new->br_startblock);
1879 newlen = xfs_bmap_worst_indlen(ip, temp); 1861 newlen = xfs_bmap_worst_indlen(ip, temp);
1880 xfs_bmbt_set_startblock(ep - 1, NULLSTARTBLOCK((int)newlen)); 1862 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
1863 NULLSTARTBLOCK((int)newlen));
1881 xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, 1864 xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1,
1882 XFS_DATA_FORK); 1865 XFS_DATA_FORK);
1883 ip->i_df.if_lastex = idx - 1; 1866 ip->i_df.if_lastex = idx - 1;
@@ -1909,7 +1892,7 @@ xfs_bmap_add_extent_hole_delay(
1909 oldlen = newlen = 0; 1892 oldlen = newlen = 0;
1910 xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL, 1893 xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL,
1911 XFS_DATA_FORK); 1894 XFS_DATA_FORK);
1912 xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK); 1895 xfs_iext_insert(ifp, idx, 1, new);
1913 ip->i_df.if_lastex = idx; 1896 ip->i_df.if_lastex = idx;
1914 break; 1897 break;
1915 } 1898 }
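Both hole-filling routines drive the same four-way decision from two contiguity bits, extracted by SWITCH_STATE. A standalone model of that dispatch (simplified; the kernel's STATE/MASK macros track more conditions than these two):

enum { LEFT_CONTIG = 1 << 0, RIGHT_CONTIG = 1 << 1 };

static const char *merge_action(int state)
{
	switch (state & (LEFT_CONTIG | RIGHT_CONTIG)) {
	case LEFT_CONTIG | RIGHT_CONTIG:
		return "merge left, new and right into one record";
	case LEFT_CONTIG:
		return "extend the left record over the new range";
	case RIGHT_CONTIG:
		return "extend the right record over the new range";
	default:
		return "insert the new record on its own";
	}
}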
@@ -1940,7 +1923,7 @@ xfs_bmap_add_extent_hole_real(
1940 xfs_inode_t *ip, /* incore inode pointer */ 1923 xfs_inode_t *ip, /* incore inode pointer */
1941 xfs_extnum_t idx, /* extent number to update/insert */ 1924 xfs_extnum_t idx, /* extent number to update/insert */
1942 xfs_btree_cur_t *cur, /* if null, not a btree */ 1925 xfs_btree_cur_t *cur, /* if null, not a btree */
1943 xfs_bmbt_irec_t *new, /* new data to put in extent list */ 1926 xfs_bmbt_irec_t *new, /* new data to add to file extents */
1944 int *logflagsp, /* inode logging flags */ 1927 int *logflagsp, /* inode logging flags */
1945 int whichfork) /* data or attr fork */ 1928 int whichfork) /* data or attr fork */
1946{ 1929{
@@ -1970,13 +1953,13 @@ xfs_bmap_add_extent_hole_real(
1970 1953
1971 ifp = XFS_IFORK_PTR(ip, whichfork); 1954 ifp = XFS_IFORK_PTR(ip, whichfork);
1972 ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); 1955 ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
1973 ep = &ifp->if_u1.if_extents[idx]; 1956 ep = xfs_iext_get_ext(ifp, idx);
1974 state = 0; 1957 state = 0;
1975 /* 1958 /*
1976 * Check and set flags if this segment has a left neighbor. 1959 * Check and set flags if this segment has a left neighbor.
1977 */ 1960 */
1978 if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { 1961 if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
1979 xfs_bmbt_get_all(ep - 1, &left); 1962 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
1980 STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock)); 1963 STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock));
1981 } 1964 }
1982 /* 1965 /*
@@ -2019,18 +2002,18 @@ xfs_bmap_add_extent_hole_real(
2019 /* 2002 /*
2020 * New allocation is contiguous with real allocations on the 2003 * New allocation is contiguous with real allocations on the
2021 * left and on the right. 2004 * left and on the right.
2022 * Merge all three into a single extent list entry. 2005 * Merge all three into a single extent record.
2023 */ 2006 */
2024 xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1, 2007 xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1,
2025 whichfork); 2008 whichfork);
2026 xfs_bmbt_set_blockcount(ep - 1, 2009 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
2027 left.br_blockcount + new->br_blockcount + 2010 left.br_blockcount + new->br_blockcount +
2028 right.br_blockcount); 2011 right.br_blockcount);
2029 xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1, 2012 xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1,
2030 whichfork); 2013 whichfork);
2031 xfs_bmap_trace_delete(fname, "LC|RC", ip, 2014 xfs_bmap_trace_delete(fname, "LC|RC", ip,
2032 idx, 1, whichfork); 2015 idx, 1, whichfork);
2033 xfs_bmap_delete_exlist(ip, idx, 1, whichfork); 2016 xfs_iext_remove(ifp, idx, 1);
2034 ifp->if_lastex = idx - 1; 2017 ifp->if_lastex = idx - 1;
2035 XFS_IFORK_NEXT_SET(ip, whichfork, 2018 XFS_IFORK_NEXT_SET(ip, whichfork,
2036 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2019 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
@@ -2062,7 +2045,7 @@ xfs_bmap_add_extent_hole_real(
2062 * Merge the new allocation with the left neighbor. 2045 * Merge the new allocation with the left neighbor.
2063 */ 2046 */
2064 xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1, whichfork); 2047 xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1, whichfork);
2065 xfs_bmbt_set_blockcount(ep - 1, 2048 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
2066 left.br_blockcount + new->br_blockcount); 2049 left.br_blockcount + new->br_blockcount);
2067 xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, whichfork); 2050 xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, whichfork);
2068 ifp->if_lastex = idx - 1; 2051 ifp->if_lastex = idx - 1;
@@ -2116,7 +2099,7 @@ xfs_bmap_add_extent_hole_real(
2116 */ 2099 */
2117 xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL, 2100 xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL,
2118 whichfork); 2101 whichfork);
2119 xfs_bmap_insert_exlist(ip, idx, 1, new, whichfork); 2102 xfs_iext_insert(ifp, idx, 1, new);
2120 ifp->if_lastex = idx; 2103 ifp->if_lastex = idx;
2121 XFS_IFORK_NEXT_SET(ip, whichfork, 2104 XFS_IFORK_NEXT_SET(ip, whichfork,
2122 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2105 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
@@ -2311,25 +2294,15 @@ xfs_bmap_extsize_align(
2311 2294
2312#define XFS_ALLOC_GAP_UNITS 4 2295#define XFS_ALLOC_GAP_UNITS 4
2313 2296
2314/*
2315 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
2316 * It figures out where to ask the underlying allocator to put the new extent.
2317 */
2318STATIC int 2297STATIC int
2319xfs_bmap_alloc( 2298xfs_bmap_adjacent(
2320 xfs_bmalloca_t *ap) /* bmap alloc argument struct */ 2299 xfs_bmalloca_t *ap) /* bmap alloc argument struct */
2321{ 2300{
2322 xfs_fsblock_t adjust; /* adjustment to block numbers */ 2301 xfs_fsblock_t adjust; /* adjustment to block numbers */
2323 xfs_alloctype_t atype=0; /* type for allocation routines */
2324 int error; /* error return value */
2325 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ 2302 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
2326 xfs_mount_t *mp; /* mount point structure */ 2303 xfs_mount_t *mp; /* mount point structure */
2327 int nullfb; /* true if ap->firstblock isn't set */ 2304 int nullfb; /* true if ap->firstblock isn't set */
2328 int rt; /* true if inode is realtime */ 2305 int rt; /* true if inode is realtime */
2329 xfs_extlen_t prod = 0; /* product factor for allocators */
2330 xfs_extlen_t ralen = 0; /* realtime allocation length */
2331 xfs_extlen_t align; /* minimum allocation alignment */
2332 xfs_rtblock_t rtx;
2333 2306
2334#define ISVALID(x,y) \ 2307#define ISVALID(x,y) \
2335 (rt ? \ 2308 (rt ? \
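The ISVALID macro kept by the renamed xfs_bmap_adjacent() rejects hint blocks that do not map to a real location: a plain bound check on the realtime device, otherwise an AG-number plus AG-offset check. A standalone model, assuming the usual shift/mask decomposition of a filesystem block number (agshift stands in for sb_agblklog):

/* Valid iff the AG exists and the offset lies inside that AG. */
static int fsb_valid(unsigned long long fsb,
		     unsigned agcount, unsigned agblocks, unsigned agshift)
{
	unsigned long long agno  = fsb >> agshift;
	unsigned long long agbno = fsb & ((1ULL << agshift) - 1);

	return agno < agcount && agbno < agblocks;
}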
@@ -2338,75 +2311,10 @@ xfs_bmap_alloc(
2338 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \ 2311 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
2339 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks) 2312 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
2340 2313
2341 /*
2342 * Set up variables.
2343 */
2344 mp = ap->ip->i_mount; 2314 mp = ap->ip->i_mount;
2345 nullfb = ap->firstblock == NULLFSBLOCK; 2315 nullfb = ap->firstblock == NULLFSBLOCK;
2346 rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata; 2316 rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
2347 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock); 2317 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
2348 if (rt) {
2349 align = ap->ip->i_d.di_extsize ?
2350 ap->ip->i_d.di_extsize : mp->m_sb.sb_rextsize;
2351 /* Set prod to match the extent size */
2352 prod = align / mp->m_sb.sb_rextsize;
2353
2354 error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
2355 align, rt, ap->eof, 0,
2356 ap->conv, &ap->off, &ap->alen);
2357 if (error)
2358 return error;
2359 ASSERT(ap->alen);
2360 ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0);
2361
2362 /*
2363 * If the offset & length are not perfectly aligned
2364 * then kill prod, it will just get us in trouble.
2365 */
2366 if (do_mod(ap->off, align) || ap->alen % align)
2367 prod = 1;
2368 /*
2369 * Set ralen to be the actual requested length in rtextents.
2370 */
2371 ralen = ap->alen / mp->m_sb.sb_rextsize;
2372 /*
2373 * If the old value was close enough to MAXEXTLEN that
2374 * we rounded up to it, cut it back so it's valid again.
2375 * Note that if it's a really large request (bigger than
2376 * MAXEXTLEN), we don't hear about that number, and can't
2377 * adjust the starting point to match it.
2378 */
2379 if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
2380 ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
2381 /*
2382 * If it's an allocation to an empty file at offset 0,
2383 * pick an extent that will space things out in the rt area.
2384 */
2385 if (ap->eof && ap->off == 0) {
2386 error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
2387 if (error)
2388 return error;
2389 ap->rval = rtx * mp->m_sb.sb_rextsize;
2390 } else
2391 ap->rval = 0;
2392 } else {
2393 align = (ap->userdata && ap->ip->i_d.di_extsize &&
2394 (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)) ?
2395 ap->ip->i_d.di_extsize : 0;
2396 if (unlikely(align)) {
2397 error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
2398 align, rt,
2399 ap->eof, 0, ap->conv,
2400 &ap->off, &ap->alen);
2401 ASSERT(!error);
2402 ASSERT(ap->alen);
2403 }
2404 if (nullfb)
2405 ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
2406 else
2407 ap->rval = ap->firstblock;
2408 }
2409
2410 /* 2318 /*
2411 * If allocating at eof, and there's a previous real block, 2319 * If allocating at eof, and there's a previous real block,
2412	 * try to use its last block as our starting point.	2320
@@ -2531,287 +2439,384 @@ xfs_bmap_alloc(
2531 else if (gotbno != NULLFSBLOCK) 2439 else if (gotbno != NULLFSBLOCK)
2532 ap->rval = gotbno; 2440 ap->rval = gotbno;
2533 } 2441 }
2442#undef ISVALID
2443 return 0;
2444}
2445
2446STATIC int
2447xfs_bmap_rtalloc(
2448 xfs_bmalloca_t *ap) /* bmap alloc argument struct */
2449{
2450 xfs_alloctype_t atype = 0; /* type for allocation routines */
2451 int error; /* error return value */
2452 xfs_mount_t *mp; /* mount point structure */
2453 xfs_extlen_t prod = 0; /* product factor for allocators */
2454 xfs_extlen_t ralen = 0; /* realtime allocation length */
2455 xfs_extlen_t align; /* minimum allocation alignment */
2456 xfs_rtblock_t rtx; /* realtime extent number */
2457 xfs_rtblock_t rtb;
2458
2459 mp = ap->ip->i_mount;
2460 align = ap->ip->i_d.di_extsize ?
2461 ap->ip->i_d.di_extsize : mp->m_sb.sb_rextsize;
2462 prod = align / mp->m_sb.sb_rextsize;
2463 error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
2464 align, 1, ap->eof, 0,
2465 ap->conv, &ap->off, &ap->alen);
2466 if (error)
2467 return error;
2468 ASSERT(ap->alen);
2469 ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0);
2470
2471 /*
2472 * If the offset & length are not perfectly aligned
2473 * then kill prod, it will just get us in trouble.
2474 */
2475 if (do_mod(ap->off, align) || ap->alen % align)
2476 prod = 1;
2477 /*
2478 * Set ralen to be the actual requested length in rtextents.
2479 */
2480 ralen = ap->alen / mp->m_sb.sb_rextsize;
2481 /*
2482 * If the old value was close enough to MAXEXTLEN that
2483 * we rounded up to it, cut it back so it's valid again.
2484 * Note that if it's a really large request (bigger than
2485	 * MAXEXTLEN), we are never told the original size, so we can't
2486	 * adjust the starting point to match it.
2487 */
2488 if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
2489 ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
2490 /*
2491 * If it's an allocation to an empty file at offset 0,
2492 * pick an extent that will space things out in the rt area.
2493 */
2494 if (ap->eof && ap->off == 0) {
2495 error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
2496 if (error)
2497 return error;
2498 ap->rval = rtx * mp->m_sb.sb_rextsize;
2499 } else {
2500 ap->rval = 0;
2501 }
2502
2503 xfs_bmap_adjacent(ap);
2504
2505 /*
2506 * Realtime allocation, done through xfs_rtallocate_extent.
2507 */
2508 atype = ap->rval == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
2509 do_div(ap->rval, mp->m_sb.sb_rextsize);
2510 rtb = ap->rval;
2511 ap->alen = ralen;
2512 if ((error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, ap->alen,
2513 &ralen, atype, ap->wasdel, prod, &rtb)))
2514 return error;
2515 if (rtb == NULLFSBLOCK && prod > 1 &&
2516 (error = xfs_rtallocate_extent(ap->tp, ap->rval, 1,
2517 ap->alen, &ralen, atype,
2518 ap->wasdel, 1, &rtb)))
2519 return error;
2520 ap->rval = rtb;
2521 if (ap->rval != NULLFSBLOCK) {
2522 ap->rval *= mp->m_sb.sb_rextsize;
2523 ralen *= mp->m_sb.sb_rextsize;
2524 ap->alen = ralen;
2525 ap->ip->i_d.di_nblocks += ralen;
2526 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
2527 if (ap->wasdel)
2528 ap->ip->i_delayed_blks -= ralen;
2529 /*
2530 * Adjust the disk quota also. This was reserved
2531 * earlier.
2532 */
2533 XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip,
2534 ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
2535 XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
2536 } else {
2537 ap->alen = 0;
2538 }
2539 return 0;
2540}
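xfs_bmap_rtalloc() above sizes requests in realtime-extent units: ralen = alen / rextsize, clamped so the length in blocks stays under MAXEXTLEN after earlier rounding-up. A standalone sketch of the sizing; the cap used here is an assumption for illustration:

#define SKETCH_MAXEXTLEN ((1ULL << 21) - 1)	/* assumed block-count cap */

static unsigned long long rt_request_len(unsigned long long alen,
					 unsigned long long rextsize)
{
	unsigned long long ralen = alen / rextsize;	/* whole rt extents */

	/* If rounding up reached the cap, pull back to a valid length. */
	if (ralen * rextsize >= SKETCH_MAXEXTLEN)
		ralen = SKETCH_MAXEXTLEN / rextsize;
	return ralen;
}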
2541
2542STATIC int
2543xfs_bmap_btalloc(
2544 xfs_bmalloca_t *ap) /* bmap alloc argument struct */
2545{
2546 xfs_mount_t *mp; /* mount point structure */
2547 xfs_alloctype_t atype = 0; /* type for allocation routines */
2548 xfs_extlen_t align; /* minimum allocation alignment */
2549 xfs_agnumber_t ag;
2550 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
2551 xfs_agnumber_t startag;
2552 xfs_alloc_arg_t args;
2553 xfs_extlen_t blen;
2554 xfs_extlen_t delta;
2555 xfs_extlen_t longest;
2556 xfs_extlen_t need;
2557 xfs_extlen_t nextminlen = 0;
2558 xfs_perag_t *pag;
2559 int nullfb; /* true if ap->firstblock isn't set */
2560 int isaligned;
2561 int notinit;
2562 int tryagain;
2563 int error;
2564
2565 mp = ap->ip->i_mount;
2566 align = (ap->userdata && ap->ip->i_d.di_extsize &&
2567 (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)) ?
2568 ap->ip->i_d.di_extsize : 0;
2569 if (unlikely(align)) {
2570 error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
2571 align, 0, ap->eof, 0, ap->conv,
2572 &ap->off, &ap->alen);
2573 ASSERT(!error);
2574 ASSERT(ap->alen);
2575 }
2576 nullfb = ap->firstblock == NULLFSBLOCK;
2577 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
2578 if (nullfb)
2579 ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
2580 else
2581 ap->rval = ap->firstblock;
2582
2583 xfs_bmap_adjacent(ap);
2584
2534 /* 2585 /*
2535 * If allowed, use ap->rval; otherwise must use firstblock since 2586 * If allowed, use ap->rval; otherwise must use firstblock since
2536 * it's in the right allocation group. 2587 * it's in the right allocation group.
2537 */ 2588 */
2538 if (nullfb || rt || XFS_FSB_TO_AGNO(mp, ap->rval) == fb_agno) 2589 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->rval) == fb_agno)
2539 ; 2590 ;
2540 else 2591 else
2541 ap->rval = ap->firstblock; 2592 ap->rval = ap->firstblock;
2542 /* 2593 /*
2543 * Realtime allocation, done through xfs_rtallocate_extent. 2594 * Normal allocation, done through xfs_alloc_vextent.
2544 */ 2595 */
2545 if (rt) { 2596 tryagain = isaligned = 0;
2546#ifndef __KERNEL__ 2597 args.tp = ap->tp;
2547 ASSERT(0); 2598 args.mp = mp;
2548#else 2599 args.fsbno = ap->rval;
2549 xfs_rtblock_t rtb; 2600 args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks);
2550 2601 blen = 0;
2551 atype = ap->rval == 0 ? 2602 if (nullfb) {
2552 XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO; 2603 args.type = XFS_ALLOCTYPE_START_BNO;
2553 do_div(ap->rval, mp->m_sb.sb_rextsize); 2604 args.total = ap->total;
2554 rtb = ap->rval; 2605 /*
2555 ap->alen = ralen; 2606 * Find the longest available space.
2556 if ((error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, ap->alen, 2607 * We're going to try for the whole allocation at once.
2557 &ralen, atype, ap->wasdel, prod, &rtb))) 2608 */
2558 return error; 2609 startag = ag = XFS_FSB_TO_AGNO(mp, args.fsbno);
2559 if (rtb == NULLFSBLOCK && prod > 1 && 2610 notinit = 0;
2560 (error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, 2611 down_read(&mp->m_peraglock);
2561 ap->alen, &ralen, atype, 2612 while (blen < ap->alen) {
2562 ap->wasdel, 1, &rtb))) 2613 pag = &mp->m_perag[ag];
2563 return error; 2614 if (!pag->pagf_init &&
2564 ap->rval = rtb; 2615 (error = xfs_alloc_pagf_init(mp, args.tp,
2565 if (ap->rval != NULLFSBLOCK) { 2616 ag, XFS_ALLOC_FLAG_TRYLOCK))) {
2566 ap->rval *= mp->m_sb.sb_rextsize; 2617 up_read(&mp->m_peraglock);
2567 ralen *= mp->m_sb.sb_rextsize; 2618 return error;
2568 ap->alen = ralen; 2619 }
2569 ap->ip->i_d.di_nblocks += ralen;
2570 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
2571 if (ap->wasdel)
2572 ap->ip->i_delayed_blks -= ralen;
2573 /* 2620 /*
2574 * Adjust the disk quota also. This was reserved 2621 * See xfs_alloc_fix_freelist...
2575 * earlier.
2576 */ 2622 */
2577 XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, 2623 if (pag->pagf_init) {
2578 ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT : 2624 need = XFS_MIN_FREELIST_PAG(pag, mp);
2579 XFS_TRANS_DQ_RTBCOUNT, 2625 delta = need > pag->pagf_flcount ?
2580 (long) ralen); 2626 need - pag->pagf_flcount : 0;
2581 } else 2627 longest = (pag->pagf_longest > delta) ?
2582 ap->alen = 0; 2628 (pag->pagf_longest - delta) :
2583#endif /* __KERNEL__ */ 2629 (pag->pagf_flcount > 0 ||
2630 pag->pagf_longest > 0);
2631 if (blen < longest)
2632 blen = longest;
2633 } else
2634 notinit = 1;
2635 if (++ag == mp->m_sb.sb_agcount)
2636 ag = 0;
2637 if (ag == startag)
2638 break;
2639 }
2640 up_read(&mp->m_peraglock);
2641 /*
2642 * Since the above loop did a BUF_TRYLOCK, it is
2643 * possible that there is space for this request.
2644 */
2645 if (notinit || blen < ap->minlen)
2646 args.minlen = ap->minlen;
2647 /*
2648 * If the best seen length is less than the request
2649 * length, use the best as the minimum.
2650 */
2651 else if (blen < ap->alen)
2652 args.minlen = blen;
2653 /*
2654 * Otherwise we've seen an extent as big as alen,
2655 * use that as the minimum.
2656 */
2657 else
2658 args.minlen = ap->alen;
2659 } else if (ap->low) {
2660 args.type = XFS_ALLOCTYPE_FIRST_AG;
2661 args.total = args.minlen = ap->minlen;
2662 } else {
2663 args.type = XFS_ALLOCTYPE_NEAR_BNO;
2664 args.total = ap->total;
2665 args.minlen = ap->minlen;
2666 }
2667 if (unlikely(ap->userdata && ap->ip->i_d.di_extsize &&
2668 (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE))) {
2669 args.prod = ap->ip->i_d.di_extsize;
2670 if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod)))
2671 args.mod = (xfs_extlen_t)(args.prod - args.mod);
2672 } else if (unlikely(mp->m_sb.sb_blocksize >= NBPP)) {
2673 args.prod = 1;
2674 args.mod = 0;
2675 } else {
2676 args.prod = NBPP >> mp->m_sb.sb_blocklog;
2677 if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod))))
2678 args.mod = (xfs_extlen_t)(args.prod - args.mod);
2584 } 2679 }
2585 /* 2680 /*
2586 * Normal allocation, done through xfs_alloc_vextent. 2681 * If we are not low on available data blocks, and the
2682 * underlying logical volume manager is a stripe, and
2683 * the file offset is zero then try to allocate data
2684 * blocks on stripe unit boundary.
2685 * NOTE: ap->aeof is only set if the allocation length
2686 * is >= the stripe unit and the allocation offset is
2687 * at the end of file.
2587 */ 2688 */
2588 else { 2689 if (!ap->low && ap->aeof) {
2589 xfs_agnumber_t ag; 2690 if (!ap->off) {
2590 xfs_alloc_arg_t args; 2691 args.alignment = mp->m_dalign;
2591 xfs_extlen_t blen; 2692 atype = args.type;
2592 xfs_extlen_t delta; 2693 isaligned = 1;
2593 int isaligned;
2594 xfs_extlen_t longest;
2595 xfs_extlen_t need;
2596 xfs_extlen_t nextminlen=0;
2597 int notinit;
2598 xfs_perag_t *pag;
2599 xfs_agnumber_t startag;
2600 int tryagain;
2601
2602 tryagain = isaligned = 0;
2603 args.tp = ap->tp;
2604 args.mp = mp;
2605 args.fsbno = ap->rval;
2606 args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks);
2607 blen = 0;
2608 if (nullfb) {
2609 args.type = XFS_ALLOCTYPE_START_BNO;
2610 args.total = ap->total;
2611 /*
2612 * Find the longest available space.
2613 * We're going to try for the whole allocation at once.
2614 */
2615 startag = ag = XFS_FSB_TO_AGNO(mp, args.fsbno);
2616 notinit = 0;
2617 down_read(&mp->m_peraglock);
2618 while (blen < ap->alen) {
2619 pag = &mp->m_perag[ag];
2620 if (!pag->pagf_init &&
2621 (error = xfs_alloc_pagf_init(mp, args.tp,
2622 ag, XFS_ALLOC_FLAG_TRYLOCK))) {
2623 up_read(&mp->m_peraglock);
2624 return error;
2625 }
2626 /*
2627 * See xfs_alloc_fix_freelist...
2628 */
2629 if (pag->pagf_init) {
2630 need = XFS_MIN_FREELIST_PAG(pag, mp);
2631 delta = need > pag->pagf_flcount ?
2632 need - pag->pagf_flcount : 0;
2633 longest = (pag->pagf_longest > delta) ?
2634 (pag->pagf_longest - delta) :
2635 (pag->pagf_flcount > 0 ||
2636 pag->pagf_longest > 0);
2637 if (blen < longest)
2638 blen = longest;
2639 } else
2640 notinit = 1;
2641 if (++ag == mp->m_sb.sb_agcount)
2642 ag = 0;
2643 if (ag == startag)
2644 break;
2645 }
2646 up_read(&mp->m_peraglock);
2647 /* 2694 /*
2648 * Since the above loop did a BUF_TRYLOCK, it is 2695 * Adjust for alignment
2649 * possible that there is space for this request.
2650 */ 2696 */
2651 if (notinit || blen < ap->minlen) 2697 if (blen > args.alignment && blen <= ap->alen)
2652 args.minlen = ap->minlen; 2698 args.minlen = blen - args.alignment;
2699 args.minalignslop = 0;
2700 } else {
2653 /* 2701 /*
2654 * If the best seen length is less than the request 2702 * First try an exact bno allocation.
2655 * length, use the best as the minimum. 2703 * If it fails then do a near or start bno
2704 * allocation with alignment turned on.
2656 */ 2705 */
2657 else if (blen < ap->alen) 2706 atype = args.type;
2658 args.minlen = blen; 2707 tryagain = 1;
2708 args.type = XFS_ALLOCTYPE_THIS_BNO;
2709 args.alignment = 1;
2659 /* 2710 /*
2660 * Otherwise we've seen an extent as big as alen, 2711 * Compute the minlen+alignment for the
2661 * use that as the minimum. 2712 * next case. Set slop so that the value
2713 * of minlen+alignment+slop doesn't go up
2714 * between the calls.
2662 */ 2715 */
2716 if (blen > mp->m_dalign && blen <= ap->alen)
2717 nextminlen = blen - mp->m_dalign;
2663 else 2718 else
2664 args.minlen = ap->alen; 2719 nextminlen = args.minlen;
2665 } else if (ap->low) { 2720 if (nextminlen + mp->m_dalign > args.minlen + 1)
2666 args.type = XFS_ALLOCTYPE_FIRST_AG; 2721 args.minalignslop =
2667 args.total = args.minlen = ap->minlen; 2722 nextminlen + mp->m_dalign -
2668 } else { 2723 args.minlen - 1;
2669 args.type = XFS_ALLOCTYPE_NEAR_BNO; 2724 else
2670 args.total = ap->total; 2725 args.minalignslop = 0;
2671 args.minlen = ap->minlen;
2672 }
2673 if (unlikely(ap->userdata && ap->ip->i_d.di_extsize &&
2674 (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE))) {
2675 args.prod = ap->ip->i_d.di_extsize;
2676 if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod)))
2677 args.mod = (xfs_extlen_t)(args.prod - args.mod);
2678 } else if (unlikely(mp->m_sb.sb_blocksize >= NBPP)) {
2679 args.prod = 1;
2680 args.mod = 0;
2681 } else {
2682 args.prod = NBPP >> mp->m_sb.sb_blocklog;
2683 if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod))))
2684 args.mod = (xfs_extlen_t)(args.prod - args.mod);
2685 } 2726 }
2727 } else {
2728 args.alignment = 1;
2729 args.minalignslop = 0;
2730 }
2731 args.minleft = ap->minleft;
2732 args.wasdel = ap->wasdel;
2733 args.isfl = 0;
2734 args.userdata = ap->userdata;
2735 if ((error = xfs_alloc_vextent(&args)))
2736 return error;
2737 if (tryagain && args.fsbno == NULLFSBLOCK) {
2686 /* 2738 /*
2687 * If we are not low on available data blocks, and the 2739 * Exact allocation failed. Now try with alignment
2688 * underlying logical volume manager is a stripe, and 2740 * turned on.
2689 * the file offset is zero then try to allocate data
2690 * blocks on stripe unit boundary.
2691 * NOTE: ap->aeof is only set if the allocation length
2692 * is >= the stripe unit and the allocation offset is
2693 * at the end of file.
2694 */ 2741 */
2695 if (!ap->low && ap->aeof) { 2742 args.type = atype;
2696 if (!ap->off) { 2743 args.fsbno = ap->rval;
2697 args.alignment = mp->m_dalign; 2744 args.alignment = mp->m_dalign;
2698 atype = args.type; 2745 args.minlen = nextminlen;
2699 isaligned = 1; 2746 args.minalignslop = 0;
2700 /* 2747 isaligned = 1;
2701 * Adjust for alignment 2748 if ((error = xfs_alloc_vextent(&args)))
2702 */ 2749 return error;
2703 if (blen > args.alignment && blen <= ap->alen) 2750 }
2704 args.minlen = blen - args.alignment; 2751 if (isaligned && args.fsbno == NULLFSBLOCK) {
2705 args.minalignslop = 0; 2752 /*
2706 } else { 2753 * allocation failed, so turn off alignment and
2707 /* 2754 * try again.
2708 * First try an exact bno allocation. 2755 */
2709 * If it fails then do a near or start bno 2756 args.type = atype;
2710 * allocation with alignment turned on. 2757 args.fsbno = ap->rval;
2711 */ 2758 args.alignment = 0;
2712 atype = args.type; 2759 if ((error = xfs_alloc_vextent(&args)))
2713 tryagain = 1; 2760 return error;
2714 args.type = XFS_ALLOCTYPE_THIS_BNO; 2761 }
2715 args.alignment = 1; 2762 if (args.fsbno == NULLFSBLOCK && nullfb &&
2716 /* 2763 args.minlen > ap->minlen) {
2717 * Compute the minlen+alignment for the 2764 args.minlen = ap->minlen;
2718 * next case. Set slop so that the value 2765 args.type = XFS_ALLOCTYPE_START_BNO;
2719 * of minlen+alignment+slop doesn't go up 2766 args.fsbno = ap->rval;
2720 * between the calls.
2721 */
2722 if (blen > mp->m_dalign && blen <= ap->alen)
2723 nextminlen = blen - mp->m_dalign;
2724 else
2725 nextminlen = args.minlen;
2726 if (nextminlen + mp->m_dalign > args.minlen + 1)
2727 args.minalignslop =
2728 nextminlen + mp->m_dalign -
2729 args.minlen - 1;
2730 else
2731 args.minalignslop = 0;
2732 }
2733 } else {
2734 args.alignment = 1;
2735 args.minalignslop = 0;
2736 }
2737 args.minleft = ap->minleft;
2738 args.wasdel = ap->wasdel;
2739 args.isfl = 0;
2740 args.userdata = ap->userdata;
2741 if ((error = xfs_alloc_vextent(&args))) 2767 if ((error = xfs_alloc_vextent(&args)))
2742 return error; 2768 return error;
2743 if (tryagain && args.fsbno == NULLFSBLOCK) { 2769 }
2744 /* 2770 if (args.fsbno == NULLFSBLOCK && nullfb) {
2745 * Exact allocation failed. Now try with alignment 2771 args.fsbno = 0;
2746 * turned on. 2772 args.type = XFS_ALLOCTYPE_FIRST_AG;
2747 */ 2773 args.total = ap->minlen;
2748 args.type = atype; 2774 args.minleft = 0;
2749 args.fsbno = ap->rval; 2775 if ((error = xfs_alloc_vextent(&args)))
2750 args.alignment = mp->m_dalign; 2776 return error;
2751 args.minlen = nextminlen; 2777 ap->low = 1;
2752 args.minalignslop = 0; 2778 }
2753 isaligned = 1; 2779 if (args.fsbno != NULLFSBLOCK) {
2754 if ((error = xfs_alloc_vextent(&args))) 2780 ap->firstblock = ap->rval = args.fsbno;
2755 return error; 2781 ASSERT(nullfb || fb_agno == args.agno ||
2756 } 2782 (ap->low && fb_agno < args.agno));
2757 if (isaligned && args.fsbno == NULLFSBLOCK) { 2783 ap->alen = args.len;
2758 /* 2784 ap->ip->i_d.di_nblocks += args.len;
2759 * allocation failed, so turn off alignment and 2785 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
2760 * try again. 2786 if (ap->wasdel)
2761 */ 2787 ap->ip->i_delayed_blks -= args.len;
2762 args.type = atype; 2788 /*
2763 args.fsbno = ap->rval; 2789 * Adjust the disk quota also. This was reserved
2764 args.alignment = 0; 2790 * earlier.
2765 if ((error = xfs_alloc_vextent(&args))) 2791 */
2766 return error; 2792 XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip,
2767 } 2793 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
2768 if (args.fsbno == NULLFSBLOCK && nullfb && 2794 XFS_TRANS_DQ_BCOUNT,
2769 args.minlen > ap->minlen) { 2795 (long) args.len);
2770 args.minlen = ap->minlen; 2796 } else {
2771 args.type = XFS_ALLOCTYPE_START_BNO; 2797 ap->rval = NULLFSBLOCK;
2772 args.fsbno = ap->rval; 2798 ap->alen = 0;
2773 if ((error = xfs_alloc_vextent(&args)))
2774 return error;
2775 }
2776 if (args.fsbno == NULLFSBLOCK && nullfb) {
2777 args.fsbno = 0;
2778 args.type = XFS_ALLOCTYPE_FIRST_AG;
2779 args.total = ap->minlen;
2780 args.minleft = 0;
2781 if ((error = xfs_alloc_vextent(&args)))
2782 return error;
2783 ap->low = 1;
2784 }
2785 if (args.fsbno != NULLFSBLOCK) {
2786 ap->firstblock = ap->rval = args.fsbno;
2787 ASSERT(nullfb || fb_agno == args.agno ||
2788 (ap->low && fb_agno < args.agno));
2789 ap->alen = args.len;
2790 ap->ip->i_d.di_nblocks += args.len;
2791 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
2792 if (ap->wasdel)
2793 ap->ip->i_delayed_blks -= args.len;
2794 /*
2795 * Adjust the disk quota also. This was reserved
2796 * earlier.
2797 */
2798 XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip,
2799 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
2800 XFS_TRANS_DQ_BCOUNT,
2801 (long) args.len);
2802 } else {
2803 ap->rval = NULLFSBLOCK;
2804 ap->alen = 0;
2805 }
2806 } 2799 }
2807 return 0; 2800 return 0;
2808#undef ISVALID 2801}
2802
2803/*
2804 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
2805 * It figures out where to ask the underlying allocator to put the new extent.
2806 */
2807STATIC int
2808xfs_bmap_alloc(
2809 xfs_bmalloca_t *ap) /* bmap alloc argument struct */
2810{
2811 if ((ap->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) && ap->userdata)
2812 return xfs_bmap_rtalloc(ap);
2813 return xfs_bmap_btalloc(ap);
2809} 2814}
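The net effect of the split shows in the new wrapper above: xfs_bmap_alloc() is now a two-way dispatch, and the whole retry policy lives in the btree allocator, which relaxes one constraint per attempt. A standalone model of that ladder; every helper name here is hypothetical:

struct sketch_args { int done; };	/* placeholder argument block */

static int try_exact_bno(struct sketch_args *a)      { return a->done; }
static int try_aligned_near(struct sketch_args *a)   { return a->done; }
static int try_unaligned_near(struct sketch_args *a) { return a->done; }
static int try_smaller_minlen(struct sketch_args *a) { return a->done; }
static int try_first_ag(struct sketch_args *a)       { (void)a; return 1; }

/* Each step mirrors one retry in xfs_bmap_btalloc(), in order. */
static int alloc_with_fallbacks(struct sketch_args *a)
{
	if (try_exact_bno(a))		/* THIS_BNO, alignment 1 */
		return 0;
	if (try_aligned_near(a))	/* same hint, stripe aligned */
		return 0;
	if (try_unaligned_near(a))	/* alignment turned off */
		return 0;
	if (try_smaller_minlen(a))	/* START_BNO, minlen = ap->minlen */
		return 0;
	return try_first_ag(a) ? 0 : -1;	/* last resort, sets ap->low */
}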
2810 2815
2811/* 2816/*
2812 * Transform a btree format file with only one leaf node, where the 2817 * Transform a btree format file with only one leaf node, where the
2813 * extents list will fit in the inode, into an extents format file. 2818 * extents list will fit in the inode, into an extents format file.
2814 * Since the extent list is already in-core, all we have to do is 2819 * Since the file extents are already in-core, all we have to do is
2815 * give up the space for the btree root and pitch the leaf block. 2820 * give up the space for the btree root and pitch the leaf block.
2816 */ 2821 */
2817STATIC int /* error */ 2822STATIC int /* error */
@@ -2868,7 +2873,7 @@ xfs_bmap_btree_to_extents(
2868} 2873}
2869 2874
2870/* 2875/*
2871 * Called by xfs_bmapi to update extent list structure and the btree 2876 * Called by xfs_bmapi to update file extent records and the btree
2872 * after removing space (or undoing a delayed allocation). 2877 * after removing space (or undoing a delayed allocation).
2873 */ 2878 */
2874STATIC int /* error */ 2879STATIC int /* error */
@@ -2878,7 +2883,7 @@ xfs_bmap_del_extent(
2878 xfs_extnum_t idx, /* extent number to update/delete */ 2883 xfs_extnum_t idx, /* extent number to update/delete */
2879 xfs_bmap_free_t *flist, /* list of extents to be freed */ 2884 xfs_bmap_free_t *flist, /* list of extents to be freed */
2880 xfs_btree_cur_t *cur, /* if null, not a btree */ 2885 xfs_btree_cur_t *cur, /* if null, not a btree */
2881 xfs_bmbt_irec_t *del, /* data to remove from extent list */ 2886 xfs_bmbt_irec_t *del, /* data to remove from extents */
2882 int *logflagsp, /* inode logging flags */ 2887 int *logflagsp, /* inode logging flags */
2883 int whichfork, /* data or attr fork */ 2888 int whichfork, /* data or attr fork */
2884 int rsvd) /* OK to allocate reserved blocks */ 2889 int rsvd) /* OK to allocate reserved blocks */
@@ -2903,7 +2908,6 @@ xfs_bmap_del_extent(
2903 xfs_filblks_t nblks; /* quota/sb block count */ 2908 xfs_filblks_t nblks; /* quota/sb block count */
2904 xfs_bmbt_irec_t new; /* new record to be inserted */ 2909 xfs_bmbt_irec_t new; /* new record to be inserted */
2905 /* REFERENCED */ 2910 /* REFERENCED */
2906 xfs_extnum_t nextents; /* number of extents in list */
2907 uint qfield; /* quota field to update */ 2911 uint qfield; /* quota field to update */
2908 xfs_filblks_t temp; /* for indirect length calculations */ 2912 xfs_filblks_t temp; /* for indirect length calculations */
2909 xfs_filblks_t temp2; /* for indirect length calculations */ 2913 xfs_filblks_t temp2; /* for indirect length calculations */
@@ -2911,10 +2915,10 @@ xfs_bmap_del_extent(
2911 XFS_STATS_INC(xs_del_exlist); 2915 XFS_STATS_INC(xs_del_exlist);
2912 mp = ip->i_mount; 2916 mp = ip->i_mount;
2913 ifp = XFS_IFORK_PTR(ip, whichfork); 2917 ifp = XFS_IFORK_PTR(ip, whichfork);
2914 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 2918 ASSERT((idx >= 0) && (idx < ifp->if_bytes /
2915 ASSERT(idx >= 0 && idx < nextents); 2919 (uint)sizeof(xfs_bmbt_rec_t)));
2916 ASSERT(del->br_blockcount > 0); 2920 ASSERT(del->br_blockcount > 0);
2917 ep = &ifp->if_u1.if_extents[idx]; 2921 ep = xfs_iext_get_ext(ifp, idx);
2918 xfs_bmbt_get_all(ep, &got); 2922 xfs_bmbt_get_all(ep, &got);
2919 ASSERT(got.br_startoff <= del->br_startoff); 2923 ASSERT(got.br_startoff <= del->br_startoff);
2920 del_endoff = del->br_startoff + del->br_blockcount; 2924 del_endoff = del->br_startoff + del->br_blockcount;
@@ -2990,7 +2994,7 @@ xfs_bmap_del_extent(
2990 * Matches the whole extent. Delete the entry. 2994 * Matches the whole extent. Delete the entry.
2991 */ 2995 */
2992 xfs_bmap_trace_delete(fname, "3", ip, idx, 1, whichfork); 2996 xfs_bmap_trace_delete(fname, "3", ip, idx, 1, whichfork);
2993 xfs_bmap_delete_exlist(ip, idx, 1, whichfork); 2997 xfs_iext_remove(ifp, idx, 1);
2994 ifp->if_lastex = idx; 2998 ifp->if_lastex = idx;
2995 if (delay) 2999 if (delay)
2996 break; 3000 break;
@@ -3160,7 +3164,7 @@ xfs_bmap_del_extent(
3160 xfs_bmap_trace_post_update(fname, "0", ip, idx, whichfork); 3164 xfs_bmap_trace_post_update(fname, "0", ip, idx, whichfork);
3161 xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 1, &new, NULL, 3165 xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 1, &new, NULL,
3162 whichfork); 3166 whichfork);
3163 xfs_bmap_insert_exlist(ip, idx + 1, 1, &new, whichfork); 3167 xfs_iext_insert(ifp, idx + 1, 1, &new);
3164 ifp->if_lastex = idx + 1; 3168 ifp->if_lastex = idx + 1;
3165 break; 3169 break;
3166 } 3170 }
@@ -3213,31 +3217,6 @@ xfs_bmap_del_free(
3213} 3217}
3214 3218
3215/* 3219/*
3216 * Remove count entries from the extents array for inode "ip", starting
3217 * at index "idx". Copies the remaining items down over the deleted ones,
3218 * and gives back the excess memory.
3219 */
3220STATIC void
3221xfs_bmap_delete_exlist(
3222 xfs_inode_t *ip, /* incore inode pointer */
3223 xfs_extnum_t idx, /* starting delete index */
3224 xfs_extnum_t count, /* count of items to delete */
3225 int whichfork) /* data or attr fork */
3226{
3227 xfs_bmbt_rec_t *base; /* base of extent list */
3228 xfs_ifork_t *ifp; /* inode fork pointer */
3229 xfs_extnum_t nextents; /* number of extents in list after */
3230
3231 ifp = XFS_IFORK_PTR(ip, whichfork);
3232 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3233 base = ifp->if_u1.if_extents;
3234 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - count;
3235 memmove(&base[idx], &base[idx + count],
3236 (nextents - idx) * sizeof(*base));
3237 xfs_iext_realloc(ip, -count, whichfork);
3238}
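The helper deleted above shows what the flat layout made possible: removal was a single memmove plus a shrink, and xfs_iext_remove() keeps that contract behind the accessor. A standalone model of the old behaviour:

#include <string.h>

struct flat_rec { unsigned long long l0, l1; };

/* Shift the tail down over [idx, idx + count) and return the new
 * length; the caller gives back the freed memory, as the helper did. */
static size_t flat_remove(struct flat_rec *v, size_t n,
			  size_t idx, size_t count)
{
	memmove(&v[idx], &v[idx + count], (n - idx - count) * sizeof(*v));
	return n - count;
}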
3239
3240/*
3241 * Convert an extents-format file into a btree-format file. 3220 * Convert an extents-format file into a btree-format file.
3242 * The new file will have a root block (in the inode) and a single child block. 3221 * The new file will have a root block (in the inode) and a single child block.
3243 */ 3222 */
@@ -3258,13 +3237,13 @@ xfs_bmap_extents_to_btree(
3258 xfs_bmbt_rec_t *arp; /* child record pointer */ 3237 xfs_bmbt_rec_t *arp; /* child record pointer */
3259 xfs_bmbt_block_t *block; /* btree root block */ 3238 xfs_bmbt_block_t *block; /* btree root block */
3260 xfs_btree_cur_t *cur; /* bmap btree cursor */ 3239 xfs_btree_cur_t *cur; /* bmap btree cursor */
3261 xfs_bmbt_rec_t *ep; /* extent list pointer */ 3240 xfs_bmbt_rec_t *ep; /* extent record pointer */
3262 int error; /* error return value */ 3241 int error; /* error return value */
3263 xfs_extnum_t i, cnt; /* extent list index */ 3242 xfs_extnum_t i, cnt; /* extent record index */
3264 xfs_ifork_t *ifp; /* inode fork pointer */ 3243 xfs_ifork_t *ifp; /* inode fork pointer */
3265 xfs_bmbt_key_t *kp; /* root block key pointer */ 3244 xfs_bmbt_key_t *kp; /* root block key pointer */
3266 xfs_mount_t *mp; /* mount structure */ 3245 xfs_mount_t *mp; /* mount structure */
3267 xfs_extnum_t nextents; /* extent list size */ 3246 xfs_extnum_t nextents; /* number of file extents */
3268 xfs_bmbt_ptr_t *pp; /* root block address pointer */ 3247 xfs_bmbt_ptr_t *pp; /* root block address pointer */
3269 3248
3270 ifp = XFS_IFORK_PTR(ip, whichfork); 3249 ifp = XFS_IFORK_PTR(ip, whichfork);
@@ -3343,7 +3322,8 @@ xfs_bmap_extents_to_btree(
3343 ablock->bb_rightsib = cpu_to_be64(NULLDFSBNO); 3322 ablock->bb_rightsib = cpu_to_be64(NULLDFSBNO);
3344 arp = XFS_BMAP_REC_IADDR(ablock, 1, cur); 3323 arp = XFS_BMAP_REC_IADDR(ablock, 1, cur);
3345 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3324 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3346 for (ep = ifp->if_u1.if_extents, cnt = i = 0; i < nextents; i++, ep++) { 3325 for (cnt = i = 0; i < nextents; i++) {
3326 ep = xfs_iext_get_ext(ifp, i);
3347 if (!ISNULLSTARTBLOCK(xfs_bmbt_get_startblock(ep))) { 3327 if (!ISNULLSTARTBLOCK(xfs_bmbt_get_startblock(ep))) {
3348 arp->l0 = INT_GET(ep->l0, ARCH_CONVERT); 3328 arp->l0 = INT_GET(ep->l0, ARCH_CONVERT);
3349 arp->l1 = INT_GET(ep->l1, ARCH_CONVERT); 3329 arp->l1 = INT_GET(ep->l1, ARCH_CONVERT);
@@ -3373,34 +3353,6 @@ xfs_bmap_extents_to_btree(
3373} 3353}
3374 3354
3375/* 3355/*
3376 * Insert new item(s) in the extent list for inode "ip".
3377 * Count new items are inserted at offset idx.
3378 */
3379STATIC void
3380xfs_bmap_insert_exlist(
3381 xfs_inode_t *ip, /* incore inode pointer */
3382 xfs_extnum_t idx, /* starting index of new items */
3383 xfs_extnum_t count, /* number of inserted items */
3384 xfs_bmbt_irec_t *new, /* items to insert */
3385 int whichfork) /* data or attr fork */
3386{
3387 xfs_bmbt_rec_t *base; /* extent list base */
3388 xfs_ifork_t *ifp; /* inode fork pointer */
3389 xfs_extnum_t nextents; /* extent list size */
3390 xfs_extnum_t to; /* extent list index */
3391
3392 ifp = XFS_IFORK_PTR(ip, whichfork);
3393 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3394 xfs_iext_realloc(ip, count, whichfork);
3395 base = ifp->if_u1.if_extents;
3396 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3397 memmove(&base[idx + count], &base[idx],
3398 (nextents - (idx + count)) * sizeof(*base));
3399 for (to = idx; to < idx + count; to++, new++)
3400 xfs_bmbt_set_all(&base[to], new);
3401}
3402
3403/*
3404 * Helper routine to reset inode di_forkoff field when switching 3356 * Helper routine to reset inode di_forkoff field when switching
3405 * attribute fork from local to extent format - we reset it where 3357 * attribute fork from local to extent format - we reset it where
3406 * possible to make space available for inline data fork extents. 3358 * possible to make space available for inline data fork extents.
@@ -3457,12 +3409,13 @@ xfs_bmap_local_to_extents(
3457 error = 0; 3409 error = 0;
3458 if (ifp->if_bytes) { 3410 if (ifp->if_bytes) {
3459 xfs_alloc_arg_t args; /* allocation arguments */ 3411 xfs_alloc_arg_t args; /* allocation arguments */
3460 xfs_buf_t *bp; /* buffer for extent list block */ 3412 xfs_buf_t *bp; /* buffer for extent block */
3461 xfs_bmbt_rec_t *ep; /* extent list pointer */ 3413 xfs_bmbt_rec_t *ep; /* extent record pointer */
3462 3414
3463 args.tp = tp; 3415 args.tp = tp;
3464 args.mp = ip->i_mount; 3416 args.mp = ip->i_mount;
3465 ASSERT(ifp->if_flags & XFS_IFINLINE); 3417 ASSERT((ifp->if_flags &
3418 (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE);
3466 /* 3419 /*
3467 * Allocate a block. We know we need only one, since the 3420 * Allocate a block. We know we need only one, since the
3468 * file currently fits in an inode. 3421 * file currently fits in an inode.
@@ -3492,8 +3445,8 @@ xfs_bmap_local_to_extents(
3492 xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1); 3445 xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
3493 xfs_bmap_forkoff_reset(args.mp, ip, whichfork); 3446 xfs_bmap_forkoff_reset(args.mp, ip, whichfork);
3494 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); 3447 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
3495 xfs_iext_realloc(ip, 1, whichfork); 3448 xfs_iext_add(ifp, 0, 1);
3496 ep = ifp->if_u1.if_extents; 3449 ep = xfs_iext_get_ext(ifp, 0);
3497 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM); 3450 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
3498 xfs_bmap_trace_post_update(fname, "new", ip, 0, whichfork); 3451 xfs_bmap_trace_post_update(fname, "new", ip, 0, whichfork);
3499 XFS_IFORK_NEXT_SET(ip, whichfork, 1); 3452 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
@@ -3518,7 +3471,7 @@ xfs_bmbt_rec_t * /* pointer to found extent entry */
3518xfs_bmap_do_search_extents( 3471xfs_bmap_do_search_extents(
3519 xfs_bmbt_rec_t *base, /* base of extent list */ 3472 xfs_bmbt_rec_t *base, /* base of extent list */
3520 xfs_extnum_t lastx, /* last extent index used */ 3473 xfs_extnum_t lastx, /* last extent index used */
3521 xfs_extnum_t nextents, /* extent list size */ 3474 xfs_extnum_t nextents, /* number of file extents */
3522 xfs_fileoff_t bno, /* block number searched for */ 3475 xfs_fileoff_t bno, /* block number searched for */
3523 int *eofp, /* out: end of file found */ 3476 int *eofp, /* out: end of file found */
3524 xfs_extnum_t *lastxp, /* out: last extent index */ 3477 xfs_extnum_t *lastxp, /* out: last extent index */
@@ -3569,9 +3522,9 @@ xfs_bmap_do_search_extents(
3569 got.br_blockcount = xfs_bmbt_get_blockcount(ep); 3522 got.br_blockcount = xfs_bmbt_get_blockcount(ep);
3570 *eofp = 0; 3523 *eofp = 0;
3571 } else { 3524 } else {
3572 /* binary search the extents array */
3573 low = 0; 3525 low = 0;
3574 high = nextents - 1; 3526 high = nextents - 1;
3527 /* binary search the extents array */
3575 while (low <= high) { 3528 while (low <= high) {
3576 XFS_STATS_INC(xs_cmp_exlist); 3529 XFS_STATS_INC(xs_cmp_exlist);
3577 lastx = (low + high) >> 1; 3530 lastx = (low + high) >> 1;
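The search this hunk touches is a lower-bound binary search over records ordered by startoff. A standalone model that returns the first record ending past bno, i.e. the containing record or, for a hole, its successor:

#include <stddef.h>

struct srch_rec { unsigned long long startoff, blockcount; };

static size_t ext_lower_bound(const struct srch_rec *v, size_t n,
			      unsigned long long bno)
{
	size_t lo = 0, hi = n;			/* half-open [lo, hi) */

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (v[mid].startoff + v[mid].blockcount <= bno)
			lo = mid + 1;		/* ends at or before bno */
		else
			hi = mid;
	}
	return lo;	/* lo == n means bno is past the last record */
}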
@@ -3622,6 +3575,57 @@ xfs_bmap_do_search_extents(
3622} 3575}
3623 3576
3624/* 3577/*
3578 * Search the extent records for the entry containing block bno.
3579 * If bno lies in a hole, point to the next entry. If bno lies
3580 * past eof, *eofp will be set, and *prevp will contain the last
3581 * entry (null if none). Else, *lastxp will be set to the index
3582 * of the found entry; *gotp will contain the entry.
3583 */
3584xfs_bmbt_rec_t * /* pointer to found extent entry */
3585xfs_bmap_search_multi_extents(
3586 xfs_ifork_t *ifp, /* inode fork pointer */
3587 xfs_fileoff_t bno, /* block number searched for */
3588 int *eofp, /* out: end of file found */
3589 xfs_extnum_t *lastxp, /* out: last extent index */
3590 xfs_bmbt_irec_t *gotp, /* out: extent entry found */
3591 xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */
3592{
3593 xfs_bmbt_rec_t *ep; /* extent record pointer */
3594 xfs_extnum_t lastx; /* last extent index */
3595
3596 /*
3597 * Initialize the extent entry structure to catch access to
3598	 * an uninitialized br_startblock field.
3599 */
3600 gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL;
3601 gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL;
3602 gotp->br_state = XFS_EXT_INVALID;
3603#if XFS_BIG_BLKNOS
3604 gotp->br_startblock = 0xffffa5a5a5a5a5a5LL;
3605#else
3606 gotp->br_startblock = 0xffffa5a5;
3607#endif
3608 prevp->br_startoff = NULLFILEOFF;
3609
3610 ep = xfs_iext_bno_to_ext(ifp, bno, &lastx);
3611 if (lastx > 0) {
3612 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp);
3613 }
3614 if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
3615 xfs_bmbt_get_all(ep, gotp);
3616 *eofp = 0;
3617 } else {
3618 if (lastx > 0) {
3619 *gotp = *prevp;
3620 }
3621 *eofp = 1;
3622 ep = NULL;
3623 }
3624 *lastxp = lastx;
3625 return ep;
3626}
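The 0xa5-pattern fills at the top of xfs_bmap_search_multi_extents() are canaries: a caller that consumes *gotp without checking *eofp sees conspicuous garbage instead of plausible stale values. The same idea in miniature:

#include <string.h>

struct canary_rec { unsigned long long startoff, startblock, blockcount; };

/* Poison before the lookup; recognisable garbage beats silent zeros. */
static void poison_rec(struct canary_rec *r)
{
	memset(r, 0xa5, sizeof(*r));
}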
3627
3628/*
3625 * Search the extents list for the inode, for the extent containing bno. 3629 * Search the extents list for the inode, for the extent containing bno.
3626 * If bno lies in a hole, point to the next entry. If bno lies past eof, 3630 * If bno lies in a hole, point to the next entry. If bno lies past eof,
3627 * *eofp will be set, and *prevp will contain the last entry (null if none). 3631 * *eofp will be set, and *prevp will contain the last entry (null if none).
@@ -3639,20 +3643,14 @@ xfs_bmap_search_extents(
3639 xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */ 3643 xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */
3640{ 3644{
3641 xfs_ifork_t *ifp; /* inode fork pointer */ 3645 xfs_ifork_t *ifp; /* inode fork pointer */
3642 xfs_bmbt_rec_t *base; /* base of extent list */ 3646 xfs_bmbt_rec_t *ep; /* extent record pointer */
3643 xfs_extnum_t lastx; /* last extent index used */
3644 xfs_extnum_t nextents; /* extent list size */
3645 xfs_bmbt_rec_t *ep; /* extent list entry pointer */
3646 int rt; /* realtime flag */ 3647 int rt; /* realtime flag */
3647 3648
3648 XFS_STATS_INC(xs_look_exlist); 3649 XFS_STATS_INC(xs_look_exlist);
3649 ifp = XFS_IFORK_PTR(ip, whichfork); 3650 ifp = XFS_IFORK_PTR(ip, whichfork);
3650 lastx = ifp->if_lastex;
3651 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3652 base = &ifp->if_u1.if_extents[0];
3653 3651
3654 ep = xfs_bmap_do_search_extents(base, lastx, nextents, bno, eofp, 3652 ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp);
3655 lastxp, gotp, prevp); 3653
3656 rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 3654 rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
3657 if (unlikely(!rt && !gotp->br_startblock && (*lastxp != NULLEXTNUM))) { 3655 if (unlikely(!rt && !gotp->br_startblock && (*lastxp != NULLEXTNUM))) {
3658 cmn_err(CE_PANIC,"Access to block zero: fs: <%s> inode: %lld " 3656 cmn_err(CE_PANIC,"Access to block zero: fs: <%s> inode: %lld "
@@ -3732,7 +3730,7 @@ xfs_bmap_trace_addentry(
3732} 3730}
3733 3731
3734/* 3732/*
3735 * Add bmap trace entry prior to a call to xfs_bmap_delete_exlist. 3733 * Add bmap trace entry prior to a call to xfs_iext_remove.
3736 */ 3734 */
3737STATIC void 3735STATIC void
3738xfs_bmap_trace_delete( 3736xfs_bmap_trace_delete(
@@ -3747,13 +3745,13 @@ xfs_bmap_trace_delete(
3747 3745
3748 ifp = XFS_IFORK_PTR(ip, whichfork); 3746 ifp = XFS_IFORK_PTR(ip, whichfork);
3749 xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_DELETE, fname, desc, ip, idx, 3747 xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_DELETE, fname, desc, ip, idx,
3750 cnt, &ifp->if_u1.if_extents[idx], 3748 cnt, xfs_iext_get_ext(ifp, idx),
3751 cnt == 2 ? &ifp->if_u1.if_extents[idx + 1] : NULL, 3749 cnt == 2 ? xfs_iext_get_ext(ifp, idx + 1) : NULL,
3752 whichfork); 3750 whichfork);
3753} 3751}
3754 3752
3755/* 3753/*
3756 * Add bmap trace entry prior to a call to xfs_bmap_insert_exlist, or 3754 * Add bmap trace entry prior to a call to xfs_iext_insert, or
3757 * reading in the extents list from the disk (in the btree). 3755 * reading in the extents list from the disk (in the btree).
3758 */ 3756 */
3759STATIC void 3757STATIC void
@@ -3783,7 +3781,7 @@ xfs_bmap_trace_insert(
3783} 3781}
3784 3782
3785/* 3783/*
3786 * Add bmap trace entry after updating an extent list entry in place. 3784 * Add bmap trace entry after updating an extent record in place.
3787 */ 3785 */
3788STATIC void 3786STATIC void
3789xfs_bmap_trace_post_update( 3787xfs_bmap_trace_post_update(
@@ -3797,11 +3795,11 @@ xfs_bmap_trace_post_update(
3797 3795
3798 ifp = XFS_IFORK_PTR(ip, whichfork); 3796 ifp = XFS_IFORK_PTR(ip, whichfork);
3799 xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_POST_UP, fname, desc, ip, idx, 3797 xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_POST_UP, fname, desc, ip, idx,
3800 1, &ifp->if_u1.if_extents[idx], NULL, whichfork); 3798 1, xfs_iext_get_ext(ifp, idx), NULL, whichfork);
3801} 3799}
3802 3800
3803/* 3801/*
3804 * Add bmap trace entry prior to updating an extent list entry in place. 3802 * Add bmap trace entry prior to updating an extent record in place.
3805 */ 3803 */
3806STATIC void 3804STATIC void
3807xfs_bmap_trace_pre_update( 3805xfs_bmap_trace_pre_update(
@@ -3815,7 +3813,7 @@ xfs_bmap_trace_pre_update(
3815 3813
3816 ifp = XFS_IFORK_PTR(ip, whichfork); 3814 ifp = XFS_IFORK_PTR(ip, whichfork);
3817 xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_PRE_UP, fname, desc, ip, idx, 1, 3815 xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_PRE_UP, fname, desc, ip, idx, 1,
3818 &ifp->if_u1.if_extents[idx], NULL, whichfork); 3816 xfs_iext_get_ext(ifp, idx), NULL, whichfork);
3819} 3817}
3820#endif /* XFS_BMAP_TRACE */ 3818#endif /* XFS_BMAP_TRACE */
3821 3819
@@ -3892,7 +3890,7 @@ xfs_bmap_add_attrfork(
3892 int rsvd) /* xact may use reserved blks */ 3890 int rsvd) /* xact may use reserved blks */
3893{ 3891{
3894 xfs_fsblock_t firstblock; /* 1st block/ag allocated */ 3892 xfs_fsblock_t firstblock; /* 1st block/ag allocated */
3895 xfs_bmap_free_t flist; /* freed extent list */ 3893 xfs_bmap_free_t flist; /* freed extent records */
3896 xfs_mount_t *mp; /* mount structure */ 3894 xfs_mount_t *mp; /* mount structure */
3897 xfs_trans_t *tp; /* transaction pointer */ 3895 xfs_trans_t *tp; /* transaction pointer */
3898 unsigned long s; /* spinlock spl value */ 3896 unsigned long s; /* spinlock spl value */
@@ -4146,7 +4144,7 @@ xfs_bmap_finish(
4146 xfs_efd_log_item_t *efd; /* extent free data */ 4144 xfs_efd_log_item_t *efd; /* extent free data */
4147 xfs_efi_log_item_t *efi; /* extent free intention */ 4145 xfs_efi_log_item_t *efi; /* extent free intention */
4148 int error; /* error return value */ 4146 int error; /* error return value */
4149 xfs_bmap_free_item_t *free; /* free extent list item */ 4147 xfs_bmap_free_item_t *free; /* free extent item */
4150 unsigned int logres; /* new log reservation */ 4148 unsigned int logres; /* new log reservation */
4151 unsigned int logcount; /* new log count */ 4149 unsigned int logcount; /* new log count */
4152 xfs_mount_t *mp; /* filesystem mount structure */ 4150 xfs_mount_t *mp; /* filesystem mount structure */
@@ -4242,9 +4240,9 @@ xfs_bmap_first_unused(
4242 xfs_fileoff_t *first_unused, /* unused block */ 4240 xfs_fileoff_t *first_unused, /* unused block */
4243 int whichfork) /* data or attr fork */ 4241 int whichfork) /* data or attr fork */
4244{ 4242{
4245 xfs_bmbt_rec_t *base; /* base of extent array */
4246 xfs_bmbt_rec_t *ep; /* pointer to an extent entry */ 4243 xfs_bmbt_rec_t *ep; /* pointer to an extent entry */
4247 int error; /* error return value */ 4244 int error; /* error return value */
4245 int idx; /* extent record index */
4248 xfs_ifork_t *ifp; /* inode fork pointer */ 4246 xfs_ifork_t *ifp; /* inode fork pointer */
4249 xfs_fileoff_t lastaddr; /* last block number seen */ 4247 xfs_fileoff_t lastaddr; /* last block number seen */
4250 xfs_fileoff_t lowest; /* lowest useful block */ 4248 xfs_fileoff_t lowest; /* lowest useful block */
@@ -4265,10 +4263,8 @@ xfs_bmap_first_unused(
4265 return error; 4263 return error;
4266 lowest = *first_unused; 4264 lowest = *first_unused;
4267 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 4265 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4268 base = &ifp->if_u1.if_extents[0]; 4266 for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
4269 for (lastaddr = 0, max = lowest, ep = base; 4267 ep = xfs_iext_get_ext(ifp, idx);
4270 ep < &base[nextents];
4271 ep++) {
4272 off = xfs_bmbt_get_startoff(ep); 4268 off = xfs_bmbt_get_startoff(ep);
4273 /* 4269 /*
4274 * See if the hole before this extent will work. 4270 * See if the hole before this extent will work.
@@ -4287,8 +4283,8 @@ xfs_bmap_first_unused(
4287/* 4283/*
  * Returns the file-relative block number of the last block + 1 before
  * last_block (input value) in the file.
- * This is not based on i_size, it is based on the extent list.
- * Returns 0 for local files, as they do not have an extent list.
+ * This is not based on i_size, it is based on the extent records.
+ * Returns 0 for local files, as they do not have extent records.
  */
 int						/* error */
 xfs_bmap_last_before(
@@ -4335,8 +4331,8 @@ xfs_bmap_last_before(
 
 /*
  * Returns the file-relative block number of the first block past eof in
- * the file.  This is not based on i_size, it is based on the extent list.
- * Returns 0 for local files, as they do not have an extent list.
+ * the file.  This is not based on i_size, it is based on the extent records.
+ * Returns 0 for local files, as they do not have extent records.
  */
 int						/* error */
 xfs_bmap_last_offset(
@@ -4345,7 +4341,6 @@ xfs_bmap_last_offset(
 	xfs_fileoff_t	*last_block,	/* last block */
 	int		whichfork)	/* data or attr fork */
 {
-	xfs_bmbt_rec_t	*base;		/* base of extent array */
 	xfs_bmbt_rec_t	*ep;		/* pointer to last extent */
 	int		error;		/* error return value */
 	xfs_ifork_t	*ifp;		/* inode fork pointer */
@@ -4368,9 +4363,7 @@ xfs_bmap_last_offset(
 		*last_block = 0;
 		return 0;
 	}
-	base = &ifp->if_u1.if_extents[0];
-	ASSERT(base != NULL);
-	ep = &base[nextents - 1];
+	ep = xfs_iext_get_ext(ifp, nextents - 1);
 	*last_block = xfs_bmbt_get_startoff(ep) + xfs_bmbt_get_blockcount(ep);
 	return 0;
 }
@@ -4400,7 +4393,7 @@ xfs_bmap_one_block(
 		return 0;
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
-	ep = ifp->if_u1.if_extents;
+	ep = xfs_iext_get_ext(ifp, 0);
 	xfs_bmbt_get_all(ep, &s);
 	rval = s.br_startoff == 0 && s.br_blockcount == 1;
 	if (rval && whichfork == XFS_DATA_FORK)
@@ -4435,7 +4428,6 @@ xfs_bmap_read_extents(
 	xfs_bmbt_ptr_t	*pp;		/* pointer to block address */
 	/* REFERENCED */
 	xfs_extnum_t	room;		/* number of entries there's room for */
-	xfs_bmbt_rec_t	*trp;		/* target record pointer */
 
 	bno = NULLFSBLOCK;
 	mp = ip->i_mount;
@@ -4478,16 +4470,16 @@ xfs_bmap_read_extents(
 	/*
 	 * Here with bp and block set to the leftmost leaf node in the tree.
 	 */
-	room = ifp->if_bytes / (uint)sizeof(*trp);
-	trp = ifp->if_u1.if_extents;
+	room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
 	i = 0;
 	/*
-	 * Loop over all leaf nodes.  Copy information to the extent list.
+	 * Loop over all leaf nodes.  Copy information to the extent records.
 	 */
 	for (;;) {
-		xfs_bmbt_rec_t	*frp, *temp;
+		xfs_bmbt_rec_t	*frp, *trp;
 		xfs_fsblock_t	nextbno;
 		xfs_extnum_t	num_recs;
+		xfs_extnum_t	start;
 
 
 		num_recs = be16_to_cpu(block->bb_numrecs);
@@ -4511,12 +4503,13 @@ xfs_bmap_read_extents(
 		if (nextbno != NULLFSBLOCK)
 			xfs_btree_reada_bufl(mp, nextbno, 1);
 		/*
-		 * Copy records into the extent list.
+		 * Copy records into the extent records.
 		 */
 		frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt,
 			block, 1, mp->m_bmap_dmxr[0]);
-		temp = trp;
-		for (j = 0; j < num_recs; j++, frp++, trp++) {
+		start = i;
+		for (j = 0; j < num_recs; j++, i++, frp++) {
+			trp = xfs_iext_get_ext(ifp, i);
 			trp->l0 = INT_GET(frp->l0, ARCH_CONVERT);
 			trp->l1 = INT_GET(frp->l1, ARCH_CONVERT);
 		}
@@ -4526,14 +4519,14 @@ xfs_bmap_read_extents(
 			 * any "older" data bmap btree records for a
 			 * set bit in the "extent flag" position.
 			 */
-			if (unlikely(xfs_check_nostate_extents(temp, num_recs))) {
+			if (unlikely(xfs_check_nostate_extents(ifp,
+					start, num_recs))) {
 				XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
 						 XFS_ERRLEVEL_LOW,
 						 ip->i_mount);
 				goto error0;
 			}
 		}
-		i += num_recs;
 		xfs_trans_brelse(tp, bp);
 		bno = nextbno;
 		/*
@@ -4546,7 +4539,7 @@ xfs_bmap_read_extents(
 			return error;
 		block = XFS_BUF_TO_BMBT_BLOCK(bp);
 	}
-	ASSERT(i == ifp->if_bytes / (uint)sizeof(*trp));
+	ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
 	ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
 	xfs_bmap_trace_exlist(fname, ip, i, whichfork);
 	return 0;
@@ -4557,7 +4550,7 @@ error0:
 
 #ifdef XFS_BMAP_TRACE
 /*
- * Add bmap trace insert entries for all the contents of the extent list.
+ * Add bmap trace insert entries for all the contents of the extent records.
  */
 void
 xfs_bmap_trace_exlist(
@@ -4566,16 +4559,15 @@ xfs_bmap_trace_exlist(
 	xfs_extnum_t	cnt,		/* count of entries in the list */
 	int		whichfork)	/* data or attr fork */
 {
-	xfs_bmbt_rec_t	*base;		/* base of extent list */
-	xfs_bmbt_rec_t	*ep;		/* current entry in extent list */
-	xfs_extnum_t	idx;		/* extent list entry number */
+	xfs_bmbt_rec_t	*ep;		/* current extent record */
+	xfs_extnum_t	idx;		/* extent record index */
 	xfs_ifork_t	*ifp;		/* inode fork pointer */
-	xfs_bmbt_irec_t	s;		/* extent list record */
+	xfs_bmbt_irec_t	s;		/* file extent record */
 
 	ifp = XFS_IFORK_PTR(ip, whichfork);
-	ASSERT(cnt == ifp->if_bytes / (uint)sizeof(*base));
-	base = ifp->if_u1.if_extents;
-	for (idx = 0, ep = base; idx < cnt; idx++, ep++) {
+	ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
+	for (idx = 0; idx < cnt; idx++) {
+		ep = xfs_iext_get_ext(ifp, idx);
 		xfs_bmbt_get_all(ep, &s);
 		xfs_bmap_trace_insert(fname, "exlist", ip, idx, 1, &s, NULL,
 			whichfork);
@@ -4661,14 +4653,10 @@ xfs_bmapi(
 	xfs_bmalloca_t	bma;		/* args for xfs_bmap_alloc */
 	xfs_btree_cur_t	*cur;		/* bmap btree cursor */
 	xfs_fileoff_t	end;		/* end of mapped file region */
-	int		eof;		/* we've hit the end of extent list */
-	char		contig;		/* allocation must be one extent */
-	char		delay;		/* this request is for delayed alloc */
-	char		exact;		/* don't do all of wasdelayed extent */
-	char		convert;	/* unwritten extent I/O completion */
-	xfs_bmbt_rec_t	*ep;		/* extent list entry pointer */
+	int		eof;		/* we've hit the end of extents */
+	xfs_bmbt_rec_t	*ep;		/* extent record pointer */
 	int		error;		/* error return */
-	xfs_bmbt_irec_t	got;		/* current extent list record */
+	xfs_bmbt_irec_t	got;		/* current file extent record */
 	xfs_ifork_t	*ifp;		/* inode fork pointer */
 	xfs_extlen_t	indlen;		/* indirect blocks length */
 	xfs_extnum_t	lastx;		/* last useful extent number */
@@ -4680,17 +4668,13 @@ xfs_bmapi(
 	int		nallocs;	/* number of extents alloc'd */
 	xfs_extnum_t	nextents;	/* number of extents in file */
 	xfs_fileoff_t	obno;		/* old block number (offset) */
-	xfs_bmbt_irec_t	prev;		/* previous extent list record */
+	xfs_bmbt_irec_t	prev;		/* previous file extent record */
 	int		tmp_logflags;	/* temp flags holder */
 	int		whichfork;	/* data or attr fork */
 	char		inhole;		/* current location is hole in file */
-	char		stateless;	/* ignore state flag set */
-	char		trim;		/* output trimmed to match range */
-	char		userdata;	/* allocating non-metadata */
 	char		wasdelay;	/* old extent was delayed */
 	char		wr;		/* this is a write request */
 	char		rt;		/* this is a realtime file */
-	char		rsvd;		/* OK to allocate reserved blocks */
 #ifdef DEBUG
 	xfs_fileoff_t	orig_bno;	/* original block number value */
 	int		orig_flags;	/* original flags arg value */
@@ -4727,15 +4711,8 @@ xfs_bmapi(
 		XFS_STATS_INC(xs_blk_mapw);
 	else
 		XFS_STATS_INC(xs_blk_mapr);
-	delay = (flags & XFS_BMAPI_DELAY) != 0;
-	trim = (flags & XFS_BMAPI_ENTIRE) == 0;
-	userdata = (flags & XFS_BMAPI_METADATA) == 0;
-	convert = (flags & XFS_BMAPI_CONVERT) != 0;
-	exact = (flags & XFS_BMAPI_EXACT) != 0;
-	rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0;
-	contig = (flags & XFS_BMAPI_CONTIG) != 0;
 	/*
-	 * stateless is used to combine extents which
+	 * IGSTATE flag is used to combine extents which
 	 * differ only due to the state of the extents.
 	 * This technique is used from xfs_getbmap()
 	 * when the caller does not wish to see the
@@ -4751,10 +4728,9 @@ xfs_bmapi(
 	 * xfs_strat_comp(), where the xfs_bmapi() call
 	 * is transactioned, and the extents combined.
 	 */
-	stateless = (flags & XFS_BMAPI_IGSTATE) != 0;
-	if (stateless && wr)	/* if writing unwritten space, no */
-		wr = 0;		/* allocations are allowed */
-	ASSERT(wr || !delay);
+	if ((flags & XFS_BMAPI_IGSTATE) && wr)	/* if writing unwritten space */
+		wr = 0;				/* no allocations are allowed */
+	ASSERT(wr || !(flags & XFS_BMAPI_DELAY));
 	logflags = 0;
 	nallocs = 0;
 	cur = NULL;
@@ -4789,7 +4765,7 @@ xfs_bmapi(
 		if (eof && !wr)
 			got.br_startoff = end;
 		inhole = eof || got.br_startoff > bno;
-		wasdelay = wr && !inhole && !delay &&
+		wasdelay = wr && !inhole && !(flags & XFS_BMAPI_DELAY) &&
 			ISNULLSTARTBLOCK(got.br_startblock);
 		/*
 		 * First, deal with the hole before the allocated space
@@ -4801,11 +4777,11 @@ xfs_bmapi(
 			 * allocate the stuff asked for in this bmap call
 			 * but that wouldn't be as good.
 			 */
-			if (wasdelay && !exact) {
+			if (wasdelay && !(flags & XFS_BMAPI_EXACT)) {
 				alen = (xfs_extlen_t)got.br_blockcount;
 				aoff = got.br_startoff;
 				if (lastx != NULLEXTNUM && lastx) {
-					ep = &ifp->if_u1.if_extents[lastx - 1];
+					ep = xfs_iext_get_ext(ifp, lastx - 1);
 					xfs_bmbt_get_all(ep, &prev);
 				}
 			} else if (wasdelay) {
@@ -4823,8 +4799,8 @@ xfs_bmapi(
 					got.br_startoff - bno);
 				aoff = bno;
 			}
-			minlen = contig ? alen : 1;
-			if (delay) {
+			minlen = (flags & XFS_BMAPI_CONTIG) ? alen : 1;
+			if (flags & XFS_BMAPI_DELAY) {
 				xfs_extlen_t	extsz;
 
 				/* Figure out the extent size, adjust alen */
@@ -4837,7 +4813,9 @@ xfs_bmapi(
 				if (extsz) {
 					error = xfs_bmap_extsize_align(mp,
 							&got, &prev, extsz,
-							rt, eof, delay, convert,
+							rt, eof,
+							flags&XFS_BMAPI_DELAY,
+							flags&XFS_BMAPI_CONVERT,
 							&aoff, &alen);
 					ASSERT(!error);
 				}
@@ -4875,24 +4853,29 @@ xfs_bmapi(
 				if (rt) {
 					error = xfs_mod_incore_sb(mp,
 							XFS_SBS_FREXTENTS,
-							-(extsz), rsvd);
+							-(extsz), (flags &
+							XFS_BMAPI_RSVBLOCKS));
 				} else {
 					error = xfs_mod_incore_sb(mp,
 							XFS_SBS_FDBLOCKS,
-							-(alen), rsvd);
+							-(alen), (flags &
+							XFS_BMAPI_RSVBLOCKS));
 				}
 				if (!error) {
 					error = xfs_mod_incore_sb(mp,
 							XFS_SBS_FDBLOCKS,
-							-(indlen), rsvd);
+							-(indlen), (flags &
+							XFS_BMAPI_RSVBLOCKS));
 					if (error && rt)
 						xfs_mod_incore_sb(mp,
 							XFS_SBS_FREXTENTS,
-							extsz, rsvd);
+							extsz, (flags &
+							XFS_BMAPI_RSVBLOCKS));
 					else if (error)
 						xfs_mod_incore_sb(mp,
 							XFS_SBS_FDBLOCKS,
-							alen, rsvd);
+							alen, (flags &
+							XFS_BMAPI_RSVBLOCKS));
 				}
 
 				if (error) {
@@ -4925,7 +4908,7 @@ xfs_bmapi(
 				/* Indicate if this is the first user data
 				 * in the file, or just any user data.
 				 */
-				if (userdata) {
+				if (!(flags & XFS_BMAPI_METADATA)) {
 					bma.userdata = (aoff == 0) ?
 						XFS_ALLOC_INITIAL_USER_DATA :
 						XFS_ALLOC_USERDATA;
@@ -4937,7 +4920,7 @@ xfs_bmapi(
 				bma.firstblock = *firstblock;
 				bma.alen = alen;
 				bma.off = aoff;
-				bma.conv = convert;
+				bma.conv = (flags & XFS_BMAPI_CONVERT);
 				bma.wasdel = wasdelay;
 				bma.minlen = minlen;
 				bma.low = flist->xbf_low;
@@ -4948,7 +4931,8 @@ xfs_bmapi(
 				 * is larger than a stripe unit.
 				 */
 				if (mp->m_dalign && alen >= mp->m_dalign &&
-				    userdata && whichfork == XFS_DATA_FORK) {
+				    (!(flags & XFS_BMAPI_METADATA)) &&
+				    (whichfork == XFS_DATA_FORK)) {
 					if ((error = xfs_bmap_isaeof(ip, aoff,
 							whichfork, &bma.aeof)))
 						goto error0;
@@ -5011,19 +4995,19 @@ xfs_bmapi(
 			}
 			error = xfs_bmap_add_extent(ip, lastx, &cur, &got,
 				firstblock, flist, &tmp_logflags, whichfork,
-				rsvd);
+				(flags & XFS_BMAPI_RSVBLOCKS));
 			logflags |= tmp_logflags;
 			if (error)
 				goto error0;
 			lastx = ifp->if_lastex;
-			ep = &ifp->if_u1.if_extents[lastx];
+			ep = xfs_iext_get_ext(ifp, lastx);
 			nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
 			xfs_bmbt_get_all(ep, &got);
 			ASSERT(got.br_startoff <= aoff);
 			ASSERT(got.br_startoff + got.br_blockcount >=
 				aoff + alen);
 #ifdef DEBUG
-			if (delay) {
+			if (flags & XFS_BMAPI_DELAY) {
 				ASSERT(ISNULLSTARTBLOCK(got.br_startblock));
 				ASSERT(STARTBLOCKVAL(got.br_startblock) > 0);
 			}
@@ -5052,14 +5036,15 @@ xfs_bmapi(
 		 * Then deal with the allocated space we found.
 		 */
 		ASSERT(ep != NULL);
-		if (trim && (got.br_startoff + got.br_blockcount > obno)) {
+		if (!(flags & XFS_BMAPI_ENTIRE) &&
+		    (got.br_startoff + got.br_blockcount > obno)) {
 			if (obno > bno)
 				bno = obno;
 			ASSERT((bno >= obno) || (n == 0));
 			ASSERT(bno < end);
 			mval->br_startoff = bno;
 			if (ISNULLSTARTBLOCK(got.br_startblock)) {
-				ASSERT(!wr || delay);
+				ASSERT(!wr || (flags & XFS_BMAPI_DELAY));
 				mval->br_startblock = DELAYSTARTBLOCK;
 			} else
 				mval->br_startblock =
@@ -5081,7 +5066,7 @@ xfs_bmapi(
 		} else {
 			*mval = got;
 			if (ISNULLSTARTBLOCK(mval->br_startblock)) {
-				ASSERT(!wr || delay);
+				ASSERT(!wr || (flags & XFS_BMAPI_DELAY));
 				mval->br_startblock = DELAYSTARTBLOCK;
 			}
 		}
@@ -5107,12 +5092,12 @@ xfs_bmapi(
 			mval->br_state = XFS_EXT_NORM;
 			error = xfs_bmap_add_extent(ip, lastx, &cur, mval,
 				firstblock, flist, &tmp_logflags, whichfork,
-				rsvd);
+				(flags & XFS_BMAPI_RSVBLOCKS));
 			logflags |= tmp_logflags;
 			if (error)
 				goto error0;
 			lastx = ifp->if_lastex;
-			ep = &ifp->if_u1.if_extents[lastx];
+			ep = xfs_iext_get_ext(ifp, lastx);
 			nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
 			xfs_bmbt_get_all(ep, &got);
 			/*
@@ -5124,9 +5109,10 @@ xfs_bmapi(
 			continue;
 		}
 
-		ASSERT(!trim ||
+		ASSERT((flags & XFS_BMAPI_ENTIRE) ||
 		       ((mval->br_startoff + mval->br_blockcount) <= end));
-		ASSERT(!trim || (mval->br_blockcount <= len) ||
+		ASSERT((flags & XFS_BMAPI_ENTIRE) ||
+		       (mval->br_blockcount <= len) ||
 		       (mval->br_startoff < obno));
 		bno = mval->br_startoff + mval->br_blockcount;
 		len = end - bno;
@@ -5141,7 +5127,8 @@ xfs_bmapi(
 		    mval[-1].br_startblock != HOLESTARTBLOCK &&
 		    mval->br_startblock ==
 		    mval[-1].br_startblock + mval[-1].br_blockcount &&
-		    (stateless || mval[-1].br_state == mval->br_state)) {
+		    ((flags & XFS_BMAPI_IGSTATE) ||
+			mval[-1].br_state == mval->br_state)) {
 			ASSERT(mval->br_startoff ==
 			       mval[-1].br_startoff + mval[-1].br_blockcount);
 			mval[-1].br_blockcount += mval->br_blockcount;
@@ -5168,8 +5155,7 @@ xfs_bmapi(
 		/*
 		 * Else go on to the next record.
 		 */
-		ep++;
-		lastx++;
+		ep = xfs_iext_get_ext(ifp, ++lastx);
 		if (lastx >= nextents) {
 			eof = 1;
 			prev = got;
@@ -5199,7 +5185,7 @@ error0:
 error0:
 	/*
 	 * Log everything.  Do this after conversion, there's no point in
-	 * logging the extent list if we've converted to btree format.
+	 * logging the extent records if we've converted to btree format.
 	 */
 	if ((logflags & XFS_ILOG_FEXT(whichfork)) &&
 	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
@@ -5252,12 +5238,12 @@ xfs_bmapi_single(
 	xfs_fsblock_t	*fsb,		/* output: mapped block */
 	xfs_fileoff_t	bno)		/* starting file offs. mapped */
 {
-	int		eof;		/* we've hit the end of extent list */
+	int		eof;		/* we've hit the end of extents */
 	int		error;		/* error return */
-	xfs_bmbt_irec_t	got;		/* current extent list record */
+	xfs_bmbt_irec_t	got;		/* current file extent record */
 	xfs_ifork_t	*ifp;		/* inode fork pointer */
 	xfs_extnum_t	lastx;		/* last useful extent number */
-	xfs_bmbt_irec_t	prev;		/* previous extent list record */
+	xfs_bmbt_irec_t	prev;		/* previous file extent record */
 
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	if (unlikely(
@@ -5312,18 +5298,18 @@ xfs_bunmapi(
 	xfs_btree_cur_t	*cur;		/* bmap btree cursor */
 	xfs_bmbt_irec_t	del;		/* extent being deleted */
 	int		eof;		/* is deleting at eof */
-	xfs_bmbt_rec_t	*ep;		/* extent list entry pointer */
+	xfs_bmbt_rec_t	*ep;		/* extent record pointer */
 	int		error;		/* error return value */
 	xfs_extnum_t	extno;		/* extent number in list */
-	xfs_bmbt_irec_t	got;		/* current extent list entry */
+	xfs_bmbt_irec_t	got;		/* current extent record */
 	xfs_ifork_t	*ifp;		/* inode fork pointer */
 	int		isrt;		/* freeing in rt area */
 	xfs_extnum_t	lastx;		/* last extent index used */
 	int		logflags;	/* transaction logging flags */
 	xfs_extlen_t	mod;		/* rt extent offset */
 	xfs_mount_t	*mp;		/* mount structure */
-	xfs_extnum_t	nextents;	/* size of extent list */
-	xfs_bmbt_irec_t	prev;		/* previous extent list entry */
+	xfs_extnum_t	nextents;	/* number of file extents */
+	xfs_bmbt_irec_t	prev;		/* previous extent record */
 	xfs_fileoff_t	start;		/* first file offset deleted */
 	int		tmp_logflags;	/* partial logging flags */
 	int		wasdel;		/* was a delayed alloc extent */
@@ -5369,7 +5355,7 @@ xfs_bunmapi(
 	 * file, back up to the last block if so...
 	 */
 	if (eof) {
-		ep = &ifp->if_u1.if_extents[--lastx];
+		ep = xfs_iext_get_ext(ifp, --lastx);
 		xfs_bmbt_get_all(ep, &got);
 		bno = got.br_startoff + got.br_blockcount - 1;
 	}
@@ -5393,7 +5379,7 @@ xfs_bunmapi(
 		if (got.br_startoff > bno) {
 			if (--lastx < 0)
 				break;
-			ep--;
+			ep = xfs_iext_get_ext(ifp, lastx);
 			xfs_bmbt_get_all(ep, &got);
 		}
 		/*
@@ -5440,7 +5426,8 @@ xfs_bunmapi(
 					del.br_blockcount : mod;
 				if (bno < got.br_startoff) {
 					if (--lastx >= 0)
-						xfs_bmbt_get_all(--ep, &got);
+						xfs_bmbt_get_all(xfs_iext_get_ext(
+							ifp, lastx), &got);
 				}
 				continue;
 			}
@@ -5500,7 +5487,8 @@ xfs_bunmapi(
 				 * try again.
 				 */
 				ASSERT(lastx > 0);
-				xfs_bmbt_get_all(ep - 1, &prev);
+				xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
+						lastx - 1), &prev);
 				ASSERT(prev.br_state == XFS_EXT_NORM);
 				ASSERT(!ISNULLSTARTBLOCK(prev.br_startblock));
 				ASSERT(del.br_startblock ==
@@ -5587,12 +5575,12 @@ nodelete:
 		 * If not done go on to the next (previous) record.
 		 * Reset ep in case the extents array was re-alloced.
 		 */
-		ep = &ifp->if_u1.if_extents[lastx];
+		ep = xfs_iext_get_ext(ifp, lastx);
 		if (bno != (xfs_fileoff_t)-1 && bno >= start) {
 			if (lastx >= XFS_IFORK_NEXTENTS(ip, whichfork) ||
 			    xfs_bmbt_get_startoff(ep) > bno) {
-				lastx--;
-				ep--;
+				if (--lastx >= 0)
+					ep = xfs_iext_get_ext(ifp, lastx);
 			}
 			if (lastx >= 0)
 				xfs_bmbt_get_all(ep, &got);
@@ -5636,7 +5624,7 @@ nodelete:
 error0:
 	/*
 	 * Log everything.  Do this after conversion, there's no point in
	 * logging the extent list if we've converted to btree format.
+	 * logging the extent records if we've converted to btree format.
 	 */
 	if ((logflags & XFS_ILOG_FEXT(whichfork)) &&
 	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
@@ -5892,9 +5880,9 @@ xfs_bmap_isaeof(
 {
 	int		error;		/* error return value */
 	xfs_ifork_t	*ifp;		/* inode fork pointer */
-	xfs_bmbt_rec_t	*lastrec;	/* extent list entry pointer */
-	xfs_extnum_t	nextents;	/* size of extent list */
-	xfs_bmbt_irec_t	s;		/* expanded extent list entry */
+	xfs_bmbt_rec_t	*lastrec;	/* extent record pointer */
+	xfs_extnum_t	nextents;	/* number of file extents */
+	xfs_bmbt_irec_t	s;		/* expanded extent record */
 
 	ASSERT(whichfork == XFS_DATA_FORK);
 	ifp = XFS_IFORK_PTR(ip, whichfork);
@@ -5909,7 +5897,7 @@ xfs_bmap_isaeof(
 	/*
 	 * Go to the last extent
 	 */
-	lastrec = &ifp->if_u1.if_extents[nextents - 1];
+	lastrec = xfs_iext_get_ext(ifp, nextents - 1);
 	xfs_bmbt_get_all(lastrec, &s);
 	/*
 	 * Check we are allocating in the last extent (for delayed allocations)
@@ -5936,8 +5924,8 @@ xfs_bmap_eof(
 	xfs_fsblock_t	blockcount;	/* extent block count */
 	int		error;		/* error return value */
 	xfs_ifork_t	*ifp;		/* inode fork pointer */
-	xfs_bmbt_rec_t	*lastrec;	/* extent list entry pointer */
-	xfs_extnum_t	nextents;	/* size of extent list */
+	xfs_bmbt_rec_t	*lastrec;	/* extent record pointer */
+	xfs_extnum_t	nextents;	/* number of file extents */
 	xfs_fileoff_t	startoff;	/* extent starting file offset */
 
 	ASSERT(whichfork == XFS_DATA_FORK);
@@ -5953,7 +5941,7 @@ xfs_bmap_eof(
 	/*
 	 * Go to the last extent
 	 */
-	lastrec = &ifp->if_u1.if_extents[nextents - 1];
+	lastrec = xfs_iext_get_ext(ifp, nextents - 1);
 	startoff = xfs_bmbt_get_startoff(lastrec);
 	blockcount = xfs_bmbt_get_blockcount(lastrec);
 	*eof = endoff >= startoff + blockcount;
@@ -5969,18 +5957,21 @@ xfs_bmap_check_extents(
 	xfs_inode_t	*ip,		/* incore inode pointer */
 	int		whichfork)	/* data or attr fork */
 {
-	xfs_bmbt_rec_t	*base;		/* base of extents list */
 	xfs_bmbt_rec_t	*ep;		/* current extent entry */
+	xfs_extnum_t	idx;		/* extent record index */
 	xfs_ifork_t	*ifp;		/* inode fork pointer */
 	xfs_extnum_t	nextents;	/* number of extents in list */
+	xfs_bmbt_rec_t	*nextp;		/* next extent entry */
 
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
-	base = ifp->if_u1.if_extents;
 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
-	for (ep = base; ep < &base[nextents - 1]; ep++) {
+	ep = xfs_iext_get_ext(ifp, 0);
+	for (idx = 0; idx < nextents - 1; idx++) {
+		nextp = xfs_iext_get_ext(ifp, idx + 1);
 		xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep,
-			(void *)(ep + 1));
+			(void *)(nextp));
+		ep = nextp;
 	}
 }
 
@@ -6119,12 +6110,14 @@ xfs_bmap_check_leaf_extents(
 	xfs_fsblock_t	bno;		/* block # of "block" */
 	xfs_buf_t	*bp;		/* buffer for "block" */
 	int		error;		/* error return value */
-	xfs_extnum_t	i=0;		/* index into the extents list */
+	xfs_extnum_t	i=0, j;		/* index into the extents list */
 	xfs_ifork_t	*ifp;		/* fork structure */
 	int		level;		/* btree level, for checking */
 	xfs_mount_t	*mp;		/* file system mount structure */
 	xfs_bmbt_ptr_t	*pp;		/* pointer to block address */
-	xfs_bmbt_rec_t	*ep, *lastp;	/* extent pointers in block entry */
+	xfs_bmbt_rec_t	*ep;		/* pointer to current extent */
+	xfs_bmbt_rec_t	*lastp;		/* pointer to previous extent */
+	xfs_bmbt_rec_t	*nextp;		/* pointer to next extent */
 	int		bp_release = 0;
 
 	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
@@ -6194,7 +6187,6 @@ xfs_bmap_check_leaf_extents(
 	 */
 	lastp = NULL;
 	for (;;) {
-		xfs_bmbt_rec_t	*frp;
 		xfs_fsblock_t	nextbno;
 		xfs_extnum_t	num_recs;
 
@@ -6213,18 +6205,20 @@ xfs_bmap_check_leaf_extents(
 		 * conform with the first entry in this one.
 		 */
 
-		frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt,
+		ep = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt,
 			block, 1, mp->m_bmap_dmxr[0]);
-
-		for (ep = frp;ep < frp + (num_recs - 1); ep++) {
+		for (j = 1; j < num_recs; j++) {
+			nextp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt,
+				block, j + 1, mp->m_bmap_dmxr[0]);
 			if (lastp) {
 				xfs_btree_check_rec(XFS_BTNUM_BMAP,
 					(void *)lastp, (void *)ep);
 			}
 			xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep,
-				(void *)(ep + 1));
+				(void *)(nextp));
+			lastp = ep;
+			ep = nextp;
 		}
-		lastp = frp + num_recs - 1; /* For the next iteration */
 
 		i += num_recs;
 		if (bp_release) {
@@ -6288,7 +6282,7 @@ xfs_bmap_count_blocks(
 	mp = ip->i_mount;
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
-		if (unlikely(xfs_bmap_count_leaves(ifp->if_u1.if_extents,
+		if (unlikely(xfs_bmap_count_leaves(ifp, 0,
 			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
 			count) < 0)) {
 			XFS_ERROR_REPORT("xfs_bmap_count_blocks(1)",
@@ -6310,7 +6304,7 @@ xfs_bmap_count_blocks(
 	ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks);
 	bno = INT_GET(*pp, ARCH_CONVERT);
 
-	if (unlikely(xfs_bmap_count_tree(mp, tp, bno, level, count) < 0)) {
+	if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
 		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
 				 mp);
 		return XFS_ERROR(EFSCORRUPTED);
@@ -6327,6 +6321,7 @@ int						/* error */
 xfs_bmap_count_tree(
 	xfs_mount_t	*mp,		/* file system mount point */
 	xfs_trans_t	*tp,		/* transaction pointer */
+	xfs_ifork_t	*ifp,		/* inode fork pointer */
 	xfs_fsblock_t	blockno,	/* file system block number */
 	int		levelin,	/* level in btree */
 	int		*count)		/* Count of blocks */
@@ -6339,7 +6334,6 @@ xfs_bmap_count_tree(
 	xfs_fsblock_t		nextbno;
 	xfs_bmbt_block_t	*block, *nextblock;
 	int			numrecs;
-	xfs_bmbt_rec_t		*frp;
 
 	if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF)))
 		return error;
@@ -6364,7 +6358,7 @@ xfs_bmap_count_tree(
 			xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
 		bno = INT_GET(*pp, ARCH_CONVERT);
 		if (unlikely((error =
-		     xfs_bmap_count_tree(mp, tp, bno, level, count)) < 0)) {
+		     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
 			xfs_trans_brelse(tp, bp);
 			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
 					 XFS_ERRLEVEL_LOW, mp);
@@ -6376,9 +6370,8 @@ xfs_bmap_count_tree(
 		for (;;) {
 			nextbno = be64_to_cpu(block->bb_rightsib);
 			numrecs = be16_to_cpu(block->bb_numrecs);
-			frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize,
-				xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]);
-			if (unlikely(xfs_bmap_disk_count_leaves(frp, numrecs, count) < 0)) {
+			if (unlikely(xfs_bmap_disk_count_leaves(ifp, mp,
+					0, block, numrecs, count) < 0)) {
				xfs_trans_brelse(tp, bp);
 				XFS_ERROR_REPORT("xfs_bmap_count_tree(2)",
 						 XFS_ERRLEVEL_LOW, mp);
@@ -6399,33 +6392,45 @@ xfs_bmap_count_tree(
 }
 
 /*
- * Count leaf blocks given a pointer to an extent list.
+ * Count leaf blocks given a range of extent records.
  */
 int
 xfs_bmap_count_leaves(
-	xfs_bmbt_rec_t		*frp,
+	xfs_ifork_t		*ifp,
+	xfs_extnum_t		idx,
 	int			numrecs,
 	int			*count)
 {
 	int		b;
+	xfs_bmbt_rec_t	*frp;
 
-	for ( b = 1; b <= numrecs; b++, frp++)
+	for (b = 0; b < numrecs; b++) {
+		frp = xfs_iext_get_ext(ifp, idx + b);
 		*count += xfs_bmbt_get_blockcount(frp);
+	}
 	return 0;
 }
 
 /*
- * Count leaf blocks given a pointer to an extent list originally in btree format.
+ * Count leaf blocks given a range of extent records originally
+ * in btree format.
 */
 int
 xfs_bmap_disk_count_leaves(
-	xfs_bmbt_rec_t		*frp,
+	xfs_ifork_t		*ifp,
+	xfs_mount_t		*mp,
+	xfs_extnum_t		idx,
+	xfs_bmbt_block_t	*block,
 	int			numrecs,
 	int			*count)
 {
 	int		b;
+	xfs_bmbt_rec_t	*frp;
 
-	for ( b = 1; b <= numrecs; b++, frp++)
+	for (b = 1; b <= numrecs; b++) {
+		frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize,
+			xfs_bmbt, block, idx + b, mp->m_bmap_dmxr[0]);
 		*count += xfs_bmbt_disk_get_blockcount(frp);
+	}
 	return 0;
 }
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 12cc63dfc2c4..011ccaa9a1c0 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -20,6 +20,7 @@
 
 struct getbmap;
 struct xfs_bmbt_irec;
+struct xfs_ifork;
 struct xfs_inode;
 struct xfs_mount;
 struct xfs_trans;
@@ -347,9 +348,28 @@ xfs_bmap_count_blocks(
  */
 int
 xfs_check_nostate_extents(
-	xfs_bmbt_rec_t		*ep,
+	struct xfs_ifork	*ifp,
+	xfs_extnum_t		idx,
 	xfs_extnum_t		num);
 
+/*
+ * Call xfs_bmap_do_search_extents() to search for the extent
+ * record containing block bno. If in multi-level in-core extent
+ * allocation mode, find and extract the target extent buffer,
+ * otherwise just use the direct extent list.
+ */
+xfs_bmbt_rec_t *
+xfs_bmap_search_multi_extents(struct xfs_ifork *, xfs_fileoff_t, int *,
+			xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *);
+
+/*
+ * Search an extent list for the extent which includes block
+ * bno.
+ */
+xfs_bmbt_rec_t *xfs_bmap_do_search_extents(xfs_bmbt_rec_t *,
+			xfs_extnum_t, xfs_extnum_t, xfs_fileoff_t, int *,
+			xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *);
+
 #endif	/* __KERNEL__ */
 
 #endif	/* __XFS_BMAP_H__ */
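The newly exported xfs_bmap_search_multi_extents() gives xfs_bmap.h users a single lookup entry point. A hedged usage sketch; the out-parameter meanings are inferred from the comment above and from the eof/lastx/got/prev locals seen in xfs_bmapi(), and the helper xfs_block_is_mapped() is hypothetical, for illustration only:

	/* Hypothetical helper: does file block 'bno' fall inside an
	 * existing extent record of this fork? */
	STATIC int
	xfs_block_is_mapped(
		xfs_ifork_t	*ifp,
		xfs_fileoff_t	bno)
	{
		int		eof;	/* no record at or past bno? */
		xfs_extnum_t	lastx;	/* index of the record found */
		xfs_bmbt_irec_t	got;	/* record containing/following bno */
		xfs_bmbt_irec_t	prev;	/* record before it, if any */

		xfs_bmap_search_multi_extents(ifp, bno, &eof, &lastx,
					      &got, &prev);
		return !eof && got.br_startoff <= bno &&
		       bno < got.br_startoff + got.br_blockcount;
	}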
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 3f1383d160e8..bea44709afbe 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -2754,7 +2754,7 @@ xfs_bmbt_update(
 }
 
 /*
- * Check an extent list, which has just been read, for
+ * Check extent records, which have just been read, for
  * any bit in the extent flag field. ASSERT on debug
  * kernels, as this condition should not occur.
  * Return an error condition (1) if any flags found,
@@ -2763,10 +2763,14 @@ xfs_bmbt_update(
 
 int
 xfs_check_nostate_extents(
-	xfs_bmbt_rec_t		*ep,
+	xfs_ifork_t		*ifp,
+	xfs_extnum_t		idx,
 	xfs_extnum_t		num)
 {
-	for (; num > 0; num--, ep++) {
+	xfs_bmbt_rec_t		*ep;
+
+	for (; num > 0; num--, idx++) {
+		ep = xfs_iext_get_ext(ifp, idx);
 		if ((ep->l0 >>
 		     (64 - BMBT_EXNTFLAG_BITLEN)) != 0) {
 			ASSERT(0);
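xfs_check_nostate_extents() rejects any record whose extent-flag bit is set: the shift keeps only the top BMBT_EXNTFLAG_BITLEN bits of the packed 64-bit l0 word, which must be zero in records written before unwritten-extent support. A stand-alone sketch of the arithmetic, assuming (as in this on-disk layout) that BMBT_EXNTFLAG_BITLEN is 1, so the test reduces to the most significant bit of l0:

	/* ep->l0 layout: [ extent-state flag | startoff high bits ... ] */
	if (ep->l0 & (1ULL << 63))
		return 1;	/* stray state flag: treat fork as corrupt */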
diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h
index e095a2d344ae..6478cfa0e539 100644
--- a/fs/xfs/xfs_bmap_btree.h
+++ b/fs/xfs/xfs_bmap_btree.h
@@ -372,14 +372,6 @@ extern int xfs_bmbt_get_rec(struct xfs_btree_cur *, xfs_fileoff_t *,
 				xfs_exntst_t *, int *);
 #endif
 
-/*
- * Search an extent list for the extent which includes block
- * bno.
- */
-xfs_bmbt_rec_t *xfs_bmap_do_search_extents(xfs_bmbt_rec_t *,
-			xfs_extnum_t, xfs_extnum_t, xfs_fileoff_t, int *,
-			xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *);
-
 #endif	/* __KERNEL__ */
 
 #endif	/* __XFS_BMAP_BTREE_H__ */
diff --git a/fs/xfs/xfs_clnt.h b/fs/xfs/xfs_clnt.h
index f57cc9ac875e..022fff62085b 100644
--- a/fs/xfs/xfs_clnt.h
+++ b/fs/xfs/xfs_clnt.h
@@ -68,8 +68,6 @@ struct xfs_mount_args {
 					 * enforcement */
 #define XFSMNT_PQUOTAENF	0x00000040	/* IRIX project quota limit
 					 * enforcement */
-#define XFSMNT_NOATIME		0x00000100	/* don't modify access
-					 * times on reads */
 #define XFSMNT_NOALIGN		0x00000200	/* don't allocate at
 					 * stripe boundaries*/
 #define XFSMNT_RETERR		0x00000400	/* return error to user */
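The xfs_da_btree.c hunks that follow replace the old INT_GET()/INT_SET()/INT_MOD() macros with the kernel's fixed-endian helpers, making each on-disk field's width explicit. A minimal sketch of the mapping, grounded in the conversions shown below (the hdr fields are big-endian on disk; be16_add() is a helper used by this series):

	/* read: INT_GET(x, ARCH_CONVERT) becomes a width-specific helper */
	int	count = be16_to_cpu(node->hdr.count);
	__u32	forw  = be32_to_cpu(node->hdr.info.forw);

	/* write: INT_SET(x, ARCH_CONVERT, v) becomes cpu_to_beNN(v) */
	node->hdr.level = cpu_to_be16(level);

	/* in-place add: INT_MOD(x, ARCH_CONVERT, d) becomes be16_add() */
	be16_add(&node->hdr.count, 1);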
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 473671fa5c13..4bae3a76c678 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -126,10 +126,10 @@ xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
 	node = bp->data;
 	node->hdr.info.forw = 0;
 	node->hdr.info.back = 0;
-	INT_SET(node->hdr.info.magic, ARCH_CONVERT, XFS_DA_NODE_MAGIC);
+	node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC);
 	node->hdr.info.pad = 0;
 	node->hdr.count = 0;
-	INT_SET(node->hdr.level, ARCH_CONVERT, level);
+	node->hdr.level = cpu_to_be16(level);
 
 	xfs_da_log_buf(tp, bp,
 		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
@@ -290,28 +290,28 @@ xfs_da_split(xfs_da_state_t *state)
 
 	node = oldblk->bp->data;
 	if (node->hdr.info.forw) {
-		if (INT_GET(node->hdr.info.forw, ARCH_CONVERT) == addblk->blkno) {
+		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
 			bp = addblk->bp;
 		} else {
 			ASSERT(state->extravalid);
 			bp = state->extrablk.bp;
 		}
 		node = bp->data;
-		INT_SET(node->hdr.info.back, ARCH_CONVERT, oldblk->blkno);
+		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
 		xfs_da_log_buf(state->args->trans, bp,
 		    XFS_DA_LOGRANGE(node, &node->hdr.info,
 		    sizeof(node->hdr.info)));
 	}
 	node = oldblk->bp->data;
-	if (INT_GET(node->hdr.info.back, ARCH_CONVERT)) {
-		if (INT_GET(node->hdr.info.back, ARCH_CONVERT) == addblk->blkno) {
+	if (node->hdr.info.back) {
+		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
 			bp = addblk->bp;
 		} else {
 			ASSERT(state->extravalid);
 			bp = state->extrablk.bp;
 		}
 		node = bp->data;
-		INT_SET(node->hdr.info.forw, ARCH_CONVERT, oldblk->blkno);
+		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
 		xfs_da_log_buf(state->args->trans, bp,
 		    XFS_DA_LOGRANGE(node, &node->hdr.info,
 		    sizeof(node->hdr.info)));
@@ -359,14 +359,14 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 	ASSERT(bp != NULL);
 	node = bp->data;
 	oldroot = blk1->bp->data;
-	if (INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) {
-		size = (int)((char *)&oldroot->btree[INT_GET(oldroot->hdr.count, ARCH_CONVERT)] -
+	if (be16_to_cpu(oldroot->hdr.info.magic) == XFS_DA_NODE_MAGIC) {
+		size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
 			     (char *)oldroot);
 	} else {
 		ASSERT(XFS_DIR_IS_V2(mp));
-		ASSERT(INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC);
+		ASSERT(be16_to_cpu(oldroot->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
 		leaf = (xfs_dir2_leaf_t *)oldroot;
-		size = (int)((char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] -
+		size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] -
 			     (char *)leaf);
 	}
 	memcpy(node, oldroot, size);
@@ -381,18 +381,18 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 	error = xfs_da_node_create(args,
 		args->whichfork == XFS_DATA_FORK &&
 		XFS_DIR_IS_V2(mp) ? mp->m_dirleafblk : 0,
-		INT_GET(node->hdr.level, ARCH_CONVERT) + 1, &bp, args->whichfork);
+		be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork);
 	if (error)
 		return(error);
 	node = bp->data;
-	INT_SET(node->btree[0].hashval, ARCH_CONVERT, blk1->hashval);
-	INT_SET(node->btree[0].before, ARCH_CONVERT, blk1->blkno);
-	INT_SET(node->btree[1].hashval, ARCH_CONVERT, blk2->hashval);
-	INT_SET(node->btree[1].before, ARCH_CONVERT, blk2->blkno);
-	INT_SET(node->hdr.count, ARCH_CONVERT, 2);
+	node->btree[0].hashval = cpu_to_be32(blk1->hashval);
+	node->btree[0].before = cpu_to_be32(blk1->blkno);
+	node->btree[1].hashval = cpu_to_be32(blk2->hashval);
+	node->btree[1].before = cpu_to_be32(blk2->blkno);
+	node->hdr.count = cpu_to_be16(2);
 
 #ifdef DEBUG
-	if (INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) {
+	if (be16_to_cpu(oldroot->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC) {
 		ASSERT(blk1->blkno >= mp->m_dirleafblk &&
 		       blk1->blkno < mp->m_dirfreeblk);
 		ASSERT(blk2->blkno >= mp->m_dirleafblk &&
@@ -424,7 +424,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
 	int useextra;
 
 	node = oldblk->bp->data;
-	ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
+	ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
 
 	/*
 	 * With V2 the extra block is data or freespace.
@@ -435,7 +435,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
 	/*
 	 * Do we have to split the node?
 	 */
-	if ((INT_GET(node->hdr.count, ARCH_CONVERT) + newcount) > state->node_ents) {
+	if ((be16_to_cpu(node->hdr.count) + newcount) > state->node_ents) {
 		/*
 		 * Allocate a new node, add to the doubly linked chain of
 		 * nodes, then move some of our excess entries into it.
@@ -472,7 +472,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
 	 * If we had double-split op below us, then add the extra block too.
 	 */
 	node = oldblk->bp->data;
-	if (oldblk->index <= INT_GET(node->hdr.count, ARCH_CONVERT)) {
+	if (oldblk->index <= be16_to_cpu(node->hdr.count)) {
 		oldblk->index++;
 		xfs_da_node_add(state, oldblk, addblk);
 		if (useextra) {
@@ -516,17 +516,17 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 	 * Figure out how many entries need to move, and in which direction.
 	 * Swap the nodes around if that makes it simpler.
 	 */
-	if ((INT_GET(node1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(node2->hdr.count, ARCH_CONVERT) > 0) &&
-	    ((INT_GET(node2->btree[ 0 ].hashval, ARCH_CONVERT) < INT_GET(node1->btree[ 0 ].hashval, ARCH_CONVERT)) ||
-	     (INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) <
-	      INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) {
+	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
+	    ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) ||
+	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
+	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
 		tmpnode = node1;
 		node1 = node2;
 		node2 = tmpnode;
 	}
-	ASSERT(INT_GET(node1->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
-	ASSERT(INT_GET(node2->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
-	count = (INT_GET(node1->hdr.count, ARCH_CONVERT) - INT_GET(node2->hdr.count, ARCH_CONVERT)) / 2;
+	ASSERT(be16_to_cpu(node1->hdr.info.magic) == XFS_DA_NODE_MAGIC);
+	ASSERT(be16_to_cpu(node2->hdr.info.magic) == XFS_DA_NODE_MAGIC);
+	count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2;
 	if (count == 0)
 		return;
 	tp = state->args->trans;
@@ -537,7 +537,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 		/*
 		 * Move elements in node2 up to make a hole.
 		 */
-		if ((tmp = INT_GET(node2->hdr.count, ARCH_CONVERT)) > 0) {
+		if ((tmp = be16_to_cpu(node2->hdr.count)) > 0) {
 			tmp *= (uint)sizeof(xfs_da_node_entry_t);
 			btree_s = &node2->btree[0];
 			btree_d = &node2->btree[count];
@@ -548,13 +548,12 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 		 * Move the req'd B-tree elements from high in node1 to
 		 * low in node2.
 		 */
-		INT_MOD(node2->hdr.count, ARCH_CONVERT, count);
+		be16_add(&node2->hdr.count, count);
 		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
-		btree_s = &node1->btree[INT_GET(node1->hdr.count, ARCH_CONVERT) - count];
+		btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count];
 		btree_d = &node2->btree[0];
 		memcpy(btree_d, btree_s, tmp);
-		INT_MOD(node1->hdr.count, ARCH_CONVERT, -(count));
-
+		be16_add(&node1->hdr.count, -count);
 	} else {
 		/*
 		 * Move the req'd B-tree elements from low in node2 to
@@ -563,21 +562,21 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 		count = -count;
 		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
 		btree_s = &node2->btree[0];
-		btree_d = &node1->btree[INT_GET(node1->hdr.count, ARCH_CONVERT)];
+		btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
 		memcpy(btree_d, btree_s, tmp);
-		INT_MOD(node1->hdr.count, ARCH_CONVERT, count);
+		be16_add(&node1->hdr.count, count);
 		xfs_da_log_buf(tp, blk1->bp,
 			XFS_DA_LOGRANGE(node1, btree_d, tmp));
 
 		/*
 		 * Move elements in node2 down to fill the hole.
 		 */
-		tmp = INT_GET(node2->hdr.count, ARCH_CONVERT) - count;
+		tmp = be16_to_cpu(node2->hdr.count) - count;
 		tmp *= (uint)sizeof(xfs_da_node_entry_t);
 		btree_s = &node2->btree[count];
 		btree_d = &node2->btree[0];
 		memmove(btree_d, btree_s, tmp);
-		INT_MOD(node2->hdr.count, ARCH_CONVERT, -(count));
+		be16_add(&node2->hdr.count, -count);
 	}
 
 	/*
@@ -588,7 +587,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 	xfs_da_log_buf(tp, blk2->bp,
 		XFS_DA_LOGRANGE(node2, &node2->hdr,
 			sizeof(node2->hdr) +
-			sizeof(node2->btree[0]) * INT_GET(node2->hdr.count, ARCH_CONVERT)));
+			sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));
 
 	/*
 	 * Record the last hashval from each block for upward propagation.
@@ -596,15 +595,15 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 	 */
 	node1 = blk1->bp->data;
 	node2 = blk2->bp->data;
-	blk1->hashval = INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT);
-	blk2->hashval = INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT);
+	blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);
+	blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);
 
 	/*
 	 * Adjust the expected index for insertion.
 	 */
-	if (blk1->index >= INT_GET(node1->hdr.count, ARCH_CONVERT)) {
-		blk2->index = blk1->index - INT_GET(node1->hdr.count, ARCH_CONVERT);
-		blk1->index = INT_GET(node1->hdr.count, ARCH_CONVERT) + 1;	/* make it invalid */
+	if (blk1->index >= be16_to_cpu(node1->hdr.count)) {
+		blk2->index = blk1->index - be16_to_cpu(node1->hdr.count);
+		blk1->index = be16_to_cpu(node1->hdr.count) + 1;	/* make it invalid */
 	}
 }
 
@@ -622,8 +621,8 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
 
 	node = oldblk->bp->data;
 	mp = state->mp;
-	ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
-	ASSERT((oldblk->index >= 0) && (oldblk->index <= INT_GET(node->hdr.count, ARCH_CONVERT)));
+	ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
+	ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
 	ASSERT(newblk->blkno != 0);
 	if (state->args->whichfork == XFS_DATA_FORK && XFS_DIR_IS_V2(mp))
 		ASSERT(newblk->blkno >= mp->m_dirleafblk &&
@@ -634,22 +633,22 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
 	 */
 	tmp = 0;
 	btree = &node->btree[ oldblk->index ];
-	if (oldblk->index < INT_GET(node->hdr.count, ARCH_CONVERT)) {
-		tmp = (INT_GET(node->hdr.count, ARCH_CONVERT) - oldblk->index) * (uint)sizeof(*btree);
+	if (oldblk->index < be16_to_cpu(node->hdr.count)) {
+		tmp = (be16_to_cpu(node->hdr.count) - oldblk->index) * (uint)sizeof(*btree);
 		memmove(btree + 1, btree, tmp);
 	}
-	INT_SET(btree->hashval, ARCH_CONVERT, newblk->hashval);
-	INT_SET(btree->before, ARCH_CONVERT, newblk->blkno);
+	btree->hashval = cpu_to_be32(newblk->hashval);
+	btree->before = cpu_to_be32(newblk->blkno);
 	xfs_da_log_buf(state->args->trans, oldblk->bp,
 		XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
-	INT_MOD(node->hdr.count, ARCH_CONVERT, +1);
+	be16_add(&node->hdr.count, 1);
 	xfs_da_log_buf(state->args->trans, oldblk->bp,
 		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
 
 	/*
 	 * Copy the last hash value from the oldblk to propagate upwards.
 	 */
-	oldblk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT);
+	oldblk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1 ].hashval);
 }
 
 /*========================================================================
@@ -768,21 +767,21 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
768 ASSERT(args != NULL); 767 ASSERT(args != NULL);
769 ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC); 768 ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
770 oldroot = root_blk->bp->data; 769 oldroot = root_blk->bp->data;
771 ASSERT(INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); 770 ASSERT(be16_to_cpu(oldroot->hdr.info.magic) == XFS_DA_NODE_MAGIC);
772 ASSERT(!oldroot->hdr.info.forw); 771 ASSERT(!oldroot->hdr.info.forw);
773 ASSERT(!oldroot->hdr.info.back); 772 ASSERT(!oldroot->hdr.info.back);
774 773
775 /* 774 /*
776 * If the root has more than one child, then don't do anything. 775 * If the root has more than one child, then don't do anything.
777 */ 776 */
778 if (INT_GET(oldroot->hdr.count, ARCH_CONVERT) > 1) 777 if (be16_to_cpu(oldroot->hdr.count) > 1)
779 return(0); 778 return(0);
780 779
781 /* 780 /*
782 * Read in the (only) child block, then copy those bytes into 781 * Read in the (only) child block, then copy those bytes into
783 * the root block's buffer and free the original child block. 782 * the root block's buffer and free the original child block.
784 */ 783 */
785 child = INT_GET(oldroot->btree[ 0 ].before, ARCH_CONVERT); 784 child = be32_to_cpu(oldroot->btree[0].before);
786 ASSERT(child != 0); 785 ASSERT(child != 0);
787 error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp, 786 error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp,
788 args->whichfork); 787 args->whichfork);
@@ -790,11 +789,11 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
790 return(error); 789 return(error);
791 ASSERT(bp != NULL); 790 ASSERT(bp != NULL);
792 blkinfo = bp->data; 791 blkinfo = bp->data;
793 if (INT_GET(oldroot->hdr.level, ARCH_CONVERT) == 1) { 792 if (be16_to_cpu(oldroot->hdr.level) == 1) {
794 ASSERT(INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) || 793 ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DIRX_LEAF_MAGIC(state->mp) ||
795 INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); 794 be16_to_cpu(blkinfo->magic) == XFS_ATTR_LEAF_MAGIC);
796 } else { 795 } else {
797 ASSERT(INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); 796 ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DA_NODE_MAGIC);
798 } 797 }
799 ASSERT(!blkinfo->forw); 798 ASSERT(!blkinfo->forw);
800 ASSERT(!blkinfo->back); 799 ASSERT(!blkinfo->back);
@@ -830,9 +829,9 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
830 */ 829 */
831 blk = &state->path.blk[ state->path.active-1 ]; 830 blk = &state->path.blk[ state->path.active-1 ];
832 info = blk->bp->data; 831 info = blk->bp->data;
833 ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); 832 ASSERT(be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC);
834 node = (xfs_da_intnode_t *)info; 833 node = (xfs_da_intnode_t *)info;
835 count = INT_GET(node->hdr.count, ARCH_CONVERT); 834 count = be16_to_cpu(node->hdr.count);
836 if (count > (state->node_ents >> 1)) { 835 if (count > (state->node_ents >> 1)) {
837 *action = 0; /* blk over 50%, don't try to join */ 836 *action = 0; /* blk over 50%, don't try to join */
838 return(0); /* blk over 50%, don't try to join */ 837 return(0); /* blk over 50%, don't try to join */
@@ -849,7 +848,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
849 * Make altpath point to the block we want to keep and 848 * Make altpath point to the block we want to keep and
850 * path point to the block we want to drop (this one). 849 * path point to the block we want to drop (this one).
851 */ 850 */
852 forward = info->forw; 851 forward = (info->forw != 0);
853 memcpy(&state->altpath, &state->path, sizeof(state->path)); 852 memcpy(&state->altpath, &state->path, sizeof(state->path));
854 error = xfs_da_path_shift(state, &state->altpath, forward, 853 error = xfs_da_path_shift(state, &state->altpath, forward,
855 0, &retval); 854 0, &retval);
@@ -871,13 +870,12 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
871 * to shrink a directory over time. 870 * to shrink a directory over time.
872 */ 871 */
873 /* start with smaller blk num */ 872 /* start with smaller blk num */
874 forward = (INT_GET(info->forw, ARCH_CONVERT) 873 forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
875 < INT_GET(info->back, ARCH_CONVERT));
876 for (i = 0; i < 2; forward = !forward, i++) { 874 for (i = 0; i < 2; forward = !forward, i++) {
877 if (forward) 875 if (forward)
878 blkno = INT_GET(info->forw, ARCH_CONVERT); 876 blkno = be32_to_cpu(info->forw);
879 else 877 else
880 blkno = INT_GET(info->back, ARCH_CONVERT); 878 blkno = be32_to_cpu(info->back);
881 if (blkno == 0) 879 if (blkno == 0)
882 continue; 880 continue;
883 error = xfs_da_read_buf(state->args->trans, state->args->dp, 881 error = xfs_da_read_buf(state->args->trans, state->args->dp,
@@ -889,10 +887,10 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
889 node = (xfs_da_intnode_t *)info; 887 node = (xfs_da_intnode_t *)info;
890 count = state->node_ents; 888 count = state->node_ents;
891 count -= state->node_ents >> 2; 889 count -= state->node_ents >> 2;
892 count -= INT_GET(node->hdr.count, ARCH_CONVERT); 890 count -= be16_to_cpu(node->hdr.count);
893 node = bp->data; 891 node = bp->data;
894 ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); 892 ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
895 count -= INT_GET(node->hdr.count, ARCH_CONVERT); 893 count -= be16_to_cpu(node->hdr.count);
896 xfs_da_brelse(state->args->trans, bp); 894 xfs_da_brelse(state->args->trans, bp);
897 if (count >= 0) 895 if (count >= 0)
898 break; /* fits with at least 25% to spare */ 896 break; /* fits with at least 25% to spare */
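The loop above decides whether this node can coalesce with a sibling: the combined entry count must fit within 75% of a block's capacity. A standalone sketch of that check, with hypothetical capacities and counts (not taken from a real filesystem):

	#include <stdio.h>

	/* Nonzero if two nodes fit in one block with at least 25% slack. */
	static int nodes_can_join(int node_ents, int count1, int count2)
	{
		int space = node_ents - (node_ents >> 2);	/* 75% of capacity */
		return space - count1 - count2 >= 0;
	}

	int main(void)
	{
		printf("%d\n", nodes_can_join(64, 20, 25));	/* 1: 45 entries <= 48 */
		printf("%d\n", nodes_can_join(64, 30, 25));	/* 0: 55 entries >  48 */
		return 0;
	}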
@@ -973,16 +971,16 @@ xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
973 } 971 }
974 for (blk--, level--; level >= 0; blk--, level--) { 972 for (blk--, level--; level >= 0; blk--, level--) {
975 node = blk->bp->data; 973 node = blk->bp->data;
976 ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); 974 ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
977 btree = &node->btree[ blk->index ]; 975 btree = &node->btree[ blk->index ];
978 if (INT_GET(btree->hashval, ARCH_CONVERT) == lasthash) 976 if (be32_to_cpu(btree->hashval) == lasthash)
979 break; 977 break;
980 blk->hashval = lasthash; 978 blk->hashval = lasthash;
981 INT_SET(btree->hashval, ARCH_CONVERT, lasthash); 979 btree->hashval = cpu_to_be32(lasthash);
982 xfs_da_log_buf(state->args->trans, blk->bp, 980 xfs_da_log_buf(state->args->trans, blk->bp,
983 XFS_DA_LOGRANGE(node, btree, sizeof(*btree))); 981 XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
984 982
985 lasthash = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); 983 lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
986 } 984 }
987} 985}
988 986
@@ -997,25 +995,25 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
997 int tmp; 995 int tmp;
998 996
999 node = drop_blk->bp->data; 997 node = drop_blk->bp->data;
1000 ASSERT(drop_blk->index < INT_GET(node->hdr.count, ARCH_CONVERT)); 998 ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count));
1001 ASSERT(drop_blk->index >= 0); 999 ASSERT(drop_blk->index >= 0);
1002 1000
1003 /* 1001 /*
1004 * Copy over the offending entry, or just zero it out. 1002 * Copy over the offending entry, or just zero it out.
1005 */ 1003 */
1006 btree = &node->btree[drop_blk->index]; 1004 btree = &node->btree[drop_blk->index];
1007 if (drop_blk->index < (INT_GET(node->hdr.count, ARCH_CONVERT)-1)) { 1005 if (drop_blk->index < (be16_to_cpu(node->hdr.count)-1)) {
1008 tmp = INT_GET(node->hdr.count, ARCH_CONVERT) - drop_blk->index - 1; 1006 tmp = be16_to_cpu(node->hdr.count) - drop_blk->index - 1;
1009 tmp *= (uint)sizeof(xfs_da_node_entry_t); 1007 tmp *= (uint)sizeof(xfs_da_node_entry_t);
1010 memmove(btree, btree + 1, tmp); 1008 memmove(btree, btree + 1, tmp);
1011 xfs_da_log_buf(state->args->trans, drop_blk->bp, 1009 xfs_da_log_buf(state->args->trans, drop_blk->bp,
1012 XFS_DA_LOGRANGE(node, btree, tmp)); 1010 XFS_DA_LOGRANGE(node, btree, tmp));
1013 btree = &node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ]; 1011 btree = &node->btree[be16_to_cpu(node->hdr.count)-1];
1014 } 1012 }
1015 memset((char *)btree, 0, sizeof(xfs_da_node_entry_t)); 1013 memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
1016 xfs_da_log_buf(state->args->trans, drop_blk->bp, 1014 xfs_da_log_buf(state->args->trans, drop_blk->bp,
1017 XFS_DA_LOGRANGE(node, btree, sizeof(*btree))); 1015 XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
1018 INT_MOD(node->hdr.count, ARCH_CONVERT, -1); 1016 be16_add(&node->hdr.count, -1);
1019 xfs_da_log_buf(state->args->trans, drop_blk->bp, 1017 xfs_da_log_buf(state->args->trans, drop_blk->bp,
1020 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); 1018 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
1021 1019
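xfs_da_node_remove() above compacts the entry array with an overlapping copy, then clears the freed tail slot. A plain CPU-order sketch of that step (the struct and values here are illustrative; the kernel works on the big-endian on-disk array and logs both modified ranges):

	#include <stdio.h>
	#include <string.h>

	struct entry { unsigned hashval, before; };

	static void remove_entry(struct entry *e, int *count, int index)
	{
		int tail = *count - index - 1;

		if (tail > 0)	/* overlapping regions: memmove, not memcpy */
			memmove(&e[index], &e[index + 1], tail * sizeof(*e));
		memset(&e[*count - 1], 0, sizeof(*e));
		(*count)--;
	}

	int main(void)
	{
		struct entry e[3] = { {10, 1}, {20, 2}, {30, 3} };
		int count = 3;

		remove_entry(e, &count, 1);
		printf("%u %u count=%d\n", e[0].hashval, e[1].hashval, count);
		return 0;	/* prints: 10 30 count=2 */
	}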
@@ -1023,7 +1021,7 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
1023 * Copy the last hash value from the block to propagate upwards. 1021 * Copy the last hash value from the block to propagate upwards.
1024 */ 1022 */
1025 btree--; 1023 btree--;
1026 drop_blk->hashval = INT_GET(btree->hashval, ARCH_CONVERT); 1024 drop_blk->hashval = be32_to_cpu(btree->hashval);
1027} 1025}
1028 1026
1029/* 1027/*
@@ -1041,40 +1039,40 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1041 1039
1042 drop_node = drop_blk->bp->data; 1040 drop_node = drop_blk->bp->data;
1043 save_node = save_blk->bp->data; 1041 save_node = save_blk->bp->data;
1044 ASSERT(INT_GET(drop_node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); 1042 ASSERT(be16_to_cpu(drop_node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
1045 ASSERT(INT_GET(save_node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); 1043 ASSERT(be16_to_cpu(save_node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
1046 tp = state->args->trans; 1044 tp = state->args->trans;
1047 1045
1048 /* 1046 /*
1049 * If the dying block has lower hashvals, then move all the 1047 * If the dying block has lower hashvals, then move all the
1050 * elements in the remaining block up to make a hole. 1048 * elements in the remaining block up to make a hole.
1051 */ 1049 */
1052 if ((INT_GET(drop_node->btree[ 0 ].hashval, ARCH_CONVERT) < INT_GET(save_node->btree[ 0 ].hashval, ARCH_CONVERT)) || 1050 if ((be32_to_cpu(drop_node->btree[0].hashval) < be32_to_cpu(save_node->btree[ 0 ].hashval)) ||
1053 (INT_GET(drop_node->btree[ INT_GET(drop_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < 1051 (be32_to_cpu(drop_node->btree[be16_to_cpu(drop_node->hdr.count)-1].hashval) <
1054 INT_GET(save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT))) 1052 be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval)))
1055 { 1053 {
1056 btree = &save_node->btree[ INT_GET(drop_node->hdr.count, ARCH_CONVERT) ]; 1054 btree = &save_node->btree[be16_to_cpu(drop_node->hdr.count)];
1057 tmp = INT_GET(save_node->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_da_node_entry_t); 1055 tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
1058 memmove(btree, &save_node->btree[0], tmp); 1056 memmove(btree, &save_node->btree[0], tmp);
1059 btree = &save_node->btree[0]; 1057 btree = &save_node->btree[0];
1060 xfs_da_log_buf(tp, save_blk->bp, 1058 xfs_da_log_buf(tp, save_blk->bp,
1061 XFS_DA_LOGRANGE(save_node, btree, 1059 XFS_DA_LOGRANGE(save_node, btree,
1062 (INT_GET(save_node->hdr.count, ARCH_CONVERT) + INT_GET(drop_node->hdr.count, ARCH_CONVERT)) * 1060 (be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) *
1063 sizeof(xfs_da_node_entry_t))); 1061 sizeof(xfs_da_node_entry_t)));
1064 } else { 1062 } else {
1065 btree = &save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT) ]; 1063 btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)];
1066 xfs_da_log_buf(tp, save_blk->bp, 1064 xfs_da_log_buf(tp, save_blk->bp,
1067 XFS_DA_LOGRANGE(save_node, btree, 1065 XFS_DA_LOGRANGE(save_node, btree,
1068 INT_GET(drop_node->hdr.count, ARCH_CONVERT) * 1066 be16_to_cpu(drop_node->hdr.count) *
1069 sizeof(xfs_da_node_entry_t))); 1067 sizeof(xfs_da_node_entry_t)));
1070 } 1068 }
1071 1069
1072 /* 1070 /*
1073 * Move all the B-tree elements from drop_blk to save_blk. 1071 * Move all the B-tree elements from drop_blk to save_blk.
1074 */ 1072 */
1075 tmp = INT_GET(drop_node->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_da_node_entry_t); 1073 tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
1076 memcpy(btree, &drop_node->btree[0], tmp); 1074 memcpy(btree, &drop_node->btree[0], tmp);
1077 INT_MOD(save_node->hdr.count, ARCH_CONVERT, INT_GET(drop_node->hdr.count, ARCH_CONVERT)); 1075 be16_add(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));
1078 1076
1079 xfs_da_log_buf(tp, save_blk->bp, 1077 xfs_da_log_buf(tp, save_blk->bp,
1080 XFS_DA_LOGRANGE(save_node, &save_node->hdr, 1078 XFS_DA_LOGRANGE(save_node, &save_node->hdr,
@@ -1083,7 +1081,7 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1083 /* 1081 /*
1084 * Save the last hashval in the remaining block for upward propagation. 1082 * Save the last hashval in the remaining block for upward propagation.
1085 */ 1083 */
1086 save_blk->hashval = INT_GET(save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); 1084 save_blk->hashval = be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval);
1087} 1085}
1088 1086
1089/*======================================================================== 1087/*========================================================================
@@ -1138,46 +1136,46 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
1138 return(error); 1136 return(error);
1139 } 1137 }
1140 curr = blk->bp->data; 1138 curr = blk->bp->data;
1141 ASSERT(INT_GET(curr->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC || 1139 ASSERT(be16_to_cpu(curr->magic) == XFS_DA_NODE_MAGIC ||
1142 INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) || 1140 be16_to_cpu(curr->magic) == XFS_DIRX_LEAF_MAGIC(state->mp) ||
1143 INT_GET(curr->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); 1141 be16_to_cpu(curr->magic) == XFS_ATTR_LEAF_MAGIC);
1144 1142
1145 /* 1143 /*
1146 * Search an intermediate node for a match. 1144 * Search an intermediate node for a match.
1147 */ 1145 */
1148 blk->magic = INT_GET(curr->magic, ARCH_CONVERT); 1146 blk->magic = be16_to_cpu(curr->magic);
1149 if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { 1147 if (blk->magic == XFS_DA_NODE_MAGIC) {
1150 node = blk->bp->data; 1148 node = blk->bp->data;
1151 blk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); 1149 blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
1152 1150
1153 /* 1151 /*
1154 * Binary search. (note: small blocks will skip loop) 1152 * Binary search. (note: small blocks will skip loop)
1155 */ 1153 */
1156 max = INT_GET(node->hdr.count, ARCH_CONVERT); 1154 max = be16_to_cpu(node->hdr.count);
1157 probe = span = max / 2; 1155 probe = span = max / 2;
1158 hashval = args->hashval; 1156 hashval = args->hashval;
1159 for (btree = &node->btree[probe]; span > 4; 1157 for (btree = &node->btree[probe]; span > 4;
1160 btree = &node->btree[probe]) { 1158 btree = &node->btree[probe]) {
1161 span /= 2; 1159 span /= 2;
1162 if (INT_GET(btree->hashval, ARCH_CONVERT) < hashval) 1160 if (be32_to_cpu(btree->hashval) < hashval)
1163 probe += span; 1161 probe += span;
1164 else if (INT_GET(btree->hashval, ARCH_CONVERT) > hashval) 1162 else if (be32_to_cpu(btree->hashval) > hashval)
1165 probe -= span; 1163 probe -= span;
1166 else 1164 else
1167 break; 1165 break;
1168 } 1166 }
1169 ASSERT((probe >= 0) && (probe < max)); 1167 ASSERT((probe >= 0) && (probe < max));
1170 ASSERT((span <= 4) || (INT_GET(btree->hashval, ARCH_CONVERT) == hashval)); 1168 ASSERT((span <= 4) || (be32_to_cpu(btree->hashval) == hashval));
1171 1169
1172 /* 1170 /*
1173 * Since we may have duplicate hashval's, find the first 1171 * Since we may have duplicate hashval's, find the first
1174 * matching hashval in the node. 1172 * matching hashval in the node.
1175 */ 1173 */
1176 while ((probe > 0) && (INT_GET(btree->hashval, ARCH_CONVERT) >= hashval)) { 1174 while ((probe > 0) && (be32_to_cpu(btree->hashval) >= hashval)) {
1177 btree--; 1175 btree--;
1178 probe--; 1176 probe--;
1179 } 1177 }
1180 while ((probe < max) && (INT_GET(btree->hashval, ARCH_CONVERT) < hashval)) { 1178 while ((probe < max) && (be32_to_cpu(btree->hashval) < hashval)) {
1181 btree++; 1179 btree++;
1182 probe++; 1180 probe++;
1183 } 1181 }
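The lookup above halves a probe span until it is small, then rewinds linearly so that duplicate hash values resolve to the first matching entry. A self-contained sketch of the same logic over a plain array (array contents made up):

	#include <stdio.h>
	#include <stdint.h>

	static int first_match(const uint32_t *hash, int max, uint32_t want)
	{
		int probe, span;

		probe = span = max / 2;
		while (span > 4) {
			span /= 2;
			if (hash[probe] < want)
				probe += span;
			else if (hash[probe] > want)
				probe -= span;
			else
				break;
		}
		/* Back up to the first entry with hash >= want ... */
		while (probe > 0 && hash[probe] >= want)
			probe--;
		/* ... then forward to the first entry with hash >= want. */
		while (probe < max && hash[probe] < want)
			probe++;
		return probe;	/* may equal max: want is beyond the last entry */
	}

	int main(void)
	{
		uint32_t h[] = { 10, 20, 20, 20, 30, 40, 50, 60 };
		printf("%d\n", first_match(h, 8, 20));	/* 1: first of the duplicates */
		return 0;
	}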
@@ -1187,21 +1185,21 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
1187 */ 1185 */
1188 if (probe == max) { 1186 if (probe == max) {
1189 blk->index = max-1; 1187 blk->index = max-1;
1190 blkno = INT_GET(node->btree[ max-1 ].before, ARCH_CONVERT); 1188 blkno = be32_to_cpu(node->btree[max-1].before);
1191 } else { 1189 } else {
1192 blk->index = probe; 1190 blk->index = probe;
1193 blkno = INT_GET(btree->before, ARCH_CONVERT); 1191 blkno = be32_to_cpu(btree->before);
1194 } 1192 }
1195 } 1193 }
1196 else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC) { 1194 else if (be16_to_cpu(curr->magic) == XFS_ATTR_LEAF_MAGIC) {
1197 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL); 1195 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1198 break; 1196 break;
1199 } 1197 }
1200 else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) { 1198 else if (be16_to_cpu(curr->magic) == XFS_DIR_LEAF_MAGIC) {
1201 blk->hashval = xfs_dir_leaf_lasthash(blk->bp, NULL); 1199 blk->hashval = xfs_dir_leaf_lasthash(blk->bp, NULL);
1202 break; 1200 break;
1203 } 1201 }
1204 else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) { 1202 else if (be16_to_cpu(curr->magic) == XFS_DIR2_LEAFN_MAGIC) {
1205 blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL); 1203 blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
1206 break; 1204 break;
1207 } 1205 }
@@ -1274,8 +1272,8 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
1274 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC || 1272 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
1275 old_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp) || 1273 old_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp) ||
1276 old_blk->magic == XFS_ATTR_LEAF_MAGIC); 1274 old_blk->magic == XFS_ATTR_LEAF_MAGIC);
1277 ASSERT(old_blk->magic == INT_GET(old_info->magic, ARCH_CONVERT)); 1275 ASSERT(old_blk->magic == be16_to_cpu(old_info->magic));
1278 ASSERT(new_blk->magic == INT_GET(new_info->magic, ARCH_CONVERT)); 1276 ASSERT(new_blk->magic == be16_to_cpu(new_info->magic));
1279 ASSERT(old_blk->magic == new_blk->magic); 1277 ASSERT(old_blk->magic == new_blk->magic);
1280 1278
1281 switch (old_blk->magic) { 1279 switch (old_blk->magic) {
@@ -1302,47 +1300,44 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
1302 /* 1300 /*
1303 * Link new block in before existing block. 1301 * Link new block in before existing block.
1304 */ 1302 */
1305 INT_SET(new_info->forw, ARCH_CONVERT, old_blk->blkno); 1303 new_info->forw = cpu_to_be32(old_blk->blkno);
1306 new_info->back = old_info->back; /* INT_: direct copy */ 1304 new_info->back = old_info->back;
1307 if (INT_GET(old_info->back, ARCH_CONVERT)) { 1305 if (old_info->back) {
1308 error = xfs_da_read_buf(args->trans, args->dp, 1306 error = xfs_da_read_buf(args->trans, args->dp,
1309 INT_GET(old_info->back, 1307 be32_to_cpu(old_info->back),
1310 ARCH_CONVERT), -1, &bp, 1308 -1, &bp, args->whichfork);
1311 args->whichfork);
1312 if (error) 1309 if (error)
1313 return(error); 1310 return(error);
1314 ASSERT(bp != NULL); 1311 ASSERT(bp != NULL);
1315 tmp_info = bp->data; 1312 tmp_info = bp->data;
1316 ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) == INT_GET(old_info->magic, ARCH_CONVERT)); 1313 ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic));
1317 ASSERT(INT_GET(tmp_info->forw, ARCH_CONVERT) == old_blk->blkno); 1314 ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
1318 INT_SET(tmp_info->forw, ARCH_CONVERT, new_blk->blkno); 1315 tmp_info->forw = cpu_to_be32(new_blk->blkno);
1319 xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); 1316 xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1320 xfs_da_buf_done(bp); 1317 xfs_da_buf_done(bp);
1321 } 1318 }
1322 INT_SET(old_info->back, ARCH_CONVERT, new_blk->blkno); 1319 old_info->back = cpu_to_be32(new_blk->blkno);
1323 } else { 1320 } else {
1324 /* 1321 /*
1325 * Link new block in after existing block. 1322 * Link new block in after existing block.
1326 */ 1323 */
1327 new_info->forw = old_info->forw; /* INT_: direct copy */ 1324 new_info->forw = old_info->forw;
1328 INT_SET(new_info->back, ARCH_CONVERT, old_blk->blkno); 1325 new_info->back = cpu_to_be32(old_blk->blkno);
1329 if (INT_GET(old_info->forw, ARCH_CONVERT)) { 1326 if (old_info->forw) {
1330 error = xfs_da_read_buf(args->trans, args->dp, 1327 error = xfs_da_read_buf(args->trans, args->dp,
1331 INT_GET(old_info->forw, ARCH_CONVERT), -1, &bp, 1328 be32_to_cpu(old_info->forw),
1332 args->whichfork); 1329 -1, &bp, args->whichfork);
1333 if (error) 1330 if (error)
1334 return(error); 1331 return(error);
1335 ASSERT(bp != NULL); 1332 ASSERT(bp != NULL);
1336 tmp_info = bp->data; 1333 tmp_info = bp->data;
1337 ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) 1334 ASSERT(tmp_info->magic == old_info->magic);
1338 == INT_GET(old_info->magic, ARCH_CONVERT)); 1335 ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
1339 ASSERT(INT_GET(tmp_info->back, ARCH_CONVERT) 1336 tmp_info->back = cpu_to_be32(new_blk->blkno);
1340 == old_blk->blkno);
1341 INT_SET(tmp_info->back, ARCH_CONVERT, new_blk->blkno);
1342 xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); 1337 xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1343 xfs_da_buf_done(bp); 1338 xfs_da_buf_done(bp);
1344 } 1339 }
1345 INT_SET(old_info->forw, ARCH_CONVERT, new_blk->blkno); 1340 old_info->forw = cpu_to_be32(new_blk->blkno);
1346 } 1341 }
1347 1342
1348 xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1); 1343 xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
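The sibling headers above form a doubly linked list keyed by block number. A plain-pointer sketch of the "link new block in before existing block" branch; the kernel version additionally reads the old back-sibling's buffer via xfs_da_read_buf() and logs every header it touches:

	#include <assert.h>
	#include <stddef.h>

	struct blk {
		struct blk *forw;	/* following block in the chain */
		struct blk *back;	/* previous block in the chain */
	};

	static void link_before(struct blk *new_blk, struct blk *old_blk)
	{
		new_blk->forw = old_blk;
		new_blk->back = old_blk->back;
		if (old_blk->back)
			old_blk->back->forw = new_blk;	/* fix up old back-sibling */
		old_blk->back = new_blk;
	}

	int main(void)
	{
		struct blk a = { NULL, NULL }, b = { NULL, NULL };

		link_before(&a, &b);	/* chain becomes: a <-> b */
		assert(a.forw == &b && b.back == &a && a.back == NULL);
		return 0;
	}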
@@ -1360,13 +1355,13 @@ xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
1360 1355
1361 node1 = node1_bp->data; 1356 node1 = node1_bp->data;
1362 node2 = node2_bp->data; 1357 node2 = node2_bp->data;
1363 ASSERT((INT_GET(node1->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) && 1358 ASSERT((be16_to_cpu(node1->hdr.info.magic) == XFS_DA_NODE_MAGIC) &&
1364 (INT_GET(node2->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC)); 1359 (be16_to_cpu(node2->hdr.info.magic) == XFS_DA_NODE_MAGIC));
1365 if ((INT_GET(node1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(node2->hdr.count, ARCH_CONVERT) > 0) && 1360 if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
1366 ((INT_GET(node2->btree[ 0 ].hashval, ARCH_CONVERT) < 1361 ((be32_to_cpu(node2->btree[0].hashval) <
1367 INT_GET(node1->btree[ 0 ].hashval, ARCH_CONVERT)) || 1362 be32_to_cpu(node1->btree[0].hashval)) ||
1368 (INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < 1363 (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
1369 INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) { 1364 be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
1370 return(1); 1365 return(1);
1371 } 1366 }
1372 return(0); 1367 return(0);
@@ -1381,12 +1376,12 @@ xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count)
1381 xfs_da_intnode_t *node; 1376 xfs_da_intnode_t *node;
1382 1377
1383 node = bp->data; 1378 node = bp->data;
1384 ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); 1379 ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
1385 if (count) 1380 if (count)
1386 *count = INT_GET(node->hdr.count, ARCH_CONVERT); 1381 *count = be16_to_cpu(node->hdr.count);
1387 if (!node->hdr.count) 1382 if (!node->hdr.count)
1388 return(0); 1383 return(0);
1389 return(INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)); 1384 return be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
1390} 1385}
1391 1386
1392/* 1387/*
@@ -1411,50 +1406,47 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1411 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC || 1406 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
1412 save_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp) || 1407 save_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp) ||
1413 save_blk->magic == XFS_ATTR_LEAF_MAGIC); 1408 save_blk->magic == XFS_ATTR_LEAF_MAGIC);
1414 ASSERT(save_blk->magic == INT_GET(save_info->magic, ARCH_CONVERT)); 1409 ASSERT(save_blk->magic == be16_to_cpu(save_info->magic));
1415 ASSERT(drop_blk->magic == INT_GET(drop_info->magic, ARCH_CONVERT)); 1410 ASSERT(drop_blk->magic == be16_to_cpu(drop_info->magic));
1416 ASSERT(save_blk->magic == drop_blk->magic); 1411 ASSERT(save_blk->magic == drop_blk->magic);
1417 ASSERT((INT_GET(save_info->forw, ARCH_CONVERT) == drop_blk->blkno) || 1412 ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
1418 (INT_GET(save_info->back, ARCH_CONVERT) == drop_blk->blkno)); 1413 (be32_to_cpu(save_info->back) == drop_blk->blkno));
1419 ASSERT((INT_GET(drop_info->forw, ARCH_CONVERT) == save_blk->blkno) || 1414 ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
1420 (INT_GET(drop_info->back, ARCH_CONVERT) == save_blk->blkno)); 1415 (be32_to_cpu(drop_info->back) == save_blk->blkno));
1421 1416
1422 /* 1417 /*
1423 * Unlink the leaf block from the doubly linked chain of leaves. 1418 * Unlink the leaf block from the doubly linked chain of leaves.
1424 */ 1419 */
1425 if (INT_GET(save_info->back, ARCH_CONVERT) == drop_blk->blkno) { 1420 if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
1426 save_info->back = drop_info->back; /* INT_: direct copy */ 1421 save_info->back = drop_info->back;
1427 if (INT_GET(drop_info->back, ARCH_CONVERT)) { 1422 if (drop_info->back) {
1428 error = xfs_da_read_buf(args->trans, args->dp, 1423 error = xfs_da_read_buf(args->trans, args->dp,
1429 INT_GET(drop_info->back, 1424 be32_to_cpu(drop_info->back),
1430 ARCH_CONVERT), -1, &bp, 1425 -1, &bp, args->whichfork);
1431 args->whichfork);
1432 if (error) 1426 if (error)
1433 return(error); 1427 return(error);
1434 ASSERT(bp != NULL); 1428 ASSERT(bp != NULL);
1435 tmp_info = bp->data; 1429 tmp_info = bp->data;
1436 ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) == INT_GET(save_info->magic, ARCH_CONVERT)); 1430 ASSERT(tmp_info->magic == save_info->magic);
1437 ASSERT(INT_GET(tmp_info->forw, ARCH_CONVERT) == drop_blk->blkno); 1431 ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
1438 INT_SET(tmp_info->forw, ARCH_CONVERT, save_blk->blkno); 1432 tmp_info->forw = cpu_to_be32(save_blk->blkno);
1439 xfs_da_log_buf(args->trans, bp, 0, 1433 xfs_da_log_buf(args->trans, bp, 0,
1440 sizeof(*tmp_info) - 1); 1434 sizeof(*tmp_info) - 1);
1441 xfs_da_buf_done(bp); 1435 xfs_da_buf_done(bp);
1442 } 1436 }
1443 } else { 1437 } else {
1444 save_info->forw = drop_info->forw; /* INT_: direct copy */ 1438 save_info->forw = drop_info->forw;
1445 if (INT_GET(drop_info->forw, ARCH_CONVERT)) { 1439 if (drop_info->forw) {
1446 error = xfs_da_read_buf(args->trans, args->dp, 1440 error = xfs_da_read_buf(args->trans, args->dp,
1447 INT_GET(drop_info->forw, ARCH_CONVERT), -1, &bp, 1441 be32_to_cpu(drop_info->forw),
1448 args->whichfork); 1442 -1, &bp, args->whichfork);
1449 if (error) 1443 if (error)
1450 return(error); 1444 return(error);
1451 ASSERT(bp != NULL); 1445 ASSERT(bp != NULL);
1452 tmp_info = bp->data; 1446 tmp_info = bp->data;
1453 ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) 1447 ASSERT(tmp_info->magic == save_info->magic);
1454 == INT_GET(save_info->magic, ARCH_CONVERT)); 1448 ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
1455 ASSERT(INT_GET(tmp_info->back, ARCH_CONVERT) 1449 tmp_info->back = cpu_to_be32(save_blk->blkno);
1456 == drop_blk->blkno);
1457 INT_SET(tmp_info->back, ARCH_CONVERT, save_blk->blkno);
1458 xfs_da_log_buf(args->trans, bp, 0, 1450 xfs_da_log_buf(args->trans, bp, 0,
1459 sizeof(*tmp_info) - 1); 1451 sizeof(*tmp_info) - 1);
1460 xfs_da_buf_done(bp); 1452 xfs_da_buf_done(bp);
@@ -1497,14 +1489,14 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
1497 for (blk = &path->blk[level]; level >= 0; blk--, level--) { 1489 for (blk = &path->blk[level]; level >= 0; blk--, level--) {
1498 ASSERT(blk->bp != NULL); 1490 ASSERT(blk->bp != NULL);
1499 node = blk->bp->data; 1491 node = blk->bp->data;
1500 ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); 1492 ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
1501 if (forward && (blk->index < INT_GET(node->hdr.count, ARCH_CONVERT)-1)) { 1493 if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
1502 blk->index++; 1494 blk->index++;
1503 blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT); 1495 blkno = be32_to_cpu(node->btree[blk->index].before);
1504 break; 1496 break;
1505 } else if (!forward && (blk->index > 0)) { 1497 } else if (!forward && (blk->index > 0)) {
1506 blk->index--; 1498 blk->index--;
1507 blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT); 1499 blkno = be32_to_cpu(node->btree[blk->index].before);
1508 break; 1500 break;
1509 } 1501 }
1510 } 1502 }
@@ -1536,18 +1528,18 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
1536 return(error); 1528 return(error);
1537 ASSERT(blk->bp != NULL); 1529 ASSERT(blk->bp != NULL);
1538 info = blk->bp->data; 1530 info = blk->bp->data;
1539 ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC || 1531 ASSERT(be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC ||
1540 INT_GET(info->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) || 1532 be16_to_cpu(info->magic) == XFS_DIRX_LEAF_MAGIC(state->mp) ||
1541 INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); 1533 be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC);
1542 blk->magic = INT_GET(info->magic, ARCH_CONVERT); 1534 blk->magic = be16_to_cpu(info->magic);
1543 if (INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { 1535 if (blk->magic == XFS_DA_NODE_MAGIC) {
1544 node = (xfs_da_intnode_t *)info; 1536 node = (xfs_da_intnode_t *)info;
1545 blk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); 1537 blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
1546 if (forward) 1538 if (forward)
1547 blk->index = 0; 1539 blk->index = 0;
1548 else 1540 else
1549 blk->index = INT_GET(node->hdr.count, ARCH_CONVERT)-1; 1541 blk->index = be16_to_cpu(node->hdr.count)-1;
1550 blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT); 1542 blkno = be32_to_cpu(node->btree[blk->index].before);
1551 } else { 1543 } else {
1552 ASSERT(level == path->active-1); 1544 ASSERT(level == path->active-1);
1553 blk->index = 0; 1545 blk->index = 0;
@@ -1788,40 +1780,40 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1788 /* 1780 /*
1789 * Get values from the moved block. 1781 * Get values from the moved block.
1790 */ 1782 */
1791 if (INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) { 1783 if (be16_to_cpu(dead_info->magic) == XFS_DIR_LEAF_MAGIC) {
1792 ASSERT(XFS_DIR_IS_V1(mp)); 1784 ASSERT(XFS_DIR_IS_V1(mp));
1793 dead_leaf = (xfs_dir_leafblock_t *)dead_info; 1785 dead_leaf = (xfs_dir_leafblock_t *)dead_info;
1794 dead_level = 0; 1786 dead_level = 0;
1795 dead_hash = 1787 dead_hash =
1796 INT_GET(dead_leaf->entries[INT_GET(dead_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); 1788 INT_GET(dead_leaf->entries[INT_GET(dead_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT);
1797 } else if (INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) { 1789 } else if (be16_to_cpu(dead_info->magic) == XFS_DIR2_LEAFN_MAGIC) {
1798 ASSERT(XFS_DIR_IS_V2(mp)); 1790 ASSERT(XFS_DIR_IS_V2(mp));
1799 dead_leaf2 = (xfs_dir2_leaf_t *)dead_info; 1791 dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
1800 dead_level = 0; 1792 dead_level = 0;
1801 dead_hash = INT_GET(dead_leaf2->ents[INT_GET(dead_leaf2->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); 1793 dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval);
1802 } else { 1794 } else {
1803 ASSERT(INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); 1795 ASSERT(be16_to_cpu(dead_info->magic) == XFS_DA_NODE_MAGIC);
1804 dead_node = (xfs_da_intnode_t *)dead_info; 1796 dead_node = (xfs_da_intnode_t *)dead_info;
1805 dead_level = INT_GET(dead_node->hdr.level, ARCH_CONVERT); 1797 dead_level = be16_to_cpu(dead_node->hdr.level);
1806 dead_hash = INT_GET(dead_node->btree[INT_GET(dead_node->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); 1798 dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval);
1807 } 1799 }
1808 sib_buf = par_buf = NULL; 1800 sib_buf = par_buf = NULL;
1809 /* 1801 /*
1810 * If the moved block has a left sibling, fix up the pointers. 1802 * If the moved block has a left sibling, fix up the pointers.
1811 */ 1803 */
1812 if ((sib_blkno = INT_GET(dead_info->back, ARCH_CONVERT))) { 1804 if ((sib_blkno = be32_to_cpu(dead_info->back))) {
1813 if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w))) 1805 if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
1814 goto done; 1806 goto done;
1815 sib_info = sib_buf->data; 1807 sib_info = sib_buf->data;
1816 if (unlikely( 1808 if (unlikely(
1817 INT_GET(sib_info->forw, ARCH_CONVERT) != last_blkno || 1809 be32_to_cpu(sib_info->forw) != last_blkno ||
1818 INT_GET(sib_info->magic, ARCH_CONVERT) != INT_GET(dead_info->magic, ARCH_CONVERT))) { 1810 sib_info->magic != dead_info->magic)) {
1819 XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)", 1811 XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
1820 XFS_ERRLEVEL_LOW, mp); 1812 XFS_ERRLEVEL_LOW, mp);
1821 error = XFS_ERROR(EFSCORRUPTED); 1813 error = XFS_ERROR(EFSCORRUPTED);
1822 goto done; 1814 goto done;
1823 } 1815 }
1824 INT_SET(sib_info->forw, ARCH_CONVERT, dead_blkno); 1816 sib_info->forw = cpu_to_be32(dead_blkno);
1825 xfs_da_log_buf(tp, sib_buf, 1817 xfs_da_log_buf(tp, sib_buf,
1826 XFS_DA_LOGRANGE(sib_info, &sib_info->forw, 1818 XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
1827 sizeof(sib_info->forw))); 1819 sizeof(sib_info->forw)));
@@ -1831,20 +1823,19 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1831 /* 1823 /*
1832 * If the moved block has a right sibling, fix up the pointers. 1824 * If the moved block has a right sibling, fix up the pointers.
1833 */ 1825 */
1834 if ((sib_blkno = INT_GET(dead_info->forw, ARCH_CONVERT))) { 1826 if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
1835 if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w))) 1827 if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
1836 goto done; 1828 goto done;
1837 sib_info = sib_buf->data; 1829 sib_info = sib_buf->data;
1838 if (unlikely( 1830 if (unlikely(
1839 INT_GET(sib_info->back, ARCH_CONVERT) != last_blkno 1831 be32_to_cpu(sib_info->back) != last_blkno ||
1840 || INT_GET(sib_info->magic, ARCH_CONVERT) 1832 sib_info->magic != dead_info->magic)) {
1841 != INT_GET(dead_info->magic, ARCH_CONVERT))) {
1842 XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)", 1833 XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
1843 XFS_ERRLEVEL_LOW, mp); 1834 XFS_ERRLEVEL_LOW, mp);
1844 error = XFS_ERROR(EFSCORRUPTED); 1835 error = XFS_ERROR(EFSCORRUPTED);
1845 goto done; 1836 goto done;
1846 } 1837 }
1847 INT_SET(sib_info->back, ARCH_CONVERT, dead_blkno); 1838 sib_info->back = cpu_to_be32(dead_blkno);
1848 xfs_da_log_buf(tp, sib_buf, 1839 xfs_da_log_buf(tp, sib_buf,
1849 XFS_DA_LOGRANGE(sib_info, &sib_info->back, 1840 XFS_DA_LOGRANGE(sib_info, &sib_info->back,
1850 sizeof(sib_info->back))); 1841 sizeof(sib_info->back)));
@@ -1861,26 +1852,26 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1861 goto done; 1852 goto done;
1862 par_node = par_buf->data; 1853 par_node = par_buf->data;
1863 if (unlikely( 1854 if (unlikely(
1864 INT_GET(par_node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC || 1855 be16_to_cpu(par_node->hdr.info.magic) != XFS_DA_NODE_MAGIC ||
1865 (level >= 0 && level != INT_GET(par_node->hdr.level, ARCH_CONVERT) + 1))) { 1856 (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
1866 XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)", 1857 XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
1867 XFS_ERRLEVEL_LOW, mp); 1858 XFS_ERRLEVEL_LOW, mp);
1868 error = XFS_ERROR(EFSCORRUPTED); 1859 error = XFS_ERROR(EFSCORRUPTED);
1869 goto done; 1860 goto done;
1870 } 1861 }
1871 level = INT_GET(par_node->hdr.level, ARCH_CONVERT); 1862 level = be16_to_cpu(par_node->hdr.level);
1872 for (entno = 0; 1863 for (entno = 0;
1873 entno < INT_GET(par_node->hdr.count, ARCH_CONVERT) && 1864 entno < be16_to_cpu(par_node->hdr.count) &&
1874 INT_GET(par_node->btree[entno].hashval, ARCH_CONVERT) < dead_hash; 1865 be32_to_cpu(par_node->btree[entno].hashval) < dead_hash;
1875 entno++) 1866 entno++)
1876 continue; 1867 continue;
1877 if (unlikely(entno == INT_GET(par_node->hdr.count, ARCH_CONVERT))) { 1868 if (unlikely(entno == be16_to_cpu(par_node->hdr.count))) {
1878 XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)", 1869 XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
1879 XFS_ERRLEVEL_LOW, mp); 1870 XFS_ERRLEVEL_LOW, mp);
1880 error = XFS_ERROR(EFSCORRUPTED); 1871 error = XFS_ERROR(EFSCORRUPTED);
1881 goto done; 1872 goto done;
1882 } 1873 }
1883 par_blkno = INT_GET(par_node->btree[entno].before, ARCH_CONVERT); 1874 par_blkno = be32_to_cpu(par_node->btree[entno].before);
1884 if (level == dead_level + 1) 1875 if (level == dead_level + 1)
1885 break; 1876 break;
1886 xfs_da_brelse(tp, par_buf); 1877 xfs_da_brelse(tp, par_buf);
@@ -1892,13 +1883,13 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1892 */ 1883 */
1893 for (;;) { 1884 for (;;) {
1894 for (; 1885 for (;
1895 entno < INT_GET(par_node->hdr.count, ARCH_CONVERT) && 1886 entno < be16_to_cpu(par_node->hdr.count) &&
1896 INT_GET(par_node->btree[entno].before, ARCH_CONVERT) != last_blkno; 1887 be32_to_cpu(par_node->btree[entno].before) != last_blkno;
1897 entno++) 1888 entno++)
1898 continue; 1889 continue;
1899 if (entno < INT_GET(par_node->hdr.count, ARCH_CONVERT)) 1890 if (entno < be16_to_cpu(par_node->hdr.count))
1900 break; 1891 break;
1901 par_blkno = INT_GET(par_node->hdr.info.forw, ARCH_CONVERT); 1892 par_blkno = be32_to_cpu(par_node->hdr.info.forw);
1902 xfs_da_brelse(tp, par_buf); 1893 xfs_da_brelse(tp, par_buf);
1903 par_buf = NULL; 1894 par_buf = NULL;
1904 if (unlikely(par_blkno == 0)) { 1895 if (unlikely(par_blkno == 0)) {
@@ -1911,8 +1902,8 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1911 goto done; 1902 goto done;
1912 par_node = par_buf->data; 1903 par_node = par_buf->data;
1913 if (unlikely( 1904 if (unlikely(
1914 INT_GET(par_node->hdr.level, ARCH_CONVERT) != level || 1905 be16_to_cpu(par_node->hdr.level) != level ||
1915 INT_GET(par_node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC)) { 1906 be16_to_cpu(par_node->hdr.info.magic) != XFS_DA_NODE_MAGIC)) {
1916 XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)", 1907 XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
1917 XFS_ERRLEVEL_LOW, mp); 1908 XFS_ERRLEVEL_LOW, mp);
1918 error = XFS_ERROR(EFSCORRUPTED); 1909 error = XFS_ERROR(EFSCORRUPTED);
@@ -1923,7 +1914,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1923 /* 1914 /*
1924 * Update the parent entry pointing to the moved block. 1915 * Update the parent entry pointing to the moved block.
1925 */ 1916 */
1926 INT_SET(par_node->btree[entno].before, ARCH_CONVERT, dead_blkno); 1917 par_node->btree[entno].before = cpu_to_be32(dead_blkno);
1927 xfs_da_log_buf(tp, par_buf, 1918 xfs_da_log_buf(tp, par_buf,
1928 XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before, 1919 XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
1929 sizeof(par_node->btree[entno].before))); 1920 sizeof(par_node->btree[entno].before)));
@@ -2203,8 +2194,8 @@ xfs_da_do_buf(
2203 info = rbp->data; 2194 info = rbp->data;
2204 data = rbp->data; 2195 data = rbp->data;
2205 free = rbp->data; 2196 free = rbp->data;
2206 magic = INT_GET(info->magic, ARCH_CONVERT); 2197 magic = be16_to_cpu(info->magic);
2207 magic1 = INT_GET(data->hdr.magic, ARCH_CONVERT); 2198 magic1 = be32_to_cpu(data->hdr.magic);
2208 if (unlikely( 2199 if (unlikely(
2209 XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) && 2200 XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
2210 (magic != XFS_DIR_LEAF_MAGIC) && 2201 (magic != XFS_DIR_LEAF_MAGIC) &&
@@ -2213,7 +2204,7 @@ xfs_da_do_buf(
2213 (magic != XFS_DIR2_LEAFN_MAGIC) && 2204 (magic != XFS_DIR2_LEAFN_MAGIC) &&
2214 (magic1 != XFS_DIR2_BLOCK_MAGIC) && 2205 (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
2215 (magic1 != XFS_DIR2_DATA_MAGIC) && 2206 (magic1 != XFS_DIR2_DATA_MAGIC) &&
2216 (INT_GET(free->hdr.magic, ARCH_CONVERT) != XFS_DIR2_FREE_MAGIC), 2207 (be32_to_cpu(free->hdr.magic) != XFS_DIR2_FREE_MAGIC),
2217 mp, XFS_ERRTAG_DA_READ_BUF, 2208 mp, XFS_ERRTAG_DA_READ_BUF,
2218 XFS_RANDOM_DA_READ_BUF))) { 2209 XFS_RANDOM_DA_READ_BUF))) {
2219 xfs_buftrace("DA READ ERROR", rbp->bps[0]); 2210 xfs_buftrace("DA READ ERROR", rbp->bps[0]);
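Taken together, the xfs_da_btree.c changes follow a few mechanical rules: convert on every load that feeds arithmetic or an ordered comparison, convert on every store, but leave direct field-to-field copies and equality tests alone, since byte order cancels out. A compilable illustration of those rules (plain typedef here; the kernel's __be32 is a sparse-annotated bitwise type):

	#include <stdint.h>
	#include <arpa/inet.h>

	typedef uint32_t __be32;

	static inline uint32_t be32_to_cpu(__be32 x) { return ntohl(x); }
	static inline __be32 cpu_to_be32(uint32_t x) { return htonl(x); }

	struct blkinfo { __be32 forw, back; };

	uint32_t examples(struct blkinfo *a, const struct blkinfo *b, uint32_t blkno)
	{
		a->forw = b->forw;			/* direct copy: no conversion */
		if (a->back == b->back)			/* equality: byte order cancels */
			a->back = cpu_to_be32(blkno);	/* store: convert to disk order */
		return be32_to_cpu(a->forw);		/* ordered use: convert to CPU order */
	}

	int main(void)
	{
		struct blkinfo a = { 0, 0 }, b = { cpu_to_be32(7), 0 };
		return (int)examples(&a, &b, 5) - 7;	/* exits 0: forw round-trips as 7 */
	}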
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h
index 41352113721a..243a730d5ec8 100644
--- a/fs/xfs/xfs_da_btree.h
+++ b/fs/xfs/xfs_da_btree.h
@@ -45,10 +45,10 @@ struct zone;
45 (XFS_DIR_IS_V1(mp) ? XFS_DIR_LEAF_MAGIC : XFS_DIR2_LEAFN_MAGIC) 45 (XFS_DIR_IS_V1(mp) ? XFS_DIR_LEAF_MAGIC : XFS_DIR2_LEAFN_MAGIC)
46 46
47typedef struct xfs_da_blkinfo { 47typedef struct xfs_da_blkinfo {
 48 xfs_dablk_t forw; /* following block in list */ 48 __be32 forw; /* following block in list */
 49 xfs_dablk_t back; /* previous block in list */ 49 __be32 back; /* previous block in list */
50 __uint16_t magic; /* validity check on block */ 50 __be16 magic; /* validity check on block */
51 __uint16_t pad; /* unused */ 51 __be16 pad; /* unused */
52} xfs_da_blkinfo_t; 52} xfs_da_blkinfo_t;
53 53
54/* 54/*
@@ -65,12 +65,12 @@ typedef struct xfs_da_blkinfo {
65typedef struct xfs_da_intnode { 65typedef struct xfs_da_intnode {
66 struct xfs_da_node_hdr { /* constant-structure header block */ 66 struct xfs_da_node_hdr { /* constant-structure header block */
67 xfs_da_blkinfo_t info; /* block type, links, etc. */ 67 xfs_da_blkinfo_t info; /* block type, links, etc. */
68 __uint16_t count; /* count of active entries */ 68 __be16 count; /* count of active entries */
69 __uint16_t level; /* level above leaves (leaf == 0) */ 69 __be16 level; /* level above leaves (leaf == 0) */
70 } hdr; 70 } hdr;
71 struct xfs_da_node_entry { 71 struct xfs_da_node_entry {
72 xfs_dahash_t hashval; /* hash value for this descendant */ 72 __be32 hashval; /* hash value for this descendant */
73 xfs_dablk_t before; /* Btree block before this key */ 73 __be32 before; /* Btree block before this key */
74 } btree[1]; /* variable sized array of keys */ 74 } btree[1]; /* variable sized array of keys */
75} xfs_da_intnode_t; 75} xfs_da_intnode_t;
76typedef struct xfs_da_node_hdr xfs_da_node_hdr_t; 76typedef struct xfs_da_node_hdr xfs_da_node_hdr_t;
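The header now declares the on-disk fields as __be16/__be32 rather than __uint16_t/xfs_dablk_t, which is what lets sparse catch a missed conversion. Roughly how those types were defined in include/linux/types.h of that era (simplified; exact spelling may differ):

	#ifdef __CHECKER__
	#define __bitwise	__attribute__((bitwise))
	#else
	#define __bitwise
	#endif

	typedef unsigned short __bitwise __be16;
	typedef unsigned int   __bitwise __be32;

	/*
	 * Under sparse these are distinct types, so
	 *	__be16 count = 3;		-- warned about
	 *	__be16 count = cpu_to_be16(3);	-- accepted
	 * while a normal compile sees plain integers.
	 */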
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index c6191d00ad27..4968a6358e61 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -83,7 +83,7 @@ xfs_swapext(
83 83
84 /* Pull information for the target fd */ 84 /* Pull information for the target fd */
85 if (((fp = fget((int)sxp->sx_fdtarget)) == NULL) || 85 if (((fp = fget((int)sxp->sx_fdtarget)) == NULL) ||
86 ((vp = LINVFS_GET_VP(fp->f_dentry->d_inode)) == NULL)) { 86 ((vp = vn_from_inode(fp->f_dentry->d_inode)) == NULL)) {
87 error = XFS_ERROR(EINVAL); 87 error = XFS_ERROR(EINVAL);
88 goto error0; 88 goto error0;
89 } 89 }
@@ -95,7 +95,7 @@ xfs_swapext(
95 } 95 }
96 96
97 if (((tfp = fget((int)sxp->sx_fdtmp)) == NULL) || 97 if (((tfp = fget((int)sxp->sx_fdtmp)) == NULL) ||
98 ((tvp = LINVFS_GET_VP(tfp->f_dentry->d_inode)) == NULL)) { 98 ((tvp = vn_from_inode(tfp->f_dentry->d_inode)) == NULL)) {
99 error = XFS_ERROR(EINVAL); 99 error = XFS_ERROR(EINVAL);
100 goto error0; 100 goto error0;
101 } 101 }
diff --git a/fs/xfs/xfs_dir.c b/fs/xfs/xfs_dir.c
index bb87d2a700a9..9cc702a839a3 100644
--- a/fs/xfs/xfs_dir.c
+++ b/fs/xfs/xfs_dir.c
@@ -634,7 +634,7 @@ xfs_dir_leaf_removename(xfs_da_args_t *args, int *count, int *totallen)
634 return(retval); 634 return(retval);
635 ASSERT(bp != NULL); 635 ASSERT(bp != NULL);
636 leaf = bp->data; 636 leaf = bp->data;
637 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); 637 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
638 retval = xfs_dir_leaf_lookup_int(bp, args, &index); 638 retval = xfs_dir_leaf_lookup_int(bp, args, &index);
639 if (retval == EEXIST) { 639 if (retval == EEXIST) {
640 (void)xfs_dir_leaf_remove(args->trans, bp, index); 640 (void)xfs_dir_leaf_remove(args->trans, bp, index);
@@ -912,7 +912,7 @@ xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio,
912 return(error); 912 return(error);
913 if (bp) 913 if (bp)
914 leaf = bp->data; 914 leaf = bp->data;
915 if (bp && INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) { 915 if (bp && be16_to_cpu(leaf->hdr.info.magic) != XFS_DIR_LEAF_MAGIC) {
916 xfs_dir_trace_g_dub("node: block not a leaf", 916 xfs_dir_trace_g_dub("node: block not a leaf",
917 dp, uio, bno); 917 dp, uio, bno);
918 xfs_da_brelse(trans, bp); 918 xfs_da_brelse(trans, bp);
@@ -949,17 +949,17 @@ xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio,
949 if (bp == NULL) 949 if (bp == NULL)
950 return(XFS_ERROR(EFSCORRUPTED)); 950 return(XFS_ERROR(EFSCORRUPTED));
951 node = bp->data; 951 node = bp->data;
952 if (INT_GET(node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC) 952 if (be16_to_cpu(node->hdr.info.magic) != XFS_DA_NODE_MAGIC)
953 break; 953 break;
954 btree = &node->btree[0]; 954 btree = &node->btree[0];
955 xfs_dir_trace_g_dun("node: node detail", dp, uio, node); 955 xfs_dir_trace_g_dun("node: node detail", dp, uio, node);
956 for (i = 0; i < INT_GET(node->hdr.count, ARCH_CONVERT); btree++, i++) { 956 for (i = 0; i < be16_to_cpu(node->hdr.count); btree++, i++) {
957 if (INT_GET(btree->hashval, ARCH_CONVERT) >= cookhash) { 957 if (be32_to_cpu(btree->hashval) >= cookhash) {
958 bno = INT_GET(btree->before, ARCH_CONVERT); 958 bno = be32_to_cpu(btree->before);
959 break; 959 break;
960 } 960 }
961 } 961 }
962 if (i == INT_GET(node->hdr.count, ARCH_CONVERT)) { 962 if (i == be16_to_cpu(node->hdr.count)) {
963 xfs_da_brelse(trans, bp); 963 xfs_da_brelse(trans, bp);
964 xfs_dir_trace_g_du("node: hash beyond EOF", 964 xfs_dir_trace_g_du("node: hash beyond EOF",
965 dp, uio); 965 dp, uio);
@@ -982,7 +982,7 @@ xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio,
982 */ 982 */
983 for (;;) { 983 for (;;) {
984 leaf = bp->data; 984 leaf = bp->data;
985 if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC)) { 985 if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_DIR_LEAF_MAGIC)) {
986 xfs_dir_trace_g_dul("node: not a leaf", dp, uio, leaf); 986 xfs_dir_trace_g_dul("node: not a leaf", dp, uio, leaf);
987 xfs_da_brelse(trans, bp); 987 xfs_da_brelse(trans, bp);
988 XFS_CORRUPTION_ERROR("xfs_dir_node_getdents(1)", 988 XFS_CORRUPTION_ERROR("xfs_dir_node_getdents(1)",
@@ -990,7 +990,7 @@ xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio,
990 return XFS_ERROR(EFSCORRUPTED); 990 return XFS_ERROR(EFSCORRUPTED);
991 } 991 }
992 xfs_dir_trace_g_dul("node: leaf detail", dp, uio, leaf); 992 xfs_dir_trace_g_dul("node: leaf detail", dp, uio, leaf);
993 if ((nextbno = INT_GET(leaf->hdr.info.forw, ARCH_CONVERT))) { 993 if ((nextbno = be32_to_cpu(leaf->hdr.info.forw))) {
994 nextda = xfs_da_reada_buf(trans, dp, nextbno, 994 nextda = xfs_da_reada_buf(trans, dp, nextbno,
995 XFS_DATA_FORK); 995 XFS_DATA_FORK);
996 } else 996 } else
@@ -1118,21 +1118,20 @@ void
1118xfs_dir_trace_g_dun(char *where, xfs_inode_t *dp, uio_t *uio, 1118xfs_dir_trace_g_dun(char *where, xfs_inode_t *dp, uio_t *uio,
1119 xfs_da_intnode_t *node) 1119 xfs_da_intnode_t *node)
1120{ 1120{
1121 int last = INT_GET(node->hdr.count, ARCH_CONVERT) - 1; 1121 int last = be16_to_cpu(node->hdr.count) - 1;
1122 1122
1123 xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUN, where, 1123 xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUN, where,
1124 (void *)dp, (void *)dp->i_mount, 1124 (void *)dp, (void *)dp->i_mount,
1125 (void *)((unsigned long)(uio->uio_offset >> 32)), 1125 (void *)((unsigned long)(uio->uio_offset >> 32)),
1126 (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)), 1126 (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
1127 (void *)(unsigned long)uio->uio_resid, 1127 (void *)(unsigned long)uio->uio_resid,
1128 (void *)(unsigned long)be32_to_cpu(node->hdr.info.forw),
1128 (void *)(unsigned long) 1129 (void *)(unsigned long)
1129 INT_GET(node->hdr.info.forw, ARCH_CONVERT), 1130 be16_to_cpu(node->hdr.count),
1130 (void *)(unsigned long) 1131 (void *)(unsigned long)
1131 INT_GET(node->hdr.count, ARCH_CONVERT), 1132 be32_to_cpu(node->btree[0].hashval),
1132 (void *)(unsigned long) 1133 (void *)(unsigned long)
1133 INT_GET(node->btree[0].hashval, ARCH_CONVERT), 1134 be32_to_cpu(node->btree[last].hashval),
1134 (void *)(unsigned long)
1135 INT_GET(node->btree[last].hashval, ARCH_CONVERT),
1136 NULL, NULL, NULL); 1135 NULL, NULL, NULL);
1137} 1136}
1138 1137
@@ -1150,8 +1149,7 @@ xfs_dir_trace_g_dul(char *where, xfs_inode_t *dp, uio_t *uio,
1150 (void *)((unsigned long)(uio->uio_offset >> 32)), 1149 (void *)((unsigned long)(uio->uio_offset >> 32)),
1151 (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)), 1150 (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
1152 (void *)(unsigned long)uio->uio_resid, 1151 (void *)(unsigned long)uio->uio_resid,
1153 (void *)(unsigned long) 1152 (void *)(unsigned long)be32_to_cpu(leaf->hdr.info.forw),
1154 INT_GET(leaf->hdr.info.forw, ARCH_CONVERT),
1155 (void *)(unsigned long) 1153 (void *)(unsigned long)
1156 INT_GET(leaf->hdr.count, ARCH_CONVERT), 1154 INT_GET(leaf->hdr.count, ARCH_CONVERT),
1157 (void *)(unsigned long) 1155 (void *)(unsigned long)
diff --git a/fs/xfs/xfs_dir2.h b/fs/xfs/xfs_dir2.h
index 3158f5dc431f..7dd364b1e038 100644
--- a/fs/xfs/xfs_dir2.h
+++ b/fs/xfs/xfs_dir2.h
@@ -55,16 +55,16 @@ typedef __uint32_t xfs_dir2_db_t;
55/* 55/*
56 * Byte offset in a directory. 56 * Byte offset in a directory.
57 */ 57 */
58typedef xfs_off_t xfs_dir2_off_t; 58typedef xfs_off_t xfs_dir2_off_t;
59 59
60/* 60/*
61 * For getdents, argument struct for put routines. 61 * For getdents, argument struct for put routines.
62 */ 62 */
63typedef int (*xfs_dir2_put_t)(struct xfs_dir2_put_args *pa); 63typedef int (*xfs_dir2_put_t)(struct xfs_dir2_put_args *pa);
64typedef struct xfs_dir2_put_args { 64typedef struct xfs_dir2_put_args {
65 xfs_off_t cook; /* cookie of (next) entry */ 65 xfs_off_t cook; /* cookie of (next) entry */
66 xfs_intino_t ino; /* inode number */ 66 xfs_intino_t ino; /* inode number */
67 struct xfs_dirent *dbp; /* buffer pointer */ 67 xfs_dirent_t *dbp; /* buffer pointer */
68 char *name; /* directory entry name */ 68 char *name; /* directory entry name */
69 int namelen; /* length of name */ 69 int namelen; /* length of name */
70 int done; /* output: set if value was stored */ 70 int done; /* output: set if value was stored */
@@ -75,18 +75,13 @@ typedef struct xfs_dir2_put_args {
75/* 75/*
76 * Other interfaces used by the rest of the dir v2 code. 76 * Other interfaces used by the rest of the dir v2 code.
77 */ 77 */
78extern int 78extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
79 xfs_dir2_grow_inode(struct xfs_da_args *args, int space, 79 xfs_dir2_db_t *dbp);
80 xfs_dir2_db_t *dbp); 80extern int xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp,
81 81 int *vp);
82extern int 82extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp,
83 xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, int *vp); 83 int *vp);
84 84extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
85extern int 85 struct xfs_dabuf *bp);
86 xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, int *vp);
87
88extern int
89 xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
90 struct xfs_dabuf *bp);
91 86
92#endif /* __XFS_DIR2_H__ */ 87#endif /* __XFS_DIR2_H__ */
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index 31bc99faa704..bd5cee6aa51a 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -81,7 +81,7 @@ xfs_dir2_block_addname(
81 xfs_mount_t *mp; /* filesystem mount point */ 81 xfs_mount_t *mp; /* filesystem mount point */
82 int needlog; /* need to log header */ 82 int needlog; /* need to log header */
83 int needscan; /* need to rescan freespace */ 83 int needscan; /* need to rescan freespace */
84 xfs_dir2_data_off_t *tagp; /* pointer to tag value */ 84 __be16 *tagp; /* pointer to tag value */
85 xfs_trans_t *tp; /* transaction structure */ 85 xfs_trans_t *tp; /* transaction structure */
86 86
87 xfs_dir2_trace_args("block_addname", args); 87 xfs_dir2_trace_args("block_addname", args);
@@ -100,8 +100,7 @@ xfs_dir2_block_addname(
100 /* 100 /*
101 * Check the magic number, corrupted if wrong. 101 * Check the magic number, corrupted if wrong.
102 */ 102 */
103 if (unlikely(INT_GET(block->hdr.magic, ARCH_CONVERT) 103 if (unlikely(be32_to_cpu(block->hdr.magic) != XFS_DIR2_BLOCK_MAGIC)) {
104 != XFS_DIR2_BLOCK_MAGIC)) {
105 XFS_CORRUPTION_ERROR("xfs_dir2_block_addname", 104 XFS_CORRUPTION_ERROR("xfs_dir2_block_addname",
106 XFS_ERRLEVEL_LOW, mp, block); 105 XFS_ERRLEVEL_LOW, mp, block);
107 xfs_da_brelse(tp, bp); 106 xfs_da_brelse(tp, bp);
@@ -121,38 +120,38 @@ xfs_dir2_block_addname(
121 /* 120 /*
122 * Tag just before the first leaf entry. 121 * Tag just before the first leaf entry.
123 */ 122 */
124 tagp = (xfs_dir2_data_off_t *)blp - 1; 123 tagp = (__be16 *)blp - 1;
125 /* 124 /*
126 * Data object just before the first leaf entry. 125 * Data object just before the first leaf entry.
127 */ 126 */
128 enddup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT)); 127 enddup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp));
129 /* 128 /*
130 * If it's not free then can't do this add without cleaning up: 129 * If it's not free then can't do this add without cleaning up:
131 * the space before the first leaf entry needs to be free so it 130 * the space before the first leaf entry needs to be free so it
132 * can be expanded to hold the pointer to the new entry. 131 * can be expanded to hold the pointer to the new entry.
133 */ 132 */
134 if (INT_GET(enddup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG) 133 if (be16_to_cpu(enddup->freetag) != XFS_DIR2_DATA_FREE_TAG)
135 dup = enddup = NULL; 134 dup = enddup = NULL;
136 /* 135 /*
137 * Check out the biggest freespace and see if it's the same one. 136 * Check out the biggest freespace and see if it's the same one.
138 */ 137 */
139 else { 138 else {
140 dup = (xfs_dir2_data_unused_t *) 139 dup = (xfs_dir2_data_unused_t *)
141 ((char *)block + INT_GET(bf[0].offset, ARCH_CONVERT)); 140 ((char *)block + be16_to_cpu(bf[0].offset));
142 if (dup == enddup) { 141 if (dup == enddup) {
143 /* 142 /*
144 * It is the biggest freespace, is it too small 143 * It is the biggest freespace, is it too small
145 * to hold the new leaf too? 144 * to hold the new leaf too?
146 */ 145 */
147 if (INT_GET(dup->length, ARCH_CONVERT) < len + (uint)sizeof(*blp)) { 146 if (be16_to_cpu(dup->length) < len + (uint)sizeof(*blp)) {
148 /* 147 /*
149 * Yes, we use the second-largest 148 * Yes, we use the second-largest
150 * entry instead if it works. 149 * entry instead if it works.
151 */ 150 */
152 if (INT_GET(bf[1].length, ARCH_CONVERT) >= len) 151 if (be16_to_cpu(bf[1].length) >= len)
153 dup = (xfs_dir2_data_unused_t *) 152 dup = (xfs_dir2_data_unused_t *)
154 ((char *)block + 153 ((char *)block +
155 INT_GET(bf[1].offset, ARCH_CONVERT)); 154 be16_to_cpu(bf[1].offset));
156 else 155 else
157 dup = NULL; 156 dup = NULL;
158 } 157 }
@@ -161,7 +160,7 @@ xfs_dir2_block_addname(
161 * Not the same free entry, 160 * Not the same free entry,
162 * just check its length. 161 * just check its length.
163 */ 162 */
164 if (INT_GET(dup->length, ARCH_CONVERT) < len) { 163 if (be16_to_cpu(dup->length) < len) {
165 dup = NULL; 164 dup = NULL;
166 } 165 }
167 } 166 }
@@ -172,9 +171,9 @@ xfs_dir2_block_addname(
172 * If there are stale entries we'll use one for the leaf. 171 * If there are stale entries we'll use one for the leaf.
173 * Is the biggest entry enough to avoid compaction? 172 * Is the biggest entry enough to avoid compaction?
174 */ 173 */
175 else if (INT_GET(bf[0].length, ARCH_CONVERT) >= len) { 174 else if (be16_to_cpu(bf[0].length) >= len) {
176 dup = (xfs_dir2_data_unused_t *) 175 dup = (xfs_dir2_data_unused_t *)
177 ((char *)block + INT_GET(bf[0].offset, ARCH_CONVERT)); 176 ((char *)block + be16_to_cpu(bf[0].offset));
178 compact = 0; 177 compact = 0;
179 } 178 }
180 /* 179 /*
@@ -184,20 +183,20 @@ xfs_dir2_block_addname(
184 /* 183 /*
185 * Tag just before the first leaf entry. 184 * Tag just before the first leaf entry.
186 */ 185 */
187 tagp = (xfs_dir2_data_off_t *)blp - 1; 186 tagp = (__be16 *)blp - 1;
188 /* 187 /*
189 * Data object just before the first leaf entry. 188 * Data object just before the first leaf entry.
190 */ 189 */
191 dup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT)); 190 dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp));
192 /* 191 /*
193 * If it's not free then the data will go where the 192 * If it's not free then the data will go where the
194 * leaf data starts now, if it works at all. 193 * leaf data starts now, if it works at all.
195 */ 194 */
196 if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { 195 if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
197 if (INT_GET(dup->length, ARCH_CONVERT) + (INT_GET(btp->stale, ARCH_CONVERT) - 1) * 196 if (be16_to_cpu(dup->length) + (be32_to_cpu(btp->stale) - 1) *
198 (uint)sizeof(*blp) < len) 197 (uint)sizeof(*blp) < len)
199 dup = NULL; 198 dup = NULL;
200 } else if ((INT_GET(btp->stale, ARCH_CONVERT) - 1) * (uint)sizeof(*blp) < len) 199 } else if ((be32_to_cpu(btp->stale) - 1) * (uint)sizeof(*blp) < len)
201 dup = NULL; 200 dup = NULL;
202 else 201 else
203 dup = (xfs_dir2_data_unused_t *)blp; 202 dup = (xfs_dir2_data_unused_t *)blp;
@@ -243,11 +242,11 @@ xfs_dir2_block_addname(
243 int fromidx; /* source leaf index */ 242 int fromidx; /* source leaf index */
244 int toidx; /* target leaf index */ 243 int toidx; /* target leaf index */
245 244
246 for (fromidx = toidx = INT_GET(btp->count, ARCH_CONVERT) - 1, 245 for (fromidx = toidx = be32_to_cpu(btp->count) - 1,
247 highstale = lfloghigh = -1; 246 highstale = lfloghigh = -1;
248 fromidx >= 0; 247 fromidx >= 0;
249 fromidx--) { 248 fromidx--) {
250 if (INT_GET(blp[fromidx].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) { 249 if (be32_to_cpu(blp[fromidx].address) == XFS_DIR2_NULL_DATAPTR) {
251 if (highstale == -1) 250 if (highstale == -1)
252 highstale = toidx; 251 highstale = toidx;
253 else { 252 else {
@@ -260,15 +259,15 @@ xfs_dir2_block_addname(
260 blp[toidx] = blp[fromidx]; 259 blp[toidx] = blp[fromidx];
261 toidx--; 260 toidx--;
262 } 261 }
263 lfloglow = toidx + 1 - (INT_GET(btp->stale, ARCH_CONVERT) - 1); 262 lfloglow = toidx + 1 - (be32_to_cpu(btp->stale) - 1);
264 lfloghigh -= INT_GET(btp->stale, ARCH_CONVERT) - 1; 263 lfloghigh -= be32_to_cpu(btp->stale) - 1;
265 INT_MOD(btp->count, ARCH_CONVERT, -(INT_GET(btp->stale, ARCH_CONVERT) - 1)); 264 be32_add(&btp->count, -(be32_to_cpu(btp->stale) - 1));
266 xfs_dir2_data_make_free(tp, bp, 265 xfs_dir2_data_make_free(tp, bp,
267 (xfs_dir2_data_aoff_t)((char *)blp - (char *)block), 266 (xfs_dir2_data_aoff_t)((char *)blp - (char *)block),
268 (xfs_dir2_data_aoff_t)((INT_GET(btp->stale, ARCH_CONVERT) - 1) * sizeof(*blp)), 267 (xfs_dir2_data_aoff_t)((be32_to_cpu(btp->stale) - 1) * sizeof(*blp)),
269 &needlog, &needscan); 268 &needlog, &needscan);
270 blp += INT_GET(btp->stale, ARCH_CONVERT) - 1; 269 blp += be32_to_cpu(btp->stale) - 1;
271 INT_SET(btp->stale, ARCH_CONVERT, 1); 270 btp->stale = cpu_to_be32(1);
272 /* 271 /*
273 * If we now need to rebuild the bestfree map, do so. 272 * If we now need to rebuild the bestfree map, do so.
274 * This needs to happen before the next call to use_free. 273 * This needs to happen before the next call to use_free.
@@ -283,23 +282,23 @@ xfs_dir2_block_addname(
283 * Set leaf logging boundaries to impossible state. 282 * Set leaf logging boundaries to impossible state.
284 * For the no-stale case they're set explicitly. 283 * For the no-stale case they're set explicitly.
285 */ 284 */
286 else if (INT_GET(btp->stale, ARCH_CONVERT)) { 285 else if (btp->stale) {
287 lfloglow = INT_GET(btp->count, ARCH_CONVERT); 286 lfloglow = be32_to_cpu(btp->count);
288 lfloghigh = -1; 287 lfloghigh = -1;
289 } 288 }
290 /* 289 /*
291 * Find the slot that's first lower than our hash value, -1 if none. 290 * Find the slot that's first lower than our hash value, -1 if none.
292 */ 291 */
293 for (low = 0, high = INT_GET(btp->count, ARCH_CONVERT) - 1; low <= high; ) { 292 for (low = 0, high = be32_to_cpu(btp->count) - 1; low <= high; ) {
294 mid = (low + high) >> 1; 293 mid = (low + high) >> 1;
295 if ((hash = INT_GET(blp[mid].hashval, ARCH_CONVERT)) == args->hashval) 294 if ((hash = be32_to_cpu(blp[mid].hashval)) == args->hashval)
296 break; 295 break;
297 if (hash < args->hashval) 296 if (hash < args->hashval)
298 low = mid + 1; 297 low = mid + 1;
299 else 298 else
300 high = mid - 1; 299 high = mid - 1;
301 } 300 }
302 while (mid >= 0 && INT_GET(blp[mid].hashval, ARCH_CONVERT) >= args->hashval) { 301 while (mid >= 0 && be32_to_cpu(blp[mid].hashval) >= args->hashval) {
303 mid--; 302 mid--;
304 } 303 }
305 /* 304 /*
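
The probe above is the block directory's core lookup: leaf entries are sorted by hash, a binary search finds any matching slot, and the trailing loop backs up to the first slot whose hash is strictly below the target (-1 if none), so the new entry can be placed in front of the run of equal hashes. A standalone, CPU-order sketch of that probe, not the kernel code itself:

#include <stdint.h>
#include <stdio.h>

static int find_first_lower(const uint32_t *hash, int count, uint32_t want)
{
        int low = 0, high = count - 1, mid = -1;

        while (low <= high) {
                mid = (low + high) >> 1;
                if (hash[mid] == want)
                        break;
                if (hash[mid] < want)
                        low = mid + 1;
                else
                        high = mid - 1;
        }
        /* Step back over any run of equal (or higher) hash values. */
        while (mid >= 0 && hash[mid] >= want)
                mid--;
        return mid;
}

int main(void)
{
        uint32_t h[] = { 10, 20, 20, 30 };

        printf("%d\n", find_first_lower(h, 4, 20));     /* prints 0 */
        return 0;
}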
@@ -311,14 +310,14 @@ xfs_dir2_block_addname(
311 */ 310 */
312 xfs_dir2_data_use_free(tp, bp, enddup, 311 xfs_dir2_data_use_free(tp, bp, enddup,
313 (xfs_dir2_data_aoff_t) 312 (xfs_dir2_data_aoff_t)
314 ((char *)enddup - (char *)block + INT_GET(enddup->length, ARCH_CONVERT) - 313 ((char *)enddup - (char *)block + be16_to_cpu(enddup->length) -
315 sizeof(*blp)), 314 sizeof(*blp)),
316 (xfs_dir2_data_aoff_t)sizeof(*blp), 315 (xfs_dir2_data_aoff_t)sizeof(*blp),
317 &needlog, &needscan); 316 &needlog, &needscan);
318 /* 317 /*
319 * Update the tail (entry count). 318 * Update the tail (entry count).
320 */ 319 */
321 INT_MOD(btp->count, ARCH_CONVERT, +1); 320 be32_add(&btp->count, 1);
322 /* 321 /*
323 * If we now need to rebuild the bestfree map, do so. 322 * If we now need to rebuild the bestfree map, do so.
324 * This needs to happen before the next call to use_free. 323 * This needs to happen before the next call to use_free.
@@ -346,12 +345,12 @@ xfs_dir2_block_addname(
346 else { 345 else {
347 for (lowstale = mid; 346 for (lowstale = mid;
348 lowstale >= 0 && 347 lowstale >= 0 &&
349 INT_GET(blp[lowstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR; 348 be32_to_cpu(blp[lowstale].address) != XFS_DIR2_NULL_DATAPTR;
350 lowstale--) 349 lowstale--)
351 continue; 350 continue;
352 for (highstale = mid + 1; 351 for (highstale = mid + 1;
353 highstale < INT_GET(btp->count, ARCH_CONVERT) && 352 highstale < be32_to_cpu(btp->count) &&
354 INT_GET(blp[highstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR && 353 be32_to_cpu(blp[highstale].address) != XFS_DIR2_NULL_DATAPTR &&
355 (lowstale < 0 || mid - lowstale > highstale - mid); 354 (lowstale < 0 || mid - lowstale > highstale - mid);
356 highstale++) 355 highstale++)
357 continue; 356 continue;
@@ -359,7 +358,7 @@ xfs_dir2_block_addname(
359 * Move entries toward the low-numbered stale entry. 358 * Move entries toward the low-numbered stale entry.
360 */ 359 */
361 if (lowstale >= 0 && 360 if (lowstale >= 0 &&
362 (highstale == INT_GET(btp->count, ARCH_CONVERT) || 361 (highstale == be32_to_cpu(btp->count) ||
363 mid - lowstale <= highstale - mid)) { 362 mid - lowstale <= highstale - mid)) {
364 if (mid - lowstale) 363 if (mid - lowstale)
365 memmove(&blp[lowstale], &blp[lowstale + 1], 364 memmove(&blp[lowstale], &blp[lowstale + 1],
@@ -371,7 +370,7 @@ xfs_dir2_block_addname(
371 * Move entries toward the high-numbered stale entry. 370 * Move entries toward the high-numbered stale entry.
372 */ 371 */
373 else { 372 else {
374 ASSERT(highstale < INT_GET(btp->count, ARCH_CONVERT)); 373 ASSERT(highstale < be32_to_cpu(btp->count));
375 mid++; 374 mid++;
376 if (highstale - mid) 375 if (highstale - mid)
377 memmove(&blp[mid + 1], &blp[mid], 376 memmove(&blp[mid + 1], &blp[mid],
@@ -379,7 +378,7 @@ xfs_dir2_block_addname(
379 lfloglow = MIN(mid, lfloglow); 378 lfloglow = MIN(mid, lfloglow);
380 lfloghigh = MAX(highstale, lfloghigh); 379 lfloghigh = MAX(highstale, lfloghigh);
381 } 380 }
382 INT_MOD(btp->stale, ARCH_CONVERT, -1); 381 be32_add(&btp->stale, -1);
383 } 382 }
384 /* 383 /*
385 * Point to the new data entry. 384 * Point to the new data entry.
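
The hunks above implement stale-slot reuse: with the insertion index mid in hand, the nearer of the stale slots below (lowstale) and above (highstale) is chosen, and the shorter run of live entries is slid over it so blp[mid] comes free. A compact sketch under assumed stand-in types; leaf_ent plays the role of xfs_dir2_leaf_entry_t:

#include <string.h>

struct leaf_ent { unsigned int hashval, address; };

static void open_slot(struct leaf_ent *blp, int count,
                      int *mid, int lowstale, int highstale)
{
        if (lowstale >= 0 &&
            (highstale == count || *mid - lowstale <= highstale - *mid)) {
                /* Slide live entries down over the low stale slot. */
                memmove(&blp[lowstale], &blp[lowstale + 1],
                        (*mid - lowstale) * sizeof(*blp));
        } else {
                /* Slide live entries up over the high stale slot. */
                (*mid)++;
                memmove(&blp[*mid + 1], &blp[*mid],
                        (highstale - *mid) * sizeof(*blp));
        }
}

int main(void)
{
        /* Hashes 10, stale, 15, 30; insert hash 20 at mid == 2. */
        struct leaf_ent e[4] = { {10, 1}, {0, 0}, {15, 3}, {30, 4} };
        int mid = 2;

        open_slot(e, 4, &mid, 1, 4);            /* low side is nearer */
        e[mid] = (struct leaf_ent){ 20, 2 };    /* now 10, 15, 20, 30 */
        return 0;
}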
@@ -388,8 +387,9 @@ xfs_dir2_block_addname(
388 /* 387 /*
389 * Fill in the leaf entry. 388 * Fill in the leaf entry.
390 */ 389 */
391 INT_SET(blp[mid].hashval, ARCH_CONVERT, args->hashval); 390 blp[mid].hashval = cpu_to_be32(args->hashval);
392 INT_SET(blp[mid].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block)); 391 blp[mid].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp,
392 (char *)dep - (char *)block));
393 xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh); 393 xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh);
394 /* 394 /*
395 * Mark space for the data entry used. 395 * Mark space for the data entry used.
@@ -404,7 +404,7 @@ xfs_dir2_block_addname(
404 dep->namelen = args->namelen; 404 dep->namelen = args->namelen;
405 memcpy(dep->name, args->name, args->namelen); 405 memcpy(dep->name, args->name, args->namelen);
406 tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); 406 tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
407 INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); 407 *tagp = cpu_to_be16((char *)dep - (char *)block);
408 /* 408 /*
409 * Clean up the bestfree array and log the header, tail, and entry. 409 * Clean up the bestfree array and log the header, tail, and entry.
410 */ 410 */
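
The *tagp store just above writes each entry's trailing tag: a __be16 at the very end of the 8-byte-padded entry, holding the entry's own byte offset in the block, which is what lets routines walk the data area backwards from its end. A simplified model of the geometry; the layout here is assumed and mirrors XFS_DIR2_DATA_ENTSIZE only in spirit:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct data_entry {
        uint64_t inumber;       /* inode number */
        uint8_t  namelen;       /* name length */
        char     name[];        /* name bytes, pad, then a __be16 tag */
};

/* Whole entry rounds up to 8 bytes; the tag is its last two bytes. */
static size_t ent_size(uint8_t namelen)
{
        return (offsetof(struct data_entry, name) + namelen
                + sizeof(uint16_t) + 7) & ~(size_t)7;
}

int main(void)
{
        /* A one-byte name ("." for instance) packs into a 16-byte entry. */
        printf("%zu\n", ent_size(1));   /* prints 16 */
        return 0;
}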
@@ -485,8 +485,8 @@ xfs_dir2_block_getdents(
485 /* 485 /*
486 * Unused, skip it. 486 * Unused, skip it.
487 */ 487 */
488 if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { 488 if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
489 ptr += INT_GET(dup->length, ARCH_CONVERT); 489 ptr += be16_to_cpu(dup->length);
490 continue; 490 continue;
491 } 491 }
492 492
@@ -622,7 +622,7 @@ xfs_dir2_block_lookup(
622 * Get the offset from the leaf entry, to point to the data. 622 * Get the offset from the leaf entry, to point to the data.
623 */ 623 */
624 dep = (xfs_dir2_data_entry_t *) 624 dep = (xfs_dir2_data_entry_t *)
625 ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT))); 625 ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(blp[ent].address)));
626 /* 626 /*
627 * Fill in inode number, release the block. 627 * Fill in inode number, release the block.
628 */ 628 */
@@ -674,10 +674,10 @@ xfs_dir2_block_lookup_int(
674 * Loop doing a binary search for our hash value. 674 * Loop doing a binary search for our hash value.
675 * Find our entry, ENOENT if it's not there. 675 * Find our entry, ENOENT if it's not there.
676 */ 676 */
677 for (low = 0, high = INT_GET(btp->count, ARCH_CONVERT) - 1; ; ) { 677 for (low = 0, high = be32_to_cpu(btp->count) - 1; ; ) {
678 ASSERT(low <= high); 678 ASSERT(low <= high);
679 mid = (low + high) >> 1; 679 mid = (low + high) >> 1;
680 if ((hash = INT_GET(blp[mid].hashval, ARCH_CONVERT)) == args->hashval) 680 if ((hash = be32_to_cpu(blp[mid].hashval)) == args->hashval)
681 break; 681 break;
682 if (hash < args->hashval) 682 if (hash < args->hashval)
683 low = mid + 1; 683 low = mid + 1;
@@ -692,7 +692,7 @@ xfs_dir2_block_lookup_int(
692 /* 692 /*
693 * Back up to the first one with the right hash value. 693 * Back up to the first one with the right hash value.
694 */ 694 */
695 while (mid > 0 && INT_GET(blp[mid - 1].hashval, ARCH_CONVERT) == args->hashval) { 695 while (mid > 0 && be32_to_cpu(blp[mid - 1].hashval) == args->hashval) {
696 mid--; 696 mid--;
697 } 697 }
698 /* 698 /*
@@ -700,7 +700,7 @@ xfs_dir2_block_lookup_int(
700 * right hash value looking for our name. 700 * right hash value looking for our name.
701 */ 701 */
702 do { 702 do {
703 if ((addr = INT_GET(blp[mid].address, ARCH_CONVERT)) == XFS_DIR2_NULL_DATAPTR) 703 if ((addr = be32_to_cpu(blp[mid].address)) == XFS_DIR2_NULL_DATAPTR)
704 continue; 704 continue;
705 /* 705 /*
706 * Get pointer to the entry from the leaf. 706 * Get pointer to the entry from the leaf.
@@ -717,7 +717,7 @@ xfs_dir2_block_lookup_int(
717 *entno = mid; 717 *entno = mid;
718 return 0; 718 return 0;
719 } 719 }
720 } while (++mid < INT_GET(btp->count, ARCH_CONVERT) && INT_GET(blp[mid].hashval, ARCH_CONVERT) == hash); 720 } while (++mid < be32_to_cpu(btp->count) && be32_to_cpu(blp[mid].hashval) == hash);
721 /* 721 /*
722 * No match, release the buffer and return ENOENT. 722 * No match, release the buffer and return ENOENT.
723 */ 723 */
@@ -767,7 +767,7 @@ xfs_dir2_block_removename(
767 * Point to the data entry using the leaf entry. 767 * Point to the data entry using the leaf entry.
768 */ 768 */
769 dep = (xfs_dir2_data_entry_t *) 769 dep = (xfs_dir2_data_entry_t *)
770 ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT))); 770 ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(blp[ent].address)));
771 /* 771 /*
772 * Mark the data entry's space free. 772 * Mark the data entry's space free.
773 */ 773 */
@@ -778,12 +778,12 @@ xfs_dir2_block_removename(
778 /* 778 /*
779 * Fix up the block tail. 779 * Fix up the block tail.
780 */ 780 */
781 INT_MOD(btp->stale, ARCH_CONVERT, +1); 781 be32_add(&btp->stale, 1);
782 xfs_dir2_block_log_tail(tp, bp); 782 xfs_dir2_block_log_tail(tp, bp);
783 /* 783 /*
784 * Remove the leaf entry by marking it stale. 784 * Remove the leaf entry by marking it stale.
785 */ 785 */
786 INT_SET(blp[ent].address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR); 786 blp[ent].address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR);
787 xfs_dir2_block_log_leaf(tp, bp, ent, ent); 787 xfs_dir2_block_log_leaf(tp, bp, ent, ent);
788 /* 788 /*
789 * Fix up bestfree, log the header if necessary. 789 * Fix up bestfree, log the header if necessary.
@@ -843,7 +843,7 @@ xfs_dir2_block_replace(
843 * Point to the data entry we need to change. 843 * Point to the data entry we need to change.
844 */ 844 */
845 dep = (xfs_dir2_data_entry_t *) 845 dep = (xfs_dir2_data_entry_t *)
846 ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT))); 846 ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(blp[ent].address)));
847 ASSERT(INT_GET(dep->inumber, ARCH_CONVERT) != args->inumber); 847 ASSERT(INT_GET(dep->inumber, ARCH_CONVERT) != args->inumber);
848 /* 848 /*
849 * Change the inode number to the new value. 849 * Change the inode number to the new value.
@@ -868,8 +868,8 @@ xfs_dir2_block_sort(
868 868
869 la = a; 869 la = a;
870 lb = b; 870 lb = b;
871 return INT_GET(la->hashval, ARCH_CONVERT) < INT_GET(lb->hashval, ARCH_CONVERT) ? -1 : 871 return be32_to_cpu(la->hashval) < be32_to_cpu(lb->hashval) ? -1 :
872 (INT_GET(la->hashval, ARCH_CONVERT) > INT_GET(lb->hashval, ARCH_CONVERT) ? 1 : 0); 872 (be32_to_cpu(la->hashval) > be32_to_cpu(lb->hashval) ? 1 : 0);
873} 873}
874 874
875/* 875/*
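
The comparator conversion above is where the new helpers matter most: big-endian values must not be compared raw on a little-endian CPU, because byte-swapped magnitudes order differently, so both hash values are converted before the usual -1/0/1 result is computed. A qsort()-style userspace model, assuming a little-endian host:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

typedef uint32_t __be32;

struct leaf_entry { __be32 hashval, address; };

static uint32_t be32_to_cpu(__be32 v)  { return __builtin_bswap32(v); }
static __be32   cpu_to_be32(uint32_t v){ return __builtin_bswap32(v); }

/* Order by CPU-order hash; comparing the raw __be32 would misorder. */
static int block_sort(const void *a, const void *b)
{
        uint32_t ha = be32_to_cpu(((const struct leaf_entry *)a)->hashval);
        uint32_t hb = be32_to_cpu(((const struct leaf_entry *)b)->hashval);

        return ha < hb ? -1 : (ha > hb ? 1 : 0);
}

int main(void)
{
        struct leaf_entry e[2] = {
                { cpu_to_be32(0x100), 0 },      /* small value, big raw bytes */
                { cpu_to_be32(0x2), 0 },
        };

        qsort(e, 2, sizeof(e[0]), block_sort);
        printf("%#x\n", be32_to_cpu(e[0].hashval));     /* prints 0x2 */
        return 0;
}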
@@ -881,7 +881,7 @@ xfs_dir2_leaf_to_block(
881 xfs_dabuf_t *lbp, /* leaf buffer */ 881 xfs_dabuf_t *lbp, /* leaf buffer */
882 xfs_dabuf_t *dbp) /* data buffer */ 882 xfs_dabuf_t *dbp) /* data buffer */
883{ 883{
884 xfs_dir2_data_off_t *bestsp; /* leaf bests table */ 884 __be16 *bestsp; /* leaf bests table */
885 xfs_dir2_block_t *block; /* block structure */ 885 xfs_dir2_block_t *block; /* block structure */
886 xfs_dir2_block_tail_t *btp; /* block tail */ 886 xfs_dir2_block_tail_t *btp; /* block tail */
887 xfs_inode_t *dp; /* incore directory inode */ 887 xfs_inode_t *dp; /* incore directory inode */
@@ -896,7 +896,7 @@ xfs_dir2_leaf_to_block(
896 int needscan; /* need to scan for bestfree */ 896 int needscan; /* need to scan for bestfree */
897 xfs_dir2_sf_hdr_t sfh; /* shortform header */ 897 xfs_dir2_sf_hdr_t sfh; /* shortform header */
898 int size; /* bytes used */ 898 int size; /* bytes used */
899 xfs_dir2_data_off_t *tagp; /* end of entry (tag) */ 899 __be16 *tagp; /* end of entry (tag) */
900 int to; /* block/leaf to index */ 900 int to; /* block/leaf to index */
901 xfs_trans_t *tp; /* transaction pointer */ 901 xfs_trans_t *tp; /* transaction pointer */
902 902
@@ -905,7 +905,7 @@ xfs_dir2_leaf_to_block(
905 tp = args->trans; 905 tp = args->trans;
906 mp = dp->i_mount; 906 mp = dp->i_mount;
907 leaf = lbp->data; 907 leaf = lbp->data;
908 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); 908 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC);
909 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 909 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf);
910 /* 910 /*
911 * If there are data blocks other than the first one, take this 911 * If there are data blocks other than the first one, take this
@@ -915,11 +915,11 @@ xfs_dir2_leaf_to_block(
915 */ 915 */
916 while (dp->i_d.di_size > mp->m_dirblksize) { 916 while (dp->i_d.di_size > mp->m_dirblksize) {
917 bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); 917 bestsp = XFS_DIR2_LEAF_BESTS_P(ltp);
918 if (INT_GET(bestsp[INT_GET(ltp->bestcount, ARCH_CONVERT) - 1], ARCH_CONVERT) == 918 if (be16_to_cpu(bestsp[be32_to_cpu(ltp->bestcount) - 1]) ==
919 mp->m_dirblksize - (uint)sizeof(block->hdr)) { 919 mp->m_dirblksize - (uint)sizeof(block->hdr)) {
920 if ((error = 920 if ((error =
921 xfs_dir2_leaf_trim_data(args, lbp, 921 xfs_dir2_leaf_trim_data(args, lbp,
922 (xfs_dir2_db_t)(INT_GET(ltp->bestcount, ARCH_CONVERT) - 1)))) 922 (xfs_dir2_db_t)(be32_to_cpu(ltp->bestcount) - 1))))
923 goto out; 923 goto out;
924 } else { 924 } else {
925 error = 0; 925 error = 0;
@@ -935,28 +935,29 @@ xfs_dir2_leaf_to_block(
935 goto out; 935 goto out;
936 } 936 }
937 block = dbp->data; 937 block = dbp->data;
938 ASSERT(INT_GET(block->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC); 938 ASSERT(be32_to_cpu(block->hdr.magic) == XFS_DIR2_DATA_MAGIC);
939 /* 939 /*
940 * Size of the "leaf" area in the block. 940 * Size of the "leaf" area in the block.
941 */ 941 */
942 size = (uint)sizeof(block->tail) + 942 size = (uint)sizeof(block->tail) +
943 (uint)sizeof(*lep) * (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT)); 943 (uint)sizeof(*lep) * (be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale));
944 /* 944 /*
945 * Look at the last data entry. 945 * Look at the last data entry.
946 */ 946 */
947 tagp = (xfs_dir2_data_off_t *)((char *)block + mp->m_dirblksize) - 1; 947 tagp = (__be16 *)((char *)block + mp->m_dirblksize) - 1;
948 dup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT)); 948 dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp));
949 /* 949 /*
950 * If it's not free or is too short we can't do it. 950 * If it's not free or is too short we can't do it.
951 */ 951 */
952 if (INT_GET(dup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG || INT_GET(dup->length, ARCH_CONVERT) < size) { 952 if (be16_to_cpu(dup->freetag) != XFS_DIR2_DATA_FREE_TAG ||
953 be16_to_cpu(dup->length) < size) {
953 error = 0; 954 error = 0;
954 goto out; 955 goto out;
955 } 956 }
956 /* 957 /*
957 * Start converting it to block form. 958 * Start converting it to block form.
958 */ 959 */
959 INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_BLOCK_MAGIC); 960 block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
960 needlog = 1; 961 needlog = 1;
961 needscan = 0; 962 needscan = 0;
962 /* 963 /*
@@ -968,20 +969,20 @@ xfs_dir2_leaf_to_block(
968 * Initialize the block tail. 969 * Initialize the block tail.
969 */ 970 */
970 btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 971 btp = XFS_DIR2_BLOCK_TAIL_P(mp, block);
971 INT_SET(btp->count, ARCH_CONVERT, INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT)); 972 btp->count = cpu_to_be32(be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale));
972 btp->stale = 0; 973 btp->stale = 0;
973 xfs_dir2_block_log_tail(tp, dbp); 974 xfs_dir2_block_log_tail(tp, dbp);
974 /* 975 /*
975 * Initialize the block leaf area. We compact out stale entries. 976 * Initialize the block leaf area. We compact out stale entries.
976 */ 977 */
977 lep = XFS_DIR2_BLOCK_LEAF_P(btp); 978 lep = XFS_DIR2_BLOCK_LEAF_P(btp);
978 for (from = to = 0; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) { 979 for (from = to = 0; from < be16_to_cpu(leaf->hdr.count); from++) {
979 if (INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) 980 if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR)
980 continue; 981 continue;
981 lep[to++] = leaf->ents[from]; 982 lep[to++] = leaf->ents[from];
982 } 983 }
983 ASSERT(to == INT_GET(btp->count, ARCH_CONVERT)); 984 ASSERT(to == be32_to_cpu(btp->count));
984 xfs_dir2_block_log_leaf(tp, dbp, 0, INT_GET(btp->count, ARCH_CONVERT) - 1); 985 xfs_dir2_block_log_leaf(tp, dbp, 0, be32_to_cpu(btp->count) - 1);
985 /* 986 /*
986 * Scan the bestfree if we need it and log the data block header. 987 * Scan the bestfree if we need it and log the data block header.
987 */ 988 */
@@ -1043,7 +1044,7 @@ xfs_dir2_sf_to_block(
1043 int offset; /* target block offset */ 1044 int offset; /* target block offset */
1044 xfs_dir2_sf_entry_t *sfep; /* sf entry pointer */ 1045 xfs_dir2_sf_entry_t *sfep; /* sf entry pointer */
1045 xfs_dir2_sf_t *sfp; /* shortform structure */ 1046 xfs_dir2_sf_t *sfp; /* shortform structure */
1046 xfs_dir2_data_off_t *tagp; /* end of data entry */ 1047 __be16 *tagp; /* end of data entry */
1047 xfs_trans_t *tp; /* transaction pointer */ 1048 xfs_trans_t *tp; /* transaction pointer */
1048 1049
1049 xfs_dir2_trace_args("sf_to_block", args); 1050 xfs_dir2_trace_args("sf_to_block", args);
@@ -1095,12 +1096,12 @@ xfs_dir2_sf_to_block(
1095 return error; 1096 return error;
1096 } 1097 }
1097 block = bp->data; 1098 block = bp->data;
1098 INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_BLOCK_MAGIC); 1099 block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
1099 /* 1100 /*
1100 * Compute size of block "tail" area. 1101 * Compute size of block "tail" area.
1101 */ 1102 */
1102 i = (uint)sizeof(*btp) + 1103 i = (uint)sizeof(*btp) +
1103 (INT_GET(sfp->hdr.count, ARCH_CONVERT) + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t); 1104 (sfp->hdr.count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t);
1104 /* 1105 /*
1105 * The whole thing is initialized to free by the init routine. 1106 * The whole thing is initialized to free by the init routine.
1106 * Say we're using the leaf and tail area. 1107 * Say we're using the leaf and tail area.
@@ -1114,7 +1115,7 @@ xfs_dir2_sf_to_block(
1114 * Fill in the tail. 1115 * Fill in the tail.
1115 */ 1116 */
1116 btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 1117 btp = XFS_DIR2_BLOCK_TAIL_P(mp, block);
1117 INT_SET(btp->count, ARCH_CONVERT, INT_GET(sfp->hdr.count, ARCH_CONVERT) + 2); /* ., .. */ 1118 btp->count = cpu_to_be32(sfp->hdr.count + 2); /* ., .. */
1118 btp->stale = 0; 1119 btp->stale = 0;
1119 blp = XFS_DIR2_BLOCK_LEAF_P(btp); 1120 blp = XFS_DIR2_BLOCK_LEAF_P(btp);
1120 endoffset = (uint)((char *)blp - (char *)block); 1121 endoffset = (uint)((char *)blp - (char *)block);
@@ -1123,7 +1124,7 @@ xfs_dir2_sf_to_block(
1123 */ 1124 */
1124 xfs_dir2_data_use_free(tp, bp, dup, 1125 xfs_dir2_data_use_free(tp, bp, dup,
1125 (xfs_dir2_data_aoff_t)((char *)dup - (char *)block), 1126 (xfs_dir2_data_aoff_t)((char *)dup - (char *)block),
1126 INT_GET(dup->length, ARCH_CONVERT), &needlog, &needscan); 1127 be16_to_cpu(dup->length), &needlog, &needscan);
1127 /* 1128 /*
1128 * Create entry for . 1129 * Create entry for .
1129 */ 1130 */
@@ -1133,10 +1134,11 @@ xfs_dir2_sf_to_block(
1133 dep->namelen = 1; 1134 dep->namelen = 1;
1134 dep->name[0] = '.'; 1135 dep->name[0] = '.';
1135 tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); 1136 tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
1136 INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); 1137 *tagp = cpu_to_be16((char *)dep - (char *)block);
1137 xfs_dir2_data_log_entry(tp, bp, dep); 1138 xfs_dir2_data_log_entry(tp, bp, dep);
1138 INT_SET(blp[0].hashval, ARCH_CONVERT, xfs_dir_hash_dot); 1139 blp[0].hashval = cpu_to_be32(xfs_dir_hash_dot);
1139 INT_SET(blp[0].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block)); 1140 blp[0].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp,
1141 (char *)dep - (char *)block));
1140 /* 1142 /*
1141 * Create entry for .. 1143 * Create entry for ..
1142 */ 1144 */
@@ -1146,15 +1148,16 @@ xfs_dir2_sf_to_block(
1146 dep->namelen = 2; 1148 dep->namelen = 2;
1147 dep->name[0] = dep->name[1] = '.'; 1149 dep->name[0] = dep->name[1] = '.';
1148 tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); 1150 tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
1149 INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); 1151 *tagp = cpu_to_be16((char *)dep - (char *)block);
1150 xfs_dir2_data_log_entry(tp, bp, dep); 1152 xfs_dir2_data_log_entry(tp, bp, dep);
1151 INT_SET(blp[1].hashval, ARCH_CONVERT, xfs_dir_hash_dotdot); 1153 blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot);
1152 INT_SET(blp[1].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block)); 1154 blp[1].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp,
1155 (char *)dep - (char *)block));
1153 offset = XFS_DIR2_DATA_FIRST_OFFSET; 1156 offset = XFS_DIR2_DATA_FIRST_OFFSET;
1154 /* 1157 /*
1155 * Loop over existing entries, stuff them in. 1158 * Loop over existing entries, stuff them in.
1156 */ 1159 */
1157 if ((i = 0) == INT_GET(sfp->hdr.count, ARCH_CONVERT)) 1160 if ((i = 0) == sfp->hdr.count)
1158 sfep = NULL; 1161 sfep = NULL;
1159 else 1162 else
1160 sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); 1163 sfep = XFS_DIR2_SF_FIRSTENTRY(sfp);
@@ -1176,15 +1179,14 @@ xfs_dir2_sf_to_block(
1176 if (offset < newoffset) { 1179 if (offset < newoffset) {
1177 dup = (xfs_dir2_data_unused_t *) 1180 dup = (xfs_dir2_data_unused_t *)
1178 ((char *)block + offset); 1181 ((char *)block + offset);
1179 INT_SET(dup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); 1182 dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
1180 INT_SET(dup->length, ARCH_CONVERT, newoffset - offset); 1183 dup->length = cpu_to_be16(newoffset - offset);
1181 INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT, 1184 *XFS_DIR2_DATA_UNUSED_TAG_P(dup) = cpu_to_be16(
1182 (xfs_dir2_data_off_t)
1183 ((char *)dup - (char *)block)); 1185 ((char *)dup - (char *)block));
1184 xfs_dir2_data_log_unused(tp, bp, dup); 1186 xfs_dir2_data_log_unused(tp, bp, dup);
1185 (void)xfs_dir2_data_freeinsert((xfs_dir2_data_t *)block, 1187 (void)xfs_dir2_data_freeinsert((xfs_dir2_data_t *)block,
1186 dup, &dummy); 1188 dup, &dummy);
1187 offset += INT_GET(dup->length, ARCH_CONVERT); 1189 offset += be16_to_cpu(dup->length);
1188 continue; 1190 continue;
1189 } 1191 }
1190 /* 1192 /*
@@ -1196,13 +1198,14 @@ xfs_dir2_sf_to_block(
1196 dep->namelen = sfep->namelen; 1198 dep->namelen = sfep->namelen;
1197 memcpy(dep->name, sfep->name, dep->namelen); 1199 memcpy(dep->name, sfep->name, dep->namelen);
1198 tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); 1200 tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
1199 INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); 1201 *tagp = cpu_to_be16((char *)dep - (char *)block);
1200 xfs_dir2_data_log_entry(tp, bp, dep); 1202 xfs_dir2_data_log_entry(tp, bp, dep);
1201 INT_SET(blp[2 + i].hashval, ARCH_CONVERT, xfs_da_hashname((char *)sfep->name, sfep->namelen)); 1203 blp[2 + i].hashval = cpu_to_be32(xfs_da_hashname(
1202 INT_SET(blp[2 + i].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, 1204 (char *)sfep->name, sfep->namelen));
1205 blp[2 + i].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp,
1203 (char *)dep - (char *)block)); 1206 (char *)dep - (char *)block));
1204 offset = (int)((char *)(tagp + 1) - (char *)block); 1207 offset = (int)((char *)(tagp + 1) - (char *)block);
1205 if (++i == INT_GET(sfp->hdr.count, ARCH_CONVERT)) 1208 if (++i == sfp->hdr.count)
1206 sfep = NULL; 1209 sfep = NULL;
1207 else 1210 else
1208 sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep); 1211 sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep);
@@ -1212,13 +1215,13 @@ xfs_dir2_sf_to_block(
1212 /* 1215 /*
1213 * Sort the leaf entries by hash value. 1216 * Sort the leaf entries by hash value.
1214 */ 1217 */
1215 xfs_sort(blp, INT_GET(btp->count, ARCH_CONVERT), sizeof(*blp), xfs_dir2_block_sort); 1218 xfs_sort(blp, be32_to_cpu(btp->count), sizeof(*blp), xfs_dir2_block_sort);
1216 /* 1219 /*
1217 * Log the leaf entry area and tail. 1220 * Log the leaf entry area and tail.
1218 * Already logged the header in data_init, ignore needlog. 1221 * Already logged the header in data_init, ignore needlog.
1219 */ 1222 */
1220 ASSERT(needscan == 0); 1223 ASSERT(needscan == 0);
1221 xfs_dir2_block_log_leaf(tp, bp, 0, INT_GET(btp->count, ARCH_CONVERT) - 1); 1224 xfs_dir2_block_log_leaf(tp, bp, 0, be32_to_cpu(btp->count) - 1);
1222 xfs_dir2_block_log_tail(tp, bp); 1225 xfs_dir2_block_log_tail(tp, bp);
1223 xfs_dir2_data_check(dp, bp); 1226 xfs_dir2_data_check(dp, bp);
1224 xfs_da_buf_done(bp); 1227 xfs_da_buf_done(bp);
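
One conversion in the sf-to-block hunks above disappears outright rather than changing spelling: sfp->hdr.count is a single byte, and byte order only exists for multi-byte values, so the bare field read is already correct on every CPU. A toy illustration with an assumed, abridged header layout:

#include <stdint.h>
#include <stdio.h>

/* A one-byte on-disk field needs no swab; multi-byte fields still do. */
struct sf_hdr {
        uint8_t count;          /* entry count: read directly on any CPU */
        uint8_t i8count;
        uint8_t parent[8];      /* an inode number: still endian-sensitive */
};

int main(void)
{
        struct sf_hdr hdr = { .count = 3 };

        printf("%u\n", hdr.count);      /* 3, no be*_to_cpu() needed */
        return 0;
}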
diff --git a/fs/xfs/xfs_dir2_block.h b/fs/xfs/xfs_dir2_block.h
index a2e5cb98a838..6722effd0b20 100644
--- a/fs/xfs/xfs_dir2_block.h
+++ b/fs/xfs/xfs_dir2_block.h
@@ -43,8 +43,8 @@ struct xfs_trans;
43#define XFS_DIR2_BLOCK_MAGIC 0x58443242 /* XD2B: for one block dirs */ 43#define XFS_DIR2_BLOCK_MAGIC 0x58443242 /* XD2B: for one block dirs */
44 44
45typedef struct xfs_dir2_block_tail { 45typedef struct xfs_dir2_block_tail {
46 __uint32_t count; /* count of leaf entries */ 46 __be32 count; /* count of leaf entries */
47 __uint32_t stale; /* count of stale lf entries */ 47 __be32 stale; /* count of stale lf entries */
48} xfs_dir2_block_tail_t; 48} xfs_dir2_block_tail_t;
49 49
50/* 50/*
@@ -75,8 +75,7 @@ xfs_dir2_block_tail_p(struct xfs_mount *mp, xfs_dir2_block_t *block)
75static inline struct xfs_dir2_leaf_entry * 75static inline struct xfs_dir2_leaf_entry *
76xfs_dir2_block_leaf_p(xfs_dir2_block_tail_t *btp) 76xfs_dir2_block_leaf_p(xfs_dir2_block_tail_t *btp)
77{ 77{
78 return (((struct xfs_dir2_leaf_entry *) 78 return ((struct xfs_dir2_leaf_entry *)btp) - be32_to_cpu(btp->count);
79 (btp)) - INT_GET((btp)->count, ARCH_CONVERT));
80} 79}
81 80
82/* 81/*
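
The rewritten inline above is pure pointer arithmetic: the leaf entry array ends exactly where the block tail begins, so casting the tail pointer to a leaf-entry pointer and subtracting the converted count lands on entry zero. A userspace model with stand-in types; count is passed already converted, i.e. what be32_to_cpu(btp->count) would return:

#include <stdint.h>
#include <stdio.h>

struct leaf_entry { uint32_t hashval, address; };       /* 8 bytes */
struct block_tail { uint32_t count, stale; };

static struct leaf_entry *block_leaf_p(struct block_tail *btp, uint32_t count)
{
        return (struct leaf_entry *)btp - count;
}

int main(void)
{
        uint64_t blockbuf[8];
        unsigned char *block = (unsigned char *)blockbuf;
        struct block_tail *btp = (struct block_tail *)(block + 56);

        /* With 3 entries, the leaf table starts 24 bytes below the tail. */
        printf("%td\n", (char *)btp - (char *)block_leaf_p(btp, 3));
        return 0;
}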
diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c
index 5b7c47e2f14a..bb3d03ff002b 100644
--- a/fs/xfs/xfs_dir2_data.c
+++ b/fs/xfs/xfs_dir2_data.c
@@ -70,11 +70,11 @@ xfs_dir2_data_check(
70 70
71 mp = dp->i_mount; 71 mp = dp->i_mount;
72 d = bp->data; 72 d = bp->data;
73 ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || 73 ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
74 INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); 74 be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
75 bf = d->hdr.bestfree; 75 bf = d->hdr.bestfree;
76 p = (char *)d->u; 76 p = (char *)d->u;
77 if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { 77 if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) {
78 btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); 78 btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d);
79 lep = XFS_DIR2_BLOCK_LEAF_P(btp); 79 lep = XFS_DIR2_BLOCK_LEAF_P(btp);
80 endp = (char *)lep; 80 endp = (char *)lep;
@@ -96,8 +96,8 @@ xfs_dir2_data_check(
96 ASSERT(!bf[2].offset); 96 ASSERT(!bf[2].offset);
97 freeseen |= 1 << 2; 97 freeseen |= 1 << 2;
98 } 98 }
99 ASSERT(INT_GET(bf[0].length, ARCH_CONVERT) >= INT_GET(bf[1].length, ARCH_CONVERT)); 99 ASSERT(be16_to_cpu(bf[0].length) >= be16_to_cpu(bf[1].length));
100 ASSERT(INT_GET(bf[1].length, ARCH_CONVERT) >= INT_GET(bf[2].length, ARCH_CONVERT)); 100 ASSERT(be16_to_cpu(bf[1].length) >= be16_to_cpu(bf[2].length));
101 /* 101 /*
102 * Loop over the data/unused entries. 102 * Loop over the data/unused entries.
103 */ 103 */
@@ -108,18 +108,20 @@ xfs_dir2_data_check(
108 * If we find it, account for that, else make sure it 108 * If we find it, account for that, else make sure it
109 * doesn't need to be there. 109 * doesn't need to be there.
110 */ 110 */
111 if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { 111 if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
112 ASSERT(lastfree == 0); 112 ASSERT(lastfree == 0);
113 ASSERT(INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT) == 113 ASSERT(be16_to_cpu(*XFS_DIR2_DATA_UNUSED_TAG_P(dup)) ==
114 (char *)dup - (char *)d); 114 (char *)dup - (char *)d);
115 dfp = xfs_dir2_data_freefind(d, dup); 115 dfp = xfs_dir2_data_freefind(d, dup);
116 if (dfp) { 116 if (dfp) {
117 i = (int)(dfp - bf); 117 i = (int)(dfp - bf);
118 ASSERT((freeseen & (1 << i)) == 0); 118 ASSERT((freeseen & (1 << i)) == 0);
119 freeseen |= 1 << i; 119 freeseen |= 1 << i;
120 } else 120 } else {
121 ASSERT(INT_GET(dup->length, ARCH_CONVERT) <= INT_GET(bf[2].length, ARCH_CONVERT)); 121 ASSERT(be16_to_cpu(dup->length) <=
122 p += INT_GET(dup->length, ARCH_CONVERT); 122 be16_to_cpu(bf[2].length));
123 }
124 p += be16_to_cpu(dup->length);
123 lastfree = 1; 125 lastfree = 1;
124 continue; 126 continue;
125 } 127 }
@@ -132,21 +134,21 @@ xfs_dir2_data_check(
132 dep = (xfs_dir2_data_entry_t *)p; 134 dep = (xfs_dir2_data_entry_t *)p;
133 ASSERT(dep->namelen != 0); 135 ASSERT(dep->namelen != 0);
134 ASSERT(xfs_dir_ino_validate(mp, INT_GET(dep->inumber, ARCH_CONVERT)) == 0); 136 ASSERT(xfs_dir_ino_validate(mp, INT_GET(dep->inumber, ARCH_CONVERT)) == 0);
135 ASSERT(INT_GET(*XFS_DIR2_DATA_ENTRY_TAG_P(dep), ARCH_CONVERT) == 137 ASSERT(be16_to_cpu(*XFS_DIR2_DATA_ENTRY_TAG_P(dep)) ==
136 (char *)dep - (char *)d); 138 (char *)dep - (char *)d);
137 count++; 139 count++;
138 lastfree = 0; 140 lastfree = 0;
139 if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { 141 if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) {
140 addr = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 142 addr = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk,
141 (xfs_dir2_data_aoff_t) 143 (xfs_dir2_data_aoff_t)
142 ((char *)dep - (char *)d)); 144 ((char *)dep - (char *)d));
143 hash = xfs_da_hashname((char *)dep->name, dep->namelen); 145 hash = xfs_da_hashname((char *)dep->name, dep->namelen);
144 for (i = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) { 146 for (i = 0; i < be32_to_cpu(btp->count); i++) {
145 if (INT_GET(lep[i].address, ARCH_CONVERT) == addr && 147 if (be32_to_cpu(lep[i].address) == addr &&
146 INT_GET(lep[i].hashval, ARCH_CONVERT) == hash) 148 be32_to_cpu(lep[i].hashval) == hash)
147 break; 149 break;
148 } 150 }
149 ASSERT(i < INT_GET(btp->count, ARCH_CONVERT)); 151 ASSERT(i < be32_to_cpu(btp->count));
150 } 152 }
151 p += XFS_DIR2_DATA_ENTSIZE(dep->namelen); 153 p += XFS_DIR2_DATA_ENTSIZE(dep->namelen);
152 } 154 }
@@ -154,15 +156,15 @@ xfs_dir2_data_check(
154 * Need to have seen all the entries and all the bestfree slots. 156 * Need to have seen all the entries and all the bestfree slots.
155 */ 157 */
156 ASSERT(freeseen == 7); 158 ASSERT(freeseen == 7);
157 if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { 159 if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) {
158 for (i = stale = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) { 160 for (i = stale = 0; i < be32_to_cpu(btp->count); i++) {
159 if (INT_GET(lep[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) 161 if (be32_to_cpu(lep[i].address) == XFS_DIR2_NULL_DATAPTR)
160 stale++; 162 stale++;
161 if (i > 0) 163 if (i > 0)
162 ASSERT(INT_GET(lep[i].hashval, ARCH_CONVERT) >= INT_GET(lep[i - 1].hashval, ARCH_CONVERT)); 164 ASSERT(be32_to_cpu(lep[i].hashval) >= be32_to_cpu(lep[i - 1].hashval));
163 } 165 }
164 ASSERT(count == INT_GET(btp->count, ARCH_CONVERT) - INT_GET(btp->stale, ARCH_CONVERT)); 166 ASSERT(count == be32_to_cpu(btp->count) - be32_to_cpu(btp->stale));
165 ASSERT(stale == INT_GET(btp->stale, ARCH_CONVERT)); 167 ASSERT(stale == be32_to_cpu(btp->stale));
166 } 168 }
167} 169}
168#endif 170#endif
@@ -190,8 +192,8 @@ xfs_dir2_data_freefind(
190 * Check order, non-overlapping entries, and if we find the 192 * Check order, non-overlapping entries, and if we find the
191 * one we're looking for it has to be exact. 193 * one we're looking for it has to be exact.
192 */ 194 */
193 ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || 195 ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
194 INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); 196 be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
195 for (dfp = &d->hdr.bestfree[0], seenzero = matched = 0; 197 for (dfp = &d->hdr.bestfree[0], seenzero = matched = 0;
196 dfp < &d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT]; 198 dfp < &d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT];
197 dfp++) { 199 dfp++) {
@@ -201,23 +203,24 @@ xfs_dir2_data_freefind(
201 continue; 203 continue;
202 } 204 }
203 ASSERT(seenzero == 0); 205 ASSERT(seenzero == 0);
204 if (INT_GET(dfp->offset, ARCH_CONVERT) == off) { 206 if (be16_to_cpu(dfp->offset) == off) {
205 matched = 1; 207 matched = 1;
206 ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(dup->length, ARCH_CONVERT)); 208 ASSERT(dfp->length == dup->length);
207 } else if (off < INT_GET(dfp->offset, ARCH_CONVERT)) 209 } else if (off < be16_to_cpu(dfp->offset))
208 ASSERT(off + INT_GET(dup->length, ARCH_CONVERT) <= INT_GET(dfp->offset, ARCH_CONVERT)); 210 ASSERT(off + be16_to_cpu(dup->length) <= be16_to_cpu(dfp->offset));
209 else 211 else
210 ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) + INT_GET(dfp->length, ARCH_CONVERT) <= off); 212 ASSERT(be16_to_cpu(dfp->offset) + be16_to_cpu(dfp->length) <= off);
211 ASSERT(matched || INT_GET(dfp->length, ARCH_CONVERT) >= INT_GET(dup->length, ARCH_CONVERT)); 213 ASSERT(matched || be16_to_cpu(dfp->length) >= be16_to_cpu(dup->length));
212 if (dfp > &d->hdr.bestfree[0]) 214 if (dfp > &d->hdr.bestfree[0])
213 ASSERT(INT_GET(dfp[-1].length, ARCH_CONVERT) >= INT_GET(dfp[0].length, ARCH_CONVERT)); 215 ASSERT(be16_to_cpu(dfp[-1].length) >= be16_to_cpu(dfp[0].length));
214 } 216 }
215#endif 217#endif
216 /* 218 /*
217 * If this is smaller than the smallest bestfree entry, 219 * If this is smaller than the smallest bestfree entry,
218 * it can't be there since they're sorted. 220 * it can't be there since they're sorted.
219 */ 221 */
220 if (INT_GET(dup->length, ARCH_CONVERT) < INT_GET(d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT - 1].length, ARCH_CONVERT)) 222 if (be16_to_cpu(dup->length) <
223 be16_to_cpu(d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT - 1].length))
221 return NULL; 224 return NULL;
222 /* 225 /*
223 * Look at the three bestfree entries for our guy. 226 * Look at the three bestfree entries for our guy.
@@ -227,7 +230,7 @@ xfs_dir2_data_freefind(
227 dfp++) { 230 dfp++) {
228 if (!dfp->offset) 231 if (!dfp->offset)
229 return NULL; 232 return NULL;
230 if (INT_GET(dfp->offset, ARCH_CONVERT) == off) 233 if (be16_to_cpu(dfp->offset) == off)
231 return dfp; 234 return dfp;
232 } 235 }
233 /* 236 /*
@@ -249,29 +252,29 @@ xfs_dir2_data_freeinsert(
249 xfs_dir2_data_free_t new; /* new bestfree entry */ 252 xfs_dir2_data_free_t new; /* new bestfree entry */
250 253
251#ifdef __KERNEL__ 254#ifdef __KERNEL__
252 ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || 255 ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
253 INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); 256 be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
254#endif 257#endif
255 dfp = d->hdr.bestfree; 258 dfp = d->hdr.bestfree;
256 INT_COPY(new.length, dup->length, ARCH_CONVERT); 259 new.length = dup->length;
257 INT_SET(new.offset, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dup - (char *)d)); 260 new.offset = cpu_to_be16((char *)dup - (char *)d);
258 /* 261 /*
259 * Insert at position 0, 1, or 2; or not at all. 262 * Insert at position 0, 1, or 2; or not at all.
260 */ 263 */
261 if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[0].length, ARCH_CONVERT)) { 264 if (be16_to_cpu(new.length) > be16_to_cpu(dfp[0].length)) {
262 dfp[2] = dfp[1]; 265 dfp[2] = dfp[1];
263 dfp[1] = dfp[0]; 266 dfp[1] = dfp[0];
264 dfp[0] = new; 267 dfp[0] = new;
265 *loghead = 1; 268 *loghead = 1;
266 return &dfp[0]; 269 return &dfp[0];
267 } 270 }
268 if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[1].length, ARCH_CONVERT)) { 271 if (be16_to_cpu(new.length) > be16_to_cpu(dfp[1].length)) {
269 dfp[2] = dfp[1]; 272 dfp[2] = dfp[1];
270 dfp[1] = new; 273 dfp[1] = new;
271 *loghead = 1; 274 *loghead = 1;
272 return &dfp[1]; 275 return &dfp[1];
273 } 276 }
274 if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[2].length, ARCH_CONVERT)) { 277 if (be16_to_cpu(new.length) > be16_to_cpu(dfp[2].length)) {
275 dfp[2] = new; 278 dfp[2] = new;
276 *loghead = 1; 279 *loghead = 1;
277 return &dfp[2]; 280 return &dfp[2];
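
xfs_dir2_data_freeinsert() above keeps the three bestfree slots sorted by descending length, so an insert is exactly the cascade of comparisons just shown: shift smaller slots down, or drop the candidate if it beats none of them. A CPU-order sketch; the real code compares __be16 fields through be16_to_cpu():

#include <stddef.h>
#include <stdio.h>

struct bestfree { unsigned short length, offset; };

static struct bestfree *free_insert(struct bestfree bf[3], struct bestfree new)
{
        if (new.length > bf[0].length) {
                bf[2] = bf[1];
                bf[1] = bf[0];
                bf[0] = new;
                return &bf[0];
        }
        if (new.length > bf[1].length) {
                bf[2] = bf[1];
                bf[1] = new;
                return &bf[1];
        }
        if (new.length > bf[2].length) {
                bf[2] = new;
                return &bf[2];
        }
        return NULL;            /* too small for the table */
}

int main(void)
{
        struct bestfree bf[3] = { { 64, 16 }, { 32, 96 }, { 8, 200 } };

        free_insert(bf, (struct bestfree){ 48, 128 });
        printf("%u %u %u\n", bf[0].length, bf[1].length, bf[2].length);
        /* prints: 64 48 32 */
        return 0;
}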
@@ -289,8 +292,8 @@ xfs_dir2_data_freeremove(
289 int *loghead) /* out: log data header */ 292 int *loghead) /* out: log data header */
290{ 293{
291#ifdef __KERNEL__ 294#ifdef __KERNEL__
292 ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || 295 ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
293 INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); 296 be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
294#endif 297#endif
295 /* 298 /*
296 * It's the first entry, slide the next 2 up. 299 * It's the first entry, slide the next 2 up.
@@ -334,8 +337,8 @@ xfs_dir2_data_freescan(
334 char *p; /* current entry pointer */ 337 char *p; /* current entry pointer */
335 338
336#ifdef __KERNEL__ 339#ifdef __KERNEL__
337 ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || 340 ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
338 INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); 341 be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
339#endif 342#endif
340 /* 343 /*
341 * Start by clearing the table. 344 * Start by clearing the table.
@@ -348,7 +351,7 @@ xfs_dir2_data_freescan(
348 p = (char *)d->u; 351 p = (char *)d->u;
349 if (aendp) 352 if (aendp)
350 endp = aendp; 353 endp = aendp;
351 else if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { 354 else if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) {
352 btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); 355 btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d);
353 endp = (char *)XFS_DIR2_BLOCK_LEAF_P(btp); 356 endp = (char *)XFS_DIR2_BLOCK_LEAF_P(btp);
354 } else 357 } else
@@ -361,11 +364,11 @@ xfs_dir2_data_freescan(
361 /* 364 /*
362 * If it's a free entry, insert it. 365 * If it's a free entry, insert it.
363 */ 366 */
364 if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { 367 if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
365 ASSERT((char *)dup - (char *)d == 368 ASSERT((char *)dup - (char *)d ==
366 INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT)); 369 be16_to_cpu(*XFS_DIR2_DATA_UNUSED_TAG_P(dup)));
367 xfs_dir2_data_freeinsert(d, dup, loghead); 370 xfs_dir2_data_freeinsert(d, dup, loghead);
368 p += INT_GET(dup->length, ARCH_CONVERT); 371 p += be16_to_cpu(dup->length);
369 } 372 }
370 /* 373 /*
371 * For active entries, check their tags and skip them. 374 * For active entries, check their tags and skip them.
@@ -373,7 +376,7 @@ xfs_dir2_data_freescan(
373 else { 376 else {
374 dep = (xfs_dir2_data_entry_t *)p; 377 dep = (xfs_dir2_data_entry_t *)p;
375 ASSERT((char *)dep - (char *)d == 378 ASSERT((char *)dep - (char *)d ==
376 INT_GET(*XFS_DIR2_DATA_ENTRY_TAG_P(dep), ARCH_CONVERT)); 379 be16_to_cpu(*XFS_DIR2_DATA_ENTRY_TAG_P(dep)));
377 p += XFS_DIR2_DATA_ENTSIZE(dep->namelen); 380 p += XFS_DIR2_DATA_ENTSIZE(dep->namelen);
378 } 381 }
379 } 382 }
@@ -415,8 +418,8 @@ xfs_dir2_data_init(
415 * Initialize the header. 418 * Initialize the header.
416 */ 419 */
417 d = bp->data; 420 d = bp->data;
418 INT_SET(d->hdr.magic, ARCH_CONVERT, XFS_DIR2_DATA_MAGIC); 421 d->hdr.magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);
419 INT_SET(d->hdr.bestfree[0].offset, ARCH_CONVERT, (xfs_dir2_data_off_t)sizeof(d->hdr)); 422 d->hdr.bestfree[0].offset = cpu_to_be16(sizeof(d->hdr));
420 for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) { 423 for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) {
421 d->hdr.bestfree[i].length = 0; 424 d->hdr.bestfree[i].length = 0;
422 d->hdr.bestfree[i].offset = 0; 425 d->hdr.bestfree[i].offset = 0;
@@ -425,13 +428,12 @@ xfs_dir2_data_init(
425 * Set up an unused entry for the block's body. 428 * Set up an unused entry for the block's body.
426 */ 429 */
427 dup = &d->u[0].unused; 430 dup = &d->u[0].unused;
428 INT_SET(dup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); 431 dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
429 432
430 t=mp->m_dirblksize - (uint)sizeof(d->hdr); 433 t=mp->m_dirblksize - (uint)sizeof(d->hdr);
431 INT_SET(d->hdr.bestfree[0].length, ARCH_CONVERT, t); 434 d->hdr.bestfree[0].length = cpu_to_be16(t);
432 INT_SET(dup->length, ARCH_CONVERT, t); 435 dup->length = cpu_to_be16(t);
433 INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT, 436 *XFS_DIR2_DATA_UNUSED_TAG_P(dup) = cpu_to_be16((char *)dup - (char *)d);
434 (xfs_dir2_data_off_t)((char *)dup - (char *)d));
435 /* 437 /*
436 * Log it and return it. 438 * Log it and return it.
437 */ 439 */
@@ -453,8 +455,8 @@ xfs_dir2_data_log_entry(
453 xfs_dir2_data_t *d; /* data block pointer */ 455 xfs_dir2_data_t *d; /* data block pointer */
454 456
455 d = bp->data; 457 d = bp->data;
456 ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || 458 ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
457 INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); 459 be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
458 xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)d), 460 xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)d),
459 (uint)((char *)(XFS_DIR2_DATA_ENTRY_TAG_P(dep) + 1) - 461 (uint)((char *)(XFS_DIR2_DATA_ENTRY_TAG_P(dep) + 1) -
460 (char *)d - 1)); 462 (char *)d - 1));
@@ -471,8 +473,8 @@ xfs_dir2_data_log_header(
471 xfs_dir2_data_t *d; /* data block pointer */ 473 xfs_dir2_data_t *d; /* data block pointer */
472 474
473 d = bp->data; 475 d = bp->data;
474 ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || 476 ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
475 INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); 477 be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
476 xfs_da_log_buf(tp, bp, (uint)((char *)&d->hdr - (char *)d), 478 xfs_da_log_buf(tp, bp, (uint)((char *)&d->hdr - (char *)d),
477 (uint)(sizeof(d->hdr) - 1)); 479 (uint)(sizeof(d->hdr) - 1));
478} 480}
@@ -489,8 +491,8 @@ xfs_dir2_data_log_unused(
489 xfs_dir2_data_t *d; /* data block pointer */ 491 xfs_dir2_data_t *d; /* data block pointer */
490 492
491 d = bp->data; 493 d = bp->data;
492 ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || 494 ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
493 INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); 495 be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
494 /* 496 /*
495 * Log the first part of the unused entry. 497 * Log the first part of the unused entry.
496 */ 498 */
@@ -533,12 +535,12 @@ xfs_dir2_data_make_free(
533 /* 535 /*
534 * Figure out where the end of the data area is. 536 * Figure out where the end of the data area is.
535 */ 537 */
536 if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC) 538 if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC)
537 endptr = (char *)d + mp->m_dirblksize; 539 endptr = (char *)d + mp->m_dirblksize;
538 else { 540 else {
539 xfs_dir2_block_tail_t *btp; /* block tail */ 541 xfs_dir2_block_tail_t *btp; /* block tail */
540 542
541 ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); 543 ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
542 btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); 544 btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d);
543 endptr = (char *)XFS_DIR2_BLOCK_LEAF_P(btp); 545 endptr = (char *)XFS_DIR2_BLOCK_LEAF_P(btp);
544 } 546 }
@@ -547,11 +549,11 @@ xfs_dir2_data_make_free(
547 * the previous entry and see if it's free. 549 * the previous entry and see if it's free.
548 */ 550 */
549 if (offset > sizeof(d->hdr)) { 551 if (offset > sizeof(d->hdr)) {
550 xfs_dir2_data_off_t *tagp; /* tag just before us */ 552 __be16 *tagp; /* tag just before us */
551 553
552 tagp = (xfs_dir2_data_off_t *)((char *)d + offset) - 1; 554 tagp = (__be16 *)((char *)d + offset) - 1;
553 prevdup = (xfs_dir2_data_unused_t *)((char *)d + INT_GET(*tagp, ARCH_CONVERT)); 555 prevdup = (xfs_dir2_data_unused_t *)((char *)d + be16_to_cpu(*tagp));
554 if (INT_GET(prevdup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG) 556 if (be16_to_cpu(prevdup->freetag) != XFS_DIR2_DATA_FREE_TAG)
555 prevdup = NULL; 557 prevdup = NULL;
556 } else 558 } else
557 prevdup = NULL; 559 prevdup = NULL;
@@ -562,7 +564,7 @@ xfs_dir2_data_make_free(
562 if ((char *)d + offset + len < endptr) { 564 if ((char *)d + offset + len < endptr) {
563 postdup = 565 postdup =
564 (xfs_dir2_data_unused_t *)((char *)d + offset + len); 566 (xfs_dir2_data_unused_t *)((char *)d + offset + len);
565 if (INT_GET(postdup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG) 567 if (be16_to_cpu(postdup->freetag) != XFS_DIR2_DATA_FREE_TAG)
566 postdup = NULL; 568 postdup = NULL;
567 } else 569 } else
568 postdup = NULL; 570 postdup = NULL;
@@ -586,13 +588,13 @@ xfs_dir2_data_make_free(
586 * since the third bestfree is there, there might be more 588 * since the third bestfree is there, there might be more
587 * entries. 589 * entries.
588 */ 590 */
589 needscan = d->hdr.bestfree[2].length; 591 needscan = (d->hdr.bestfree[2].length != 0);
590 /* 592 /*
591 * Fix up the new big freespace. 593 * Fix up the new big freespace.
592 */ 594 */
593 INT_MOD(prevdup->length, ARCH_CONVERT, len + INT_GET(postdup->length, ARCH_CONVERT)); 595 be16_add(&prevdup->length, len + be16_to_cpu(postdup->length));
594 INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(prevdup), ARCH_CONVERT, 596 *XFS_DIR2_DATA_UNUSED_TAG_P(prevdup) =
595 (xfs_dir2_data_off_t)((char *)prevdup - (char *)d)); 597 cpu_to_be16((char *)prevdup - (char *)d);
596 xfs_dir2_data_log_unused(tp, bp, prevdup); 598 xfs_dir2_data_log_unused(tp, bp, prevdup);
597 if (!needscan) { 599 if (!needscan) {
598 /* 600 /*
@@ -614,7 +616,7 @@ xfs_dir2_data_make_free(
614 */ 616 */
615 dfp = xfs_dir2_data_freeinsert(d, prevdup, needlogp); 617 dfp = xfs_dir2_data_freeinsert(d, prevdup, needlogp);
616 ASSERT(dfp == &d->hdr.bestfree[0]); 618 ASSERT(dfp == &d->hdr.bestfree[0]);
617 ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(prevdup->length, ARCH_CONVERT)); 619 ASSERT(dfp->length == prevdup->length);
618 ASSERT(!dfp[1].length); 620 ASSERT(!dfp[1].length);
619 ASSERT(!dfp[2].length); 621 ASSERT(!dfp[2].length);
620 } 622 }
@@ -624,9 +626,9 @@ xfs_dir2_data_make_free(
624 */ 626 */
625 else if (prevdup) { 627 else if (prevdup) {
626 dfp = xfs_dir2_data_freefind(d, prevdup); 628 dfp = xfs_dir2_data_freefind(d, prevdup);
627 INT_MOD(prevdup->length, ARCH_CONVERT, len); 629 be16_add(&prevdup->length, len);
628 INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(prevdup), ARCH_CONVERT, 630 *XFS_DIR2_DATA_UNUSED_TAG_P(prevdup) =
629 (xfs_dir2_data_off_t)((char *)prevdup - (char *)d)); 631 cpu_to_be16((char *)prevdup - (char *)d);
630 xfs_dir2_data_log_unused(tp, bp, prevdup); 632 xfs_dir2_data_log_unused(tp, bp, prevdup);
631 /* 633 /*
632 * If the previous entry was in the table, the new entry 634 * If the previous entry was in the table, the new entry
@@ -640,8 +642,10 @@ xfs_dir2_data_make_free(
640 /* 642 /*
641 * Otherwise we need a scan if the new entry is big enough. 643 * Otherwise we need a scan if the new entry is big enough.
642 */ 644 */
643 else 645 else {
644 needscan = INT_GET(prevdup->length, ARCH_CONVERT) > INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT); 646 needscan = be16_to_cpu(prevdup->length) >
647 be16_to_cpu(d->hdr.bestfree[2].length);
648 }
645 } 649 }
646 /* 650 /*
647 * The following entry is free, merge with it. 651 * The following entry is free, merge with it.
@@ -649,10 +653,10 @@ xfs_dir2_data_make_free(
649 else if (postdup) { 653 else if (postdup) {
650 dfp = xfs_dir2_data_freefind(d, postdup); 654 dfp = xfs_dir2_data_freefind(d, postdup);
651 newdup = (xfs_dir2_data_unused_t *)((char *)d + offset); 655 newdup = (xfs_dir2_data_unused_t *)((char *)d + offset);
652 INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); 656 newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
653 INT_SET(newdup->length, ARCH_CONVERT, len + INT_GET(postdup->length, ARCH_CONVERT)); 657 newdup->length = cpu_to_be16(len + be16_to_cpu(postdup->length));
654 INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT, 658 *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) =
655 (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); 659 cpu_to_be16((char *)newdup - (char *)d);
656 xfs_dir2_data_log_unused(tp, bp, newdup); 660 xfs_dir2_data_log_unused(tp, bp, newdup);
657 /* 661 /*
658 * If the following entry was in the table, the new entry 662 * If the following entry was in the table, the new entry
@@ -666,18 +670,20 @@ xfs_dir2_data_make_free(
666 /* 670 /*
667 * Otherwise we need a scan if the new entry is big enough. 671 * Otherwise we need a scan if the new entry is big enough.
668 */ 672 */
669 else 673 else {
670 needscan = INT_GET(newdup->length, ARCH_CONVERT) > INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT); 674 needscan = be16_to_cpu(newdup->length) >
675 be16_to_cpu(d->hdr.bestfree[2].length);
676 }
671 } 677 }
672 /* 678 /*
673 * Neither neighbor is free. Make a new entry. 679 * Neither neighbor is free. Make a new entry.
674 */ 680 */
675 else { 681 else {
676 newdup = (xfs_dir2_data_unused_t *)((char *)d + offset); 682 newdup = (xfs_dir2_data_unused_t *)((char *)d + offset);
677 INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); 683 newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
678 INT_SET(newdup->length, ARCH_CONVERT, len); 684 newdup->length = cpu_to_be16(len);
679 INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT, 685 *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) =
680 (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); 686 cpu_to_be16((char *)newdup - (char *)d);
681 xfs_dir2_data_log_unused(tp, bp, newdup); 687 xfs_dir2_data_log_unused(tp, bp, newdup);
682 (void)xfs_dir2_data_freeinsert(d, newdup, needlogp); 688 (void)xfs_dir2_data_freeinsert(d, newdup, needlogp);
683 } 689 }
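
As converted above, xfs_dir2_data_make_free() still has the same four shapes, chosen by whether the bytes just before (prevdup) and just after (postdup) the freed range are already unused: merge all three into one span, extend the previous entry, swallow the following entry, or create a fresh one. A tiny classifier capturing that dispatch; prev_free/post_free mirror the prevdup/postdup NULL tests:

enum merge_case { MERGE_BOTH, MERGE_PREV, MERGE_NEXT, MERGE_NONE };

static enum merge_case classify(int prev_free, int post_free)
{
        if (prev_free && post_free)
                return MERGE_BOTH;      /* prevdup grows over range + postdup */
        if (prev_free)
                return MERGE_PREV;      /* be16_add(&prevdup->length, len) */
        if (post_free)
                return MERGE_NEXT;      /* new entry absorbs postdup */
        return MERGE_NONE;              /* brand-new unused entry */
}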
@@ -707,18 +713,18 @@ xfs_dir2_data_use_free(
707 int oldlen; /* old unused entry's length */ 713 int oldlen; /* old unused entry's length */
708 714
709 d = bp->data; 715 d = bp->data;
710 ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || 716 ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
711 INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); 717 be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
712 ASSERT(INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG); 718 ASSERT(be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG);
713 ASSERT(offset >= (char *)dup - (char *)d); 719 ASSERT(offset >= (char *)dup - (char *)d);
714 ASSERT(offset + len <= (char *)dup + INT_GET(dup->length, ARCH_CONVERT) - (char *)d); 720 ASSERT(offset + len <= (char *)dup + be16_to_cpu(dup->length) - (char *)d);
715 ASSERT((char *)dup - (char *)d == INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT)); 721 ASSERT((char *)dup - (char *)d == be16_to_cpu(*XFS_DIR2_DATA_UNUSED_TAG_P(dup)));
716 /* 722 /*
717 * Look up the entry in the bestfree table. 723 * Look up the entry in the bestfree table.
718 */ 724 */
719 dfp = xfs_dir2_data_freefind(d, dup); 725 dfp = xfs_dir2_data_freefind(d, dup);
720 oldlen = INT_GET(dup->length, ARCH_CONVERT); 726 oldlen = be16_to_cpu(dup->length);
721 ASSERT(dfp || oldlen <= INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT)); 727 ASSERT(dfp || oldlen <= be16_to_cpu(d->hdr.bestfree[2].length));
722 /* 728 /*
723 * Check for alignment with front and back of the entry. 729 * Check for alignment with front and back of the entry.
724 */ 730 */
@@ -732,7 +738,7 @@ xfs_dir2_data_use_free(
732 */ 738 */
733 if (matchfront && matchback) { 739 if (matchfront && matchback) {
734 if (dfp) { 740 if (dfp) {
735 needscan = d->hdr.bestfree[2].offset; 741 needscan = (d->hdr.bestfree[2].offset != 0);
736 if (!needscan) 742 if (!needscan)
737 xfs_dir2_data_freeremove(d, dfp, needlogp); 743 xfs_dir2_data_freeremove(d, dfp, needlogp);
738 } 744 }
@@ -743,10 +749,10 @@ xfs_dir2_data_use_free(
743 */ 749 */
744 else if (matchfront) { 750 else if (matchfront) {
745 newdup = (xfs_dir2_data_unused_t *)((char *)d + offset + len); 751 newdup = (xfs_dir2_data_unused_t *)((char *)d + offset + len);
746 INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); 752 newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
747 INT_SET(newdup->length, ARCH_CONVERT, oldlen - len); 753 newdup->length = cpu_to_be16(oldlen - len);
748 INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT, 754 *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) =
749 (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); 755 cpu_to_be16((char *)newdup - (char *)d);
750 xfs_dir2_data_log_unused(tp, bp, newdup); 756 xfs_dir2_data_log_unused(tp, bp, newdup);
751 /* 757 /*
752 * If it was in the table, remove it and add the new one. 758 * If it was in the table, remove it and add the new one.
@@ -755,8 +761,8 @@ xfs_dir2_data_use_free(
755 xfs_dir2_data_freeremove(d, dfp, needlogp); 761 xfs_dir2_data_freeremove(d, dfp, needlogp);
756 dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp); 762 dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp);
757 ASSERT(dfp != NULL); 763 ASSERT(dfp != NULL);
758 ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(newdup->length, ARCH_CONVERT)); 764 ASSERT(dfp->length == newdup->length);
759 ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) == (char *)newdup - (char *)d); 765 ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)d);
760 /* 766 /*
761 * If we got inserted at the last slot, 767 * If we got inserted at the last slot,
762 * that means we don't know if there was a better 768 * that means we don't know if there was a better
@@ -771,10 +777,9 @@ xfs_dir2_data_use_free(
771 */ 777 */
772 else if (matchback) { 778 else if (matchback) {
773 newdup = dup; 779 newdup = dup;
774 INT_SET(newdup->length, ARCH_CONVERT, (xfs_dir2_data_off_t) 780 newdup->length = cpu_to_be16(((char *)d + offset) - (char *)newdup);
775 (((char *)d + offset) - (char *)newdup)); 781 *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) =
776 INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT, 782 cpu_to_be16((char *)newdup - (char *)d);
777 (xfs_dir2_data_off_t)((char *)newdup - (char *)d));
778 xfs_dir2_data_log_unused(tp, bp, newdup); 783 xfs_dir2_data_log_unused(tp, bp, newdup);
779 /* 784 /*
780 * If it was in the table, remove it and add the new one. 785 * If it was in the table, remove it and add the new one.
@@ -783,8 +788,8 @@ xfs_dir2_data_use_free(
783 xfs_dir2_data_freeremove(d, dfp, needlogp); 788 xfs_dir2_data_freeremove(d, dfp, needlogp);
784 dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp); 789 dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp);
785 ASSERT(dfp != NULL); 790 ASSERT(dfp != NULL);
786 ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(newdup->length, ARCH_CONVERT)); 791 ASSERT(dfp->length == newdup->length);
787 ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) == (char *)newdup - (char *)d); 792 ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)d);
788 /* 793 /*
789 * If we got inserted at the last slot, 794 * If we got inserted at the last slot,
790 * that means we don't know if there was a better 795 * that means we don't know if there was a better
@@ -799,16 +804,15 @@ xfs_dir2_data_use_free(
799 */ 804 */
800 else { 805 else {
801 newdup = dup; 806 newdup = dup;
802 INT_SET(newdup->length, ARCH_CONVERT, (xfs_dir2_data_off_t) 807 newdup->length = cpu_to_be16(((char *)d + offset) - (char *)newdup);
803 (((char *)d + offset) - (char *)newdup)); 808 *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) =
804 INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT, 809 cpu_to_be16((char *)newdup - (char *)d);
805 (xfs_dir2_data_off_t)((char *)newdup - (char *)d));
806 xfs_dir2_data_log_unused(tp, bp, newdup); 810 xfs_dir2_data_log_unused(tp, bp, newdup);
807 newdup2 = (xfs_dir2_data_unused_t *)((char *)d + offset + len); 811 newdup2 = (xfs_dir2_data_unused_t *)((char *)d + offset + len);
808 INT_SET(newdup2->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); 812 newdup2->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
809 INT_SET(newdup2->length, ARCH_CONVERT, oldlen - len - INT_GET(newdup->length, ARCH_CONVERT)); 813 newdup2->length = cpu_to_be16(oldlen - len - be16_to_cpu(newdup->length));
810 INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup2), ARCH_CONVERT, 814 *XFS_DIR2_DATA_UNUSED_TAG_P(newdup2) =
811 (xfs_dir2_data_off_t)((char *)newdup2 - (char *)d)); 815 cpu_to_be16((char *)newdup2 - (char *)d);
812 xfs_dir2_data_log_unused(tp, bp, newdup2); 816 xfs_dir2_data_log_unused(tp, bp, newdup2);
813 /* 817 /*
814 * If the old entry was in the table, we need to scan 818 * If the old entry was in the table, we need to scan
@@ -819,7 +823,7 @@ xfs_dir2_data_use_free(
819 * the 2 new will work. 823 * the 2 new will work.
820 */ 824 */
821 if (dfp) { 825 if (dfp) {
822 needscan = d->hdr.bestfree[2].length; 826 needscan = (d->hdr.bestfree[2].length != 0);
823 if (!needscan) { 827 if (!needscan) {
824 xfs_dir2_data_freeremove(d, dfp, needlogp); 828 xfs_dir2_data_freeremove(d, dfp, needlogp);
825 (void)xfs_dir2_data_freeinsert(d, newdup, 829 (void)xfs_dir2_data_freeinsert(d, newdup,
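The needscan changes in this file are the one place the conversion is more than mechanical: "needscan = d->hdr.bestfree[2].offset" became an explicit "!= 0" test. A zero/nonzero check on a big-endian field is endian-neutral, but any ordered comparison (the "length >" tests above) has to byte-swap first. A minimal userspace sketch of the distinction, assuming a little-endian host; the demo helpers below only stand in for the kernel's cpu_to_be16/be16_to_cpu and are not the kernel versions:

#include <stdint.h>
#include <stdio.h>

/* Unconditional byte swaps: model cpu_to_be16/be16_to_cpu on an LE host. */
static uint16_t demo_cpu_to_be16(uint16_t x) { return (uint16_t)((x >> 8) | (x << 8)); }
static uint16_t demo_be16_to_cpu(uint16_t x) { return (uint16_t)((x >> 8) | (x << 8)); }

int main(void)
{
    uint16_t disk_len = demo_cpu_to_be16(8);    /* stored bytes: 0x00 0x08 */

    /* Zero tests are endian-neutral: zero is all-zero bytes either way. */
    printf("nonzero: %d\n", disk_len != 0);                     /* 1, correct */

    /* Ordered comparisons are not: the raw value reads back as 2048. */
    printf("raw:     %d\n", disk_len > 100);                    /* 1, wrong */
    printf("swapped: %d\n", demo_be16_to_cpu(disk_len) > 100);  /* 0, correct */
    return 0;
}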
diff --git a/fs/xfs/xfs_dir2_data.h b/fs/xfs/xfs_dir2_data.h
index 5e3a7f9ec735..0847cbb53e17 100644
--- a/fs/xfs/xfs_dir2_data.h
+++ b/fs/xfs/xfs_dir2_data.h
@@ -65,8 +65,8 @@ struct xfs_trans;
65 * The freespace will be formatted as a xfs_dir2_data_unused_t. 65 * The freespace will be formatted as a xfs_dir2_data_unused_t.
66 */ 66 */
67typedef struct xfs_dir2_data_free { 67typedef struct xfs_dir2_data_free {
68 xfs_dir2_data_off_t offset; /* start of freespace */ 68 __be16 offset; /* start of freespace */
69 xfs_dir2_data_off_t length; /* length of freespace */ 69 __be16 length; /* length of freespace */
70} xfs_dir2_data_free_t; 70} xfs_dir2_data_free_t;
71 71
72/* 72/*
@@ -75,7 +75,7 @@ typedef struct xfs_dir2_data_free {
75 * The code knows that XFS_DIR2_DATA_FD_COUNT is 3. 75 * The code knows that XFS_DIR2_DATA_FD_COUNT is 3.
76 */ 76 */
77typedef struct xfs_dir2_data_hdr { 77typedef struct xfs_dir2_data_hdr {
78 __uint32_t magic; /* XFS_DIR2_DATA_MAGIC */ 78 __be32 magic; /* XFS_DIR2_DATA_MAGIC */
79 /* or XFS_DIR2_BLOCK_MAGIC */ 79 /* or XFS_DIR2_BLOCK_MAGIC */
80 xfs_dir2_data_free_t bestfree[XFS_DIR2_DATA_FD_COUNT]; 80 xfs_dir2_data_free_t bestfree[XFS_DIR2_DATA_FD_COUNT];
81} xfs_dir2_data_hdr_t; 81} xfs_dir2_data_hdr_t;
@@ -97,10 +97,10 @@ typedef struct xfs_dir2_data_entry {
97 * Tag appears as the last 2 bytes. 97 * Tag appears as the last 2 bytes.
98 */ 98 */
99typedef struct xfs_dir2_data_unused { 99typedef struct xfs_dir2_data_unused {
100 __uint16_t freetag; /* XFS_DIR2_DATA_FREE_TAG */ 100 __be16 freetag; /* XFS_DIR2_DATA_FREE_TAG */
101 xfs_dir2_data_off_t length; /* total free length */ 101 __be16 length; /* total free length */
102 /* variable offset */ 102 /* variable offset */
103 xfs_dir2_data_off_t tag; /* starting offset of us */ 103 __be16 tag; /* starting offset of us */
104} xfs_dir2_data_unused_t; 104} xfs_dir2_data_unused_t;
105 105
106typedef union { 106typedef union {
@@ -134,12 +134,11 @@ static inline int xfs_dir2_data_entsize(int n)
134 * Pointer to an entry's tag word. 134 * Pointer to an entry's tag word.
135 */ 135 */
136#define XFS_DIR2_DATA_ENTRY_TAG_P(dep) xfs_dir2_data_entry_tag_p(dep) 136#define XFS_DIR2_DATA_ENTRY_TAG_P(dep) xfs_dir2_data_entry_tag_p(dep)
137static inline xfs_dir2_data_off_t * 137static inline __be16 *
138xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep) 138xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep)
139{ 139{
140 return (xfs_dir2_data_off_t *) \ 140 return (__be16 *)((char *)dep +
141 ((char *)(dep) + XFS_DIR2_DATA_ENTSIZE((dep)->namelen) - \ 141 XFS_DIR2_DATA_ENTSIZE(dep->namelen) - sizeof(__be16));
142 (uint)sizeof(xfs_dir2_data_off_t));
143} 142}
144 143
145/* 144/*
@@ -147,12 +146,11 @@ xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep)
147 */ 146 */
148#define XFS_DIR2_DATA_UNUSED_TAG_P(dup) \ 147#define XFS_DIR2_DATA_UNUSED_TAG_P(dup) \
149 xfs_dir2_data_unused_tag_p(dup) 148 xfs_dir2_data_unused_tag_p(dup)
150static inline xfs_dir2_data_off_t * 149static inline __be16 *
151xfs_dir2_data_unused_tag_p(xfs_dir2_data_unused_t *dup) 150xfs_dir2_data_unused_tag_p(xfs_dir2_data_unused_t *dup)
152{ 151{
153 return (xfs_dir2_data_off_t *) \ 152 return (__be16 *)((char *)dup +
154 ((char *)(dup) + INT_GET((dup)->length, ARCH_CONVERT) \ 153 be16_to_cpu(dup->length) - sizeof(__be16));
155 - (uint)sizeof(xfs_dir2_data_off_t));
156} 154}
157 155
158/* 156/*
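The point of the typedef churn above is type safety: once these on-disk fields are __be16/__be32 (sparse "__bitwise" types), any access that skips be16_to_cpu/cpu_to_be16 is flagged at build time, and the tag helpers returning __be16 * force the same discipline on the trailing tag word. A rough stand-alone imitation of the unused-entry layout, with a plain typedef standing in for __be16 and illustrative names throughout:

#include <stdint.h>
#include <stddef.h>

typedef uint16_t demo_be16;     /* stand-in; the kernel type is __be16 */

/* Mirrors xfs_dir2_data_unused_t: freetag and length up front, the tag
 * occupying the final 2 bytes of the freed region. */
struct demo_unused {
    demo_be16 freetag;          /* free-space marker */
    demo_be16 length;           /* total region length, big-endian on disk */
    /* ... freed bytes ..., then one demo_be16 tag at the very end */
};

/* The real helper reads the length via be16_to_cpu(dup->length); here the
 * CPU-order length is passed in to keep the sketch short. */
static inline demo_be16 *demo_unused_tag_p(struct demo_unused *dup,
                                           uint16_t length_cpu)
{
    return (demo_be16 *)((char *)dup + length_cpu - sizeof(demo_be16));
}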
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index d342b6b55239..08648b18265c 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -66,7 +66,7 @@ xfs_dir2_block_to_leaf(
66 xfs_da_args_t *args, /* operation arguments */ 66 xfs_da_args_t *args, /* operation arguments */
67 xfs_dabuf_t *dbp) /* input block's buffer */ 67 xfs_dabuf_t *dbp) /* input block's buffer */
68{ 68{
69 xfs_dir2_data_off_t *bestsp; /* leaf's bestsp entries */ 69 __be16 *bestsp; /* leaf's bestsp entries */
70 xfs_dablk_t blkno; /* leaf block's bno */ 70 xfs_dablk_t blkno; /* leaf block's bno */
71 xfs_dir2_block_t *block; /* block structure */ 71 xfs_dir2_block_t *block; /* block structure */
72 xfs_dir2_leaf_entry_t *blp; /* block's leaf entries */ 72 xfs_dir2_leaf_entry_t *blp; /* block's leaf entries */
@@ -111,14 +111,14 @@ xfs_dir2_block_to_leaf(
111 /* 111 /*
112 * Set the counts in the leaf header. 112 * Set the counts in the leaf header.
113 */ 113 */
114 INT_COPY(leaf->hdr.count, btp->count, ARCH_CONVERT); /* INT_: type change */ 114 leaf->hdr.count = cpu_to_be16(be32_to_cpu(btp->count));
115 INT_COPY(leaf->hdr.stale, btp->stale, ARCH_CONVERT); /* INT_: type change */ 115 leaf->hdr.stale = cpu_to_be16(be32_to_cpu(btp->stale));
116 /* 116 /*
117 * Could compact these but I think we always do the conversion 117 * Could compact these but I think we always do the conversion
118 * after squeezing out stale entries. 118 * after squeezing out stale entries.
119 */ 119 */
120 memcpy(leaf->ents, blp, INT_GET(btp->count, ARCH_CONVERT) * sizeof(xfs_dir2_leaf_entry_t)); 120 memcpy(leaf->ents, blp, be32_to_cpu(btp->count) * sizeof(xfs_dir2_leaf_entry_t));
121 xfs_dir2_leaf_log_ents(tp, lbp, 0, INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1); 121 xfs_dir2_leaf_log_ents(tp, lbp, 0, be16_to_cpu(leaf->hdr.count) - 1);
122 needscan = 0; 122 needscan = 0;
123 needlog = 1; 123 needlog = 1;
124 /* 124 /*
@@ -133,7 +133,7 @@ xfs_dir2_block_to_leaf(
133 /* 133 /*
134 * Fix up the block header, make it a data block. 134 * Fix up the block header, make it a data block.
135 */ 135 */
136 INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_DATA_MAGIC); 136 block->hdr.magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);
137 if (needscan) 137 if (needscan)
138 xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog, 138 xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog,
139 NULL); 139 NULL);
@@ -141,9 +141,9 @@ xfs_dir2_block_to_leaf(
141 * Set up leaf tail and bests table. 141 * Set up leaf tail and bests table.
142 */ 142 */
143 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 143 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf);
144 INT_SET(ltp->bestcount, ARCH_CONVERT, 1); 144 ltp->bestcount = cpu_to_be32(1);
145 bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); 145 bestsp = XFS_DIR2_LEAF_BESTS_P(ltp);
146 INT_COPY(bestsp[0], block->hdr.bestfree[0].length, ARCH_CONVERT); 146 bestsp[0] = block->hdr.bestfree[0].length;
147 /* 147 /*
148 * Log the data header and leaf bests table. 148 * Log the data header and leaf bests table.
149 */ 149 */
@@ -163,7 +163,7 @@ int /* error */
163xfs_dir2_leaf_addname( 163xfs_dir2_leaf_addname(
164 xfs_da_args_t *args) /* operation arguments */ 164 xfs_da_args_t *args) /* operation arguments */
165{ 165{
166 xfs_dir2_data_off_t *bestsp; /* freespace table in leaf */ 166 __be16 *bestsp; /* freespace table in leaf */
167 int compact; /* need to compact leaves */ 167 int compact; /* need to compact leaves */
168 xfs_dir2_data_t *data; /* data block structure */ 168 xfs_dir2_data_t *data; /* data block structure */
169 xfs_dabuf_t *dbp; /* data block buffer */ 169 xfs_dabuf_t *dbp; /* data block buffer */
@@ -187,7 +187,7 @@ xfs_dir2_leaf_addname(
187 int needbytes; /* leaf block bytes needed */ 187 int needbytes; /* leaf block bytes needed */
188 int needlog; /* need to log data header */ 188 int needlog; /* need to log data header */
189 int needscan; /* need to rescan data free */ 189 int needscan; /* need to rescan data free */
190 xfs_dir2_data_off_t *tagp; /* end of data entry */ 190 __be16 *tagp; /* end of data entry */
191 xfs_trans_t *tp; /* transaction pointer */ 191 xfs_trans_t *tp; /* transaction pointer */
192 xfs_dir2_db_t use_block; /* data block number */ 192 xfs_dir2_db_t use_block; /* data block number */
193 193
@@ -222,14 +222,14 @@ xfs_dir2_leaf_addname(
222 * in a data block, improving the lookup of those entries. 222 * in a data block, improving the lookup of those entries.
223 */ 223 */
224 for (use_block = -1, lep = &leaf->ents[index]; 224 for (use_block = -1, lep = &leaf->ents[index];
225 index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval; 225 index < be16_to_cpu(leaf->hdr.count) && be32_to_cpu(lep->hashval) == args->hashval;
226 index++, lep++) { 226 index++, lep++) {
227 if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) 227 if (be32_to_cpu(lep->address) == XFS_DIR2_NULL_DATAPTR)
228 continue; 228 continue;
229 i = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); 229 i = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address));
230 ASSERT(i < INT_GET(ltp->bestcount, ARCH_CONVERT)); 230 ASSERT(i < be32_to_cpu(ltp->bestcount));
231 ASSERT(INT_GET(bestsp[i], ARCH_CONVERT) != NULLDATAOFF); 231 ASSERT(be16_to_cpu(bestsp[i]) != NULLDATAOFF);
232 if (INT_GET(bestsp[i], ARCH_CONVERT) >= length) { 232 if (be16_to_cpu(bestsp[i]) >= length) {
233 use_block = i; 233 use_block = i;
234 break; 234 break;
235 } 235 }
@@ -238,13 +238,13 @@ xfs_dir2_leaf_addname(
238 * Didn't find a block yet, linear search all the data blocks. 238 * Didn't find a block yet, linear search all the data blocks.
239 */ 239 */
240 if (use_block == -1) { 240 if (use_block == -1) {
241 for (i = 0; i < INT_GET(ltp->bestcount, ARCH_CONVERT); i++) { 241 for (i = 0; i < be32_to_cpu(ltp->bestcount); i++) {
242 /* 242 /*
243 * Remember a block we see that's missing. 243 * Remember a block we see that's missing.
244 */ 244 */
245 if (INT_GET(bestsp[i], ARCH_CONVERT) == NULLDATAOFF && use_block == -1) 245 if (be16_to_cpu(bestsp[i]) == NULLDATAOFF && use_block == -1)
246 use_block = i; 246 use_block = i;
247 else if (INT_GET(bestsp[i], ARCH_CONVERT) >= length) { 247 else if (be16_to_cpu(bestsp[i]) >= length) {
248 use_block = i; 248 use_block = i;
249 break; 249 break;
250 } 250 }
@@ -260,21 +260,21 @@ xfs_dir2_leaf_addname(
260 * Now kill use_block if it refers to a missing block, so we 260 * Now kill use_block if it refers to a missing block, so we
261 * can use it as an indication of allocation needed. 261 * can use it as an indication of allocation needed.
262 */ 262 */
263 if (use_block != -1 && INT_GET(bestsp[use_block], ARCH_CONVERT) == NULLDATAOFF) 263 if (use_block != -1 && be16_to_cpu(bestsp[use_block]) == NULLDATAOFF)
264 use_block = -1; 264 use_block = -1;
265 /* 265 /*
266 * If we don't have enough free bytes but we can make enough 266 * If we don't have enough free bytes but we can make enough
267 * by compacting out stale entries, we'll do that. 267 * by compacting out stale entries, we'll do that.
268 */ 268 */
269 if ((char *)bestsp - (char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] < needbytes && 269 if ((char *)bestsp - (char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] < needbytes &&
270 INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1) { 270 be16_to_cpu(leaf->hdr.stale) > 1) {
271 compact = 1; 271 compact = 1;
272 } 272 }
273 /* 273 /*
274 * Otherwise if we don't have enough free bytes we need to 274 * Otherwise if we don't have enough free bytes we need to
275 * convert to node form. 275 * convert to node form.
276 */ 276 */
277 else if ((char *)bestsp - (char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] < 277 else if ((char *)bestsp - (char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] <
278 needbytes) { 278 needbytes) {
279 /* 279 /*
280 * Just checking or no space reservation, give up. 280 * Just checking or no space reservation, give up.
@@ -330,8 +330,8 @@ xfs_dir2_leaf_addname(
330 * There are stale entries, so we'll need to set log-low and log-high 330 * There are stale entries, so we'll need to set log-low and log-high
331 * to impossibly bad values later. 331 * to impossibly bad values later.
332 */ 332 */
333 else if (INT_GET(leaf->hdr.stale, ARCH_CONVERT)) { 333 else if (be16_to_cpu(leaf->hdr.stale)) {
334 lfloglow = INT_GET(leaf->hdr.count, ARCH_CONVERT); 334 lfloglow = be16_to_cpu(leaf->hdr.count);
335 lfloghigh = -1; 335 lfloghigh = -1;
336 } 336 }
337 /* 337 /*
@@ -358,13 +358,13 @@ xfs_dir2_leaf_addname(
358 * If we're adding a new data block on the end we need to 358 * If we're adding a new data block on the end we need to
359 * extend the bests table. Copy it up one entry. 359 * extend the bests table. Copy it up one entry.
360 */ 360 */
361 if (use_block >= INT_GET(ltp->bestcount, ARCH_CONVERT)) { 361 if (use_block >= be32_to_cpu(ltp->bestcount)) {
362 bestsp--; 362 bestsp--;
363 memmove(&bestsp[0], &bestsp[1], 363 memmove(&bestsp[0], &bestsp[1],
364 INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(bestsp[0])); 364 be32_to_cpu(ltp->bestcount) * sizeof(bestsp[0]));
365 INT_MOD(ltp->bestcount, ARCH_CONVERT, +1); 365 be32_add(&ltp->bestcount, 1);
366 xfs_dir2_leaf_log_tail(tp, lbp); 366 xfs_dir2_leaf_log_tail(tp, lbp);
367 xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); 367 xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1);
368 } 368 }
369 /* 369 /*
370 * If we're filling in a previously empty block just log it. 370 * If we're filling in a previously empty block just log it.
@@ -372,7 +372,7 @@ xfs_dir2_leaf_addname(
372 else 372 else
373 xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block); 373 xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block);
374 data = dbp->data; 374 data = dbp->data;
375 INT_COPY(bestsp[use_block], data->hdr.bestfree[0].length, ARCH_CONVERT); 375 bestsp[use_block] = data->hdr.bestfree[0].length;
376 grown = 1; 376 grown = 1;
377 } 377 }
378 /* 378 /*
@@ -394,8 +394,8 @@ xfs_dir2_leaf_addname(
394 * Point to the biggest freespace in our data block. 394 * Point to the biggest freespace in our data block.
395 */ 395 */
396 dup = (xfs_dir2_data_unused_t *) 396 dup = (xfs_dir2_data_unused_t *)
397 ((char *)data + INT_GET(data->hdr.bestfree[0].offset, ARCH_CONVERT)); 397 ((char *)data + be16_to_cpu(data->hdr.bestfree[0].offset));
398 ASSERT(INT_GET(dup->length, ARCH_CONVERT) >= length); 398 ASSERT(be16_to_cpu(dup->length) >= length);
399 needscan = needlog = 0; 399 needscan = needlog = 0;
400 /* 400 /*
401 * Mark the initial part of our freespace in use for the new entry. 401 * Mark the initial part of our freespace in use for the new entry.
@@ -411,7 +411,7 @@ xfs_dir2_leaf_addname(
411 dep->namelen = args->namelen; 411 dep->namelen = args->namelen;
412 memcpy(dep->name, args->name, dep->namelen); 412 memcpy(dep->name, args->name, dep->namelen);
413 tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); 413 tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
414 INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)data)); 414 *tagp = cpu_to_be16((char *)dep - (char *)data);
415 /* 415 /*
416 * Need to scan and fix up the bestfree table. 416 * Need to scan and fix up the bestfree table.
417 */ 417 */
@@ -427,8 +427,8 @@ xfs_dir2_leaf_addname(
427 * If the bests table needs to be changed, do it. 427 * If the bests table needs to be changed, do it.
428 * Log the change unless we've already done that. 428 * Log the change unless we've already done that.
429 */ 429 */
430 if (INT_GET(bestsp[use_block], ARCH_CONVERT) != INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) { 430 if (be16_to_cpu(bestsp[use_block]) != be16_to_cpu(data->hdr.bestfree[0].length)) {
431 INT_COPY(bestsp[use_block], data->hdr.bestfree[0].length, ARCH_CONVERT); 431 bestsp[use_block] = data->hdr.bestfree[0].length;
432 if (!grown) 432 if (!grown)
433 xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block); 433 xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block);
434 } 434 }
@@ -440,15 +440,15 @@ xfs_dir2_leaf_addname(
440 /* 440 /*
441 * lep is still good as the index leaf entry. 441 * lep is still good as the index leaf entry.
442 */ 442 */
443 if (index < INT_GET(leaf->hdr.count, ARCH_CONVERT)) 443 if (index < be16_to_cpu(leaf->hdr.count))
444 memmove(lep + 1, lep, 444 memmove(lep + 1, lep,
445 (INT_GET(leaf->hdr.count, ARCH_CONVERT) - index) * sizeof(*lep)); 445 (be16_to_cpu(leaf->hdr.count) - index) * sizeof(*lep));
446 /* 446 /*
447 * Record low and high logging indices for the leaf. 447 * Record low and high logging indices for the leaf.
448 */ 448 */
449 lfloglow = index; 449 lfloglow = index;
450 lfloghigh = INT_GET(leaf->hdr.count, ARCH_CONVERT); 450 lfloghigh = be16_to_cpu(leaf->hdr.count);
451 INT_MOD(leaf->hdr.count, ARCH_CONVERT, +1); 451 be16_add(&leaf->hdr.count, 1);
452 } 452 }
453 /* 453 /*
454 * There are stale entries. 454 * There are stale entries.
@@ -468,7 +468,7 @@ xfs_dir2_leaf_addname(
468 */ 468 */
469 for (lowstale = index - 1; 469 for (lowstale = index - 1;
470 lowstale >= 0 && 470 lowstale >= 0 &&
471 INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) != 471 be32_to_cpu(leaf->ents[lowstale].address) !=
472 XFS_DIR2_NULL_DATAPTR; 472 XFS_DIR2_NULL_DATAPTR;
473 lowstale--) 473 lowstale--)
474 continue; 474 continue;
@@ -478,8 +478,8 @@ xfs_dir2_leaf_addname(
478 * lowstale entry would be better. 478 * lowstale entry would be better.
479 */ 479 */
480 for (highstale = index; 480 for (highstale = index;
481 highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) && 481 highstale < be16_to_cpu(leaf->hdr.count) &&
482 INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) != 482 be32_to_cpu(leaf->ents[highstale].address) !=
483 XFS_DIR2_NULL_DATAPTR && 483 XFS_DIR2_NULL_DATAPTR &&
484 (lowstale < 0 || 484 (lowstale < 0 ||
485 index - lowstale - 1 >= highstale - index); 485 index - lowstale - 1 >= highstale - index);
@@ -490,10 +490,10 @@ xfs_dir2_leaf_addname(
490 * If the low one is better, use it. 490 * If the low one is better, use it.
491 */ 491 */
492 if (lowstale >= 0 && 492 if (lowstale >= 0 &&
493 (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) || 493 (highstale == be16_to_cpu(leaf->hdr.count) ||
494 index - lowstale - 1 < highstale - index)) { 494 index - lowstale - 1 < highstale - index)) {
495 ASSERT(index - lowstale - 1 >= 0); 495 ASSERT(index - lowstale - 1 >= 0);
496 ASSERT(INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) == 496 ASSERT(be32_to_cpu(leaf->ents[lowstale].address) ==
497 XFS_DIR2_NULL_DATAPTR); 497 XFS_DIR2_NULL_DATAPTR);
498 /* 498 /*
499 * Copy entries up to cover the stale entry 499 * Copy entries up to cover the stale entry
@@ -512,7 +512,7 @@ xfs_dir2_leaf_addname(
512 */ 512 */
513 else { 513 else {
514 ASSERT(highstale - index >= 0); 514 ASSERT(highstale - index >= 0);
515 ASSERT(INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) == 515 ASSERT(be32_to_cpu(leaf->ents[highstale].address) ==
516 XFS_DIR2_NULL_DATAPTR); 516 XFS_DIR2_NULL_DATAPTR);
517 /* 517 /*
518 * Copy entries down to cover the stale entry 518 * Copy entries down to cover the stale entry
@@ -526,13 +526,14 @@ xfs_dir2_leaf_addname(
526 lfloglow = MIN(index, lfloglow); 526 lfloglow = MIN(index, lfloglow);
527 lfloghigh = MAX(highstale, lfloghigh); 527 lfloghigh = MAX(highstale, lfloghigh);
528 } 528 }
529 INT_MOD(leaf->hdr.stale, ARCH_CONVERT, -1); 529 be16_add(&leaf->hdr.stale, -1);
530 } 530 }
531 /* 531 /*
532 * Fill in the new leaf entry. 532 * Fill in the new leaf entry.
533 */ 533 */
534 INT_SET(lep->hashval, ARCH_CONVERT, args->hashval); 534 lep->hashval = cpu_to_be32(args->hashval);
535 INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_DB_OFF_TO_DATAPTR(mp, use_block, INT_GET(*tagp, ARCH_CONVERT))); 535 lep->address = cpu_to_be32(XFS_DIR2_DB_OFF_TO_DATAPTR(mp, use_block,
536 be16_to_cpu(*tagp)));
536 /* 537 /*
537 * Log the leaf fields and give up the buffers. 538 * Log the leaf fields and give up the buffers.
538 */ 539 */
@@ -563,30 +564,30 @@ xfs_dir2_leaf_check(
563 564
564 leaf = bp->data; 565 leaf = bp->data;
565 mp = dp->i_mount; 566 mp = dp->i_mount;
566 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); 567 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC);
567 /* 568 /*
568 * This value is not restrictive enough. 569 * This value is not restrictive enough.
569 * Should factor in the size of the bests table as well. 570 * Should factor in the size of the bests table as well.
570 * We can deduce a value for that from di_size. 571 * We can deduce a value for that from di_size.
571 */ 572 */
572 ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); 573 ASSERT(be16_to_cpu(leaf->hdr.count) <= XFS_DIR2_MAX_LEAF_ENTS(mp));
573 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 574 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf);
574 /* 575 /*
575 * Leaves and bests don't overlap. 576 * Leaves and bests don't overlap.
576 */ 577 */
577 ASSERT((char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] <= 578 ASSERT((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] <=
578 (char *)XFS_DIR2_LEAF_BESTS_P(ltp)); 579 (char *)XFS_DIR2_LEAF_BESTS_P(ltp));
579 /* 580 /*
580 * Check hash value order, count stale entries. 581 * Check hash value order, count stale entries.
581 */ 582 */
582 for (i = stale = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); i++) { 583 for (i = stale = 0; i < be16_to_cpu(leaf->hdr.count); i++) {
583 if (i + 1 < INT_GET(leaf->hdr.count, ARCH_CONVERT)) 584 if (i + 1 < be16_to_cpu(leaf->hdr.count))
584 ASSERT(INT_GET(leaf->ents[i].hashval, ARCH_CONVERT) <= 585 ASSERT(be32_to_cpu(leaf->ents[i].hashval) <=
585 INT_GET(leaf->ents[i + 1].hashval, ARCH_CONVERT)); 586 be32_to_cpu(leaf->ents[i + 1].hashval));
586 if (INT_GET(leaf->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) 587 if (be32_to_cpu(leaf->ents[i].address) == XFS_DIR2_NULL_DATAPTR)
587 stale++; 588 stale++;
588 } 589 }
589 ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == stale); 590 ASSERT(be16_to_cpu(leaf->hdr.stale) == stale);
590} 591}
591#endif /* DEBUG */ 592#endif /* DEBUG */
592 593
@@ -611,8 +612,8 @@ xfs_dir2_leaf_compact(
611 /* 612 /*
612 * Compress out the stale entries in place. 613 * Compress out the stale entries in place.
613 */ 614 */
614 for (from = to = 0, loglow = -1; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) { 615 for (from = to = 0, loglow = -1; from < be16_to_cpu(leaf->hdr.count); from++) {
615 if (INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) 616 if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR)
616 continue; 617 continue;
617 /* 618 /*
618 * Only actually copy the entries that are different. 619 * Only actually copy the entries that are different.
@@ -627,8 +628,8 @@ xfs_dir2_leaf_compact(
627 /* 628 /*
628 * Update and log the header, log the leaf entries. 629 * Update and log the header, log the leaf entries.
629 */ 630 */
630 ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == from - to); 631 ASSERT(be16_to_cpu(leaf->hdr.stale) == from - to);
631 INT_MOD(leaf->hdr.count, ARCH_CONVERT, -(INT_GET(leaf->hdr.stale, ARCH_CONVERT))); 632 be16_add(&leaf->hdr.count, -(be16_to_cpu(leaf->hdr.stale)));
632 leaf->hdr.stale = 0; 633 leaf->hdr.stale = 0;
633 xfs_dir2_leaf_log_header(args->trans, bp); 634 xfs_dir2_leaf_log_header(args->trans, bp);
634 if (loglow != -1) 635 if (loglow != -1)
@@ -662,14 +663,14 @@ xfs_dir2_leaf_compact_x1(
662 int to; /* destination copy index */ 663 int to; /* destination copy index */
663 664
664 leaf = bp->data; 665 leaf = bp->data;
665 ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1); 666 ASSERT(be16_to_cpu(leaf->hdr.stale) > 1);
666 index = *indexp; 667 index = *indexp;
667 /* 668 /*
668 * Find the first stale entry before our index, if any. 669 * Find the first stale entry before our index, if any.
669 */ 670 */
670 for (lowstale = index - 1; 671 for (lowstale = index - 1;
671 lowstale >= 0 && 672 lowstale >= 0 &&
672 INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR; 673 be32_to_cpu(leaf->ents[lowstale].address) != XFS_DIR2_NULL_DATAPTR;
673 lowstale--) 674 lowstale--)
674 continue; 675 continue;
675 /* 676 /*
@@ -677,8 +678,8 @@ xfs_dir2_leaf_compact_x1(
677 * Stop if the answer would be worse than lowstale. 678 * Stop if the answer would be worse than lowstale.
678 */ 679 */
679 for (highstale = index; 680 for (highstale = index;
680 highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) && 681 highstale < be16_to_cpu(leaf->hdr.count) &&
681 INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR && 682 be32_to_cpu(leaf->ents[highstale].address) != XFS_DIR2_NULL_DATAPTR &&
682 (lowstale < 0 || index - lowstale > highstale - index); 683 (lowstale < 0 || index - lowstale > highstale - index);
683 highstale++) 684 highstale++)
684 continue; 685 continue;
@@ -686,7 +687,7 @@ xfs_dir2_leaf_compact_x1(
686 * Pick the better of lowstale and highstale. 687 * Pick the better of lowstale and highstale.
687 */ 688 */
688 if (lowstale >= 0 && 689 if (lowstale >= 0 &&
689 (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) || 690 (highstale == be16_to_cpu(leaf->hdr.count) ||
690 index - lowstale <= highstale - index)) 691 index - lowstale <= highstale - index))
691 keepstale = lowstale; 692 keepstale = lowstale;
692 else 693 else
@@ -695,14 +696,14 @@ xfs_dir2_leaf_compact_x1(
695 * Copy the entries in place, removing all the stale entries 696 * Copy the entries in place, removing all the stale entries
696 * except keepstale. 697 * except keepstale.
697 */ 698 */
698 for (from = to = 0; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) { 699 for (from = to = 0; from < be16_to_cpu(leaf->hdr.count); from++) {
699 /* 700 /*
700 * Notice the new value of index. 701 * Notice the new value of index.
701 */ 702 */
702 if (index == from) 703 if (index == from)
703 newindex = to; 704 newindex = to;
704 if (from != keepstale && 705 if (from != keepstale &&
705 INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) { 706 be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR) {
706 if (from == to) 707 if (from == to)
707 *lowlogp = to; 708 *lowlogp = to;
708 continue; 709 continue;
@@ -730,8 +731,8 @@ xfs_dir2_leaf_compact_x1(
730 /* 731 /*
731 * Adjust the leaf header values. 732 * Adjust the leaf header values.
732 */ 733 */
733 INT_MOD(leaf->hdr.count, ARCH_CONVERT, -(from - to)); 734 be16_add(&leaf->hdr.count, -(from - to));
734 INT_SET(leaf->hdr.stale, ARCH_CONVERT, 1); 735 leaf->hdr.stale = cpu_to_be16(1);
735 /* 736 /*
736 * Remember the low/high stale value only in the "right" 737 * Remember the low/high stale value only in the "right"
737 * direction. 738 * direction.
@@ -739,8 +740,8 @@ xfs_dir2_leaf_compact_x1(
739 if (lowstale >= newindex) 740 if (lowstale >= newindex)
740 lowstale = -1; 741 lowstale = -1;
741 else 742 else
742 highstale = INT_GET(leaf->hdr.count, ARCH_CONVERT); 743 highstale = be16_to_cpu(leaf->hdr.count);
743 *highlogp = INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1; 744 *highlogp = be16_to_cpu(leaf->hdr.count) - 1;
744 *lowstalep = lowstale; 745 *lowstalep = lowstale;
745 *highstalep = highstale; 746 *highstalep = highstale;
746} 747}
@@ -766,7 +767,7 @@ xfs_dir2_leaf_getdents(
766 xfs_dir2_data_entry_t *dep; /* data entry */ 767 xfs_dir2_data_entry_t *dep; /* data entry */
767 xfs_dir2_data_unused_t *dup; /* unused entry */ 768 xfs_dir2_data_unused_t *dup; /* unused entry */
768 int eof; /* reached end of directory */ 769 int eof; /* reached end of directory */
769 int error=0; /* error return value */ 770 int error = 0; /* error return value */
770 int i; /* temporary loop index */ 771 int i; /* temporary loop index */
771 int j; /* temporary loop index */ 772 int j; /* temporary loop index */
772 int length; /* temporary length value */ 773 int length; /* temporary length value */
@@ -778,8 +779,8 @@ xfs_dir2_leaf_getdents(
778 xfs_mount_t *mp; /* filesystem mount point */ 779 xfs_mount_t *mp; /* filesystem mount point */
779 xfs_dir2_off_t newoff; /* new curoff after new blk */ 780 xfs_dir2_off_t newoff; /* new curoff after new blk */
780 int nmap; /* mappings to ask xfs_bmapi */ 781 int nmap; /* mappings to ask xfs_bmapi */
781 xfs_dir2_put_args_t p; /* formatting arg bundle */ 782 xfs_dir2_put_args_t *p; /* formatting arg bundle */
782 char *ptr=NULL; /* pointer to current data */ 783 char *ptr = NULL; /* pointer to current data */
783 int ra_current; /* number of read-ahead blks */ 784 int ra_current; /* number of read-ahead blks */
784 int ra_index; /* *map index for read-ahead */ 785 int ra_index; /* *map index for read-ahead */
785 int ra_offset; /* map entry offset for ra */ 786 int ra_offset; /* map entry offset for ra */
@@ -797,9 +798,10 @@ xfs_dir2_leaf_getdents(
797 /* 798 /*
798 * Setup formatting arguments. 799 * Setup formatting arguments.
799 */ 800 */
800 p.dbp = dbp; 801 p = kmem_alloc(sizeof(*p), KM_SLEEP);
801 p.put = put; 802 p->dbp = dbp;
802 p.uio = uio; 803 p->put = put;
804 p->uio = uio;
803 /* 805 /*
804 * Set up to bmap a number of blocks based on the caller's 806 * Set up to bmap a number of blocks based on the caller's
805 * buffer size, the directory block size, and the filesystem 807 * buffer size, the directory block size, and the filesystem
@@ -1046,11 +1048,10 @@ xfs_dir2_leaf_getdents(
1046 while ((char *)ptr - (char *)data < byteoff) { 1048 while ((char *)ptr - (char *)data < byteoff) {
1047 dup = (xfs_dir2_data_unused_t *)ptr; 1049 dup = (xfs_dir2_data_unused_t *)ptr;
1048 1050
1049 if (INT_GET(dup->freetag, ARCH_CONVERT) 1051 if (be16_to_cpu(dup->freetag)
1050 == XFS_DIR2_DATA_FREE_TAG) { 1052 == XFS_DIR2_DATA_FREE_TAG) {
1051 1053
1052 length = INT_GET(dup->length, 1054 length = be16_to_cpu(dup->length);
1053 ARCH_CONVERT);
1054 ptr += length; 1055 ptr += length;
1055 continue; 1056 continue;
1056 } 1057 }
@@ -1079,9 +1080,8 @@ xfs_dir2_leaf_getdents(
1079 /* 1080 /*
1080 * No, it's unused, skip over it. 1081 * No, it's unused, skip over it.
1081 */ 1082 */
1082 if (INT_GET(dup->freetag, ARCH_CONVERT) 1083 if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
1083 == XFS_DIR2_DATA_FREE_TAG) { 1084 length = be16_to_cpu(dup->length);
1084 length = INT_GET(dup->length, ARCH_CONVERT);
1085 ptr += length; 1085 ptr += length;
1086 curoff += length; 1086 curoff += length;
1087 continue; 1087 continue;
@@ -1092,24 +1092,24 @@ xfs_dir2_leaf_getdents(
1092 */ 1092 */
1093 dep = (xfs_dir2_data_entry_t *)ptr; 1093 dep = (xfs_dir2_data_entry_t *)ptr;
1094 1094
1095 p.namelen = dep->namelen; 1095 p->namelen = dep->namelen;
1096 1096
1097 length = XFS_DIR2_DATA_ENTSIZE(p.namelen); 1097 length = XFS_DIR2_DATA_ENTSIZE(p->namelen);
1098 1098
1099 p.cook = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff + length); 1099 p->cook = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff + length);
1100 1100
1101 p.ino = INT_GET(dep->inumber, ARCH_CONVERT); 1101 p->ino = INT_GET(dep->inumber, ARCH_CONVERT);
1102#if XFS_BIG_INUMS 1102#if XFS_BIG_INUMS
1103 p.ino += mp->m_inoadd; 1103 p->ino += mp->m_inoadd;
1104#endif 1104#endif
1105 p.name = (char *)dep->name; 1105 p->name = (char *)dep->name;
1106 1106
1107 error = p.put(&p); 1107 error = p->put(p);
1108 1108
1109 /* 1109 /*
1110 * Won't fit. Return to caller. 1110 * Won't fit. Return to caller.
1111 */ 1111 */
1112 if (!p.done) { 1112 if (!p->done) {
1113 eof = 0; 1113 eof = 0;
1114 break; 1114 break;
1115 } 1115 }
@@ -1129,6 +1129,7 @@ xfs_dir2_leaf_getdents(
1129 else 1129 else
1130 uio->uio_offset = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff); 1130 uio->uio_offset = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff);
1131 kmem_free(map, map_size * sizeof(*map)); 1131 kmem_free(map, map_size * sizeof(*map));
1132 kmem_free(p, sizeof(*p));
1132 if (bp) 1133 if (bp)
1133 xfs_da_brelse(tp, bp); 1134 xfs_da_brelse(tp, bp);
1134 return error; 1135 return error;
@@ -1171,7 +1172,7 @@ xfs_dir2_leaf_init(
1171 /* 1172 /*
1172 * Initialize the header. 1173 * Initialize the header.
1173 */ 1174 */
1174 INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, magic); 1175 leaf->hdr.info.magic = cpu_to_be16(magic);
1175 leaf->hdr.info.forw = 0; 1176 leaf->hdr.info.forw = 0;
1176 leaf->hdr.info.back = 0; 1177 leaf->hdr.info.back = 0;
1177 leaf->hdr.count = 0; 1178 leaf->hdr.count = 0;
@@ -1201,13 +1202,13 @@ xfs_dir2_leaf_log_bests(
1201 int first, /* first entry to log */ 1202 int first, /* first entry to log */
1202 int last) /* last entry to log */ 1203 int last) /* last entry to log */
1203{ 1204{
1204 xfs_dir2_data_off_t *firstb; /* pointer to first entry */ 1205 __be16 *firstb; /* pointer to first entry */
1205 xfs_dir2_data_off_t *lastb; /* pointer to last entry */ 1206 __be16 *lastb; /* pointer to last entry */
1206 xfs_dir2_leaf_t *leaf; /* leaf structure */ 1207 xfs_dir2_leaf_t *leaf; /* leaf structure */
1207 xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ 1208 xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */
1208 1209
1209 leaf = bp->data; 1210 leaf = bp->data;
1210 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); 1211 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC);
1211 ltp = XFS_DIR2_LEAF_TAIL_P(tp->t_mountp, leaf); 1212 ltp = XFS_DIR2_LEAF_TAIL_P(tp->t_mountp, leaf);
1212 firstb = XFS_DIR2_LEAF_BESTS_P(ltp) + first; 1213 firstb = XFS_DIR2_LEAF_BESTS_P(ltp) + first;
1213 lastb = XFS_DIR2_LEAF_BESTS_P(ltp) + last; 1214 lastb = XFS_DIR2_LEAF_BESTS_P(ltp) + last;
@@ -1230,8 +1231,8 @@ xfs_dir2_leaf_log_ents(
1230 xfs_dir2_leaf_t *leaf; /* leaf structure */ 1231 xfs_dir2_leaf_t *leaf; /* leaf structure */
1231 1232
1232 leaf = bp->data; 1233 leaf = bp->data;
1233 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC || 1234 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC ||
1234 INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); 1235 be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
1235 firstlep = &leaf->ents[first]; 1236 firstlep = &leaf->ents[first];
1236 lastlep = &leaf->ents[last]; 1237 lastlep = &leaf->ents[last];
1237 xfs_da_log_buf(tp, bp, (uint)((char *)firstlep - (char *)leaf), 1238 xfs_da_log_buf(tp, bp, (uint)((char *)firstlep - (char *)leaf),
@@ -1249,8 +1250,8 @@ xfs_dir2_leaf_log_header(
1249 xfs_dir2_leaf_t *leaf; /* leaf structure */ 1250 xfs_dir2_leaf_t *leaf; /* leaf structure */
1250 1251
1251 leaf = bp->data; 1252 leaf = bp->data;
1252 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC || 1253 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC ||
1253 INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); 1254 be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
1254 xfs_da_log_buf(tp, bp, (uint)((char *)&leaf->hdr - (char *)leaf), 1255 xfs_da_log_buf(tp, bp, (uint)((char *)&leaf->hdr - (char *)leaf),
1255 (uint)(sizeof(leaf->hdr) - 1)); 1256 (uint)(sizeof(leaf->hdr) - 1));
1256} 1257}
@@ -1269,7 +1270,7 @@ xfs_dir2_leaf_log_tail(
1269 1270
1270 mp = tp->t_mountp; 1271 mp = tp->t_mountp;
1271 leaf = bp->data; 1272 leaf = bp->data;
1272 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); 1273 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC);
1273 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 1274 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf);
1274 xfs_da_log_buf(tp, bp, (uint)((char *)ltp - (char *)leaf), 1275 xfs_da_log_buf(tp, bp, (uint)((char *)ltp - (char *)leaf),
1275 (uint)(mp->m_dirblksize - 1)); 1276 (uint)(mp->m_dirblksize - 1));
@@ -1314,7 +1315,7 @@ xfs_dir2_leaf_lookup(
1314 */ 1315 */
1315 dep = (xfs_dir2_data_entry_t *) 1316 dep = (xfs_dir2_data_entry_t *)
1316 ((char *)dbp->data + 1317 ((char *)dbp->data +
1317 XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, INT_GET(lep->address, ARCH_CONVERT))); 1318 XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, be32_to_cpu(lep->address)));
1318 /* 1319 /*
1319 * Return the found inode number. 1320 * Return the found inode number.
1320 */ 1321 */
@@ -1373,17 +1374,17 @@ xfs_dir2_leaf_lookup_int(
1373 * looking to match the name. 1374 * looking to match the name.
1374 */ 1375 */
1375 for (lep = &leaf->ents[index], dbp = NULL, curdb = -1; 1376 for (lep = &leaf->ents[index], dbp = NULL, curdb = -1;
1376 index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval; 1377 index < be16_to_cpu(leaf->hdr.count) && be32_to_cpu(lep->hashval) == args->hashval;
1377 lep++, index++) { 1378 lep++, index++) {
1378 /* 1379 /*
1379 * Skip over stale leaf entries. 1380 * Skip over stale leaf entries.
1380 */ 1381 */
1381 if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) 1382 if (be32_to_cpu(lep->address) == XFS_DIR2_NULL_DATAPTR)
1382 continue; 1383 continue;
1383 /* 1384 /*
1384 * Get the new data block number. 1385 * Get the new data block number.
1385 */ 1386 */
1386 newdb = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); 1387 newdb = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address));
1387 /* 1388 /*
1388 * If it's not the same as the old data block number, 1389 * If it's not the same as the old data block number,
1389 * need to pitch the old one and read the new one. 1390 * need to pitch the old one and read the new one.
@@ -1406,7 +1407,7 @@ xfs_dir2_leaf_lookup_int(
1406 */ 1407 */
1407 dep = (xfs_dir2_data_entry_t *) 1408 dep = (xfs_dir2_data_entry_t *)
1408 ((char *)dbp->data + 1409 ((char *)dbp->data +
1409 XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT))); 1410 XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address)));
1410 /* 1411 /*
1411 * If it matches then return it. 1412 * If it matches then return it.
1412 */ 1413 */
@@ -1435,7 +1436,7 @@ int /* error */
1435xfs_dir2_leaf_removename( 1436xfs_dir2_leaf_removename(
1436 xfs_da_args_t *args) /* operation arguments */ 1437 xfs_da_args_t *args) /* operation arguments */
1437{ 1438{
1438 xfs_dir2_data_off_t *bestsp; /* leaf block best freespace */ 1439 __be16 *bestsp; /* leaf block best freespace */
1439 xfs_dir2_data_t *data; /* data block structure */ 1440 xfs_dir2_data_t *data; /* data block structure */
1440 xfs_dir2_db_t db; /* data block number */ 1441 xfs_dir2_db_t db; /* data block number */
1441 xfs_dabuf_t *dbp; /* data block buffer */ 1442 xfs_dabuf_t *dbp; /* data block buffer */
@@ -1471,14 +1472,14 @@ xfs_dir2_leaf_removename(
1471 * Point to the leaf entry, use that to point to the data entry. 1472 * Point to the leaf entry, use that to point to the data entry.
1472 */ 1473 */
1473 lep = &leaf->ents[index]; 1474 lep = &leaf->ents[index];
1474 db = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); 1475 db = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address));
1475 dep = (xfs_dir2_data_entry_t *) 1476 dep = (xfs_dir2_data_entry_t *)
1476 ((char *)data + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT))); 1477 ((char *)data + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address)));
1477 needscan = needlog = 0; 1478 needscan = needlog = 0;
1478 oldbest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT); 1479 oldbest = be16_to_cpu(data->hdr.bestfree[0].length);
1479 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 1480 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf);
1480 bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); 1481 bestsp = XFS_DIR2_LEAF_BESTS_P(ltp);
1481 ASSERT(INT_GET(bestsp[db], ARCH_CONVERT) == oldbest); 1482 ASSERT(be16_to_cpu(bestsp[db]) == oldbest);
1482 /* 1483 /*
1483 * Mark the former data entry unused. 1484 * Mark the former data entry unused.
1484 */ 1485 */
@@ -1488,9 +1489,9 @@ xfs_dir2_leaf_removename(
1488 /* 1489 /*
1489 * We just mark the leaf entry stale by putting a null in it. 1490 * We just mark the leaf entry stale by putting a null in it.
1490 */ 1491 */
1491 INT_MOD(leaf->hdr.stale, ARCH_CONVERT, +1); 1492 be16_add(&leaf->hdr.stale, 1);
1492 xfs_dir2_leaf_log_header(tp, lbp); 1493 xfs_dir2_leaf_log_header(tp, lbp);
1493 INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR); 1494 lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR);
1494 xfs_dir2_leaf_log_ents(tp, lbp, index, index); 1495 xfs_dir2_leaf_log_ents(tp, lbp, index, index);
1495 /* 1496 /*
1496 * Scan the freespace in the data block again if necessary, 1497 * Scan the freespace in the data block again if necessary,
@@ -1504,15 +1505,15 @@ xfs_dir2_leaf_removename(
1504 * If the longest freespace in the data block has changed, 1505 * If the longest freespace in the data block has changed,
1505 * put the new value in the bests table and log that. 1506 * put the new value in the bests table and log that.
1506 */ 1507 */
1507 if (INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) != oldbest) { 1508 if (be16_to_cpu(data->hdr.bestfree[0].length) != oldbest) {
1508 INT_COPY(bestsp[db], data->hdr.bestfree[0].length, ARCH_CONVERT); 1509 bestsp[db] = data->hdr.bestfree[0].length;
1509 xfs_dir2_leaf_log_bests(tp, lbp, db, db); 1510 xfs_dir2_leaf_log_bests(tp, lbp, db, db);
1510 } 1511 }
1511 xfs_dir2_data_check(dp, dbp); 1512 xfs_dir2_data_check(dp, dbp);
1512 /* 1513 /*
1513 * If the data block is now empty then get rid of the data block. 1514 * If the data block is now empty then get rid of the data block.
1514 */ 1515 */
1515 if (INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) == 1516 if (be16_to_cpu(data->hdr.bestfree[0].length) ==
1516 mp->m_dirblksize - (uint)sizeof(data->hdr)) { 1517 mp->m_dirblksize - (uint)sizeof(data->hdr)) {
1517 ASSERT(db != mp->m_dirdatablk); 1518 ASSERT(db != mp->m_dirdatablk);
1518 if ((error = xfs_dir2_shrink_inode(args, db, dbp))) { 1519 if ((error = xfs_dir2_shrink_inode(args, db, dbp))) {
@@ -1535,12 +1536,12 @@ xfs_dir2_leaf_removename(
1535 * If this is the last data block then compact the 1536 * If this is the last data block then compact the
1536 * bests table by getting rid of entries. 1537 * bests table by getting rid of entries.
1537 */ 1538 */
1538 if (db == INT_GET(ltp->bestcount, ARCH_CONVERT) - 1) { 1539 if (db == be32_to_cpu(ltp->bestcount) - 1) {
1539 /* 1540 /*
1540 * Look for the last active entry (i). 1541 * Look for the last active entry (i).
1541 */ 1542 */
1542 for (i = db - 1; i > 0; i--) { 1543 for (i = db - 1; i > 0; i--) {
1543 if (INT_GET(bestsp[i], ARCH_CONVERT) != NULLDATAOFF) 1544 if (be16_to_cpu(bestsp[i]) != NULLDATAOFF)
1544 break; 1545 break;
1545 } 1546 }
1546 /* 1547 /*
@@ -1548,12 +1549,12 @@ xfs_dir2_leaf_removename(
1548 * end are removed. 1549 * end are removed.
1549 */ 1550 */
1550 memmove(&bestsp[db - i], bestsp, 1551 memmove(&bestsp[db - i], bestsp,
1551 (INT_GET(ltp->bestcount, ARCH_CONVERT) - (db - i)) * sizeof(*bestsp)); 1552 (be32_to_cpu(ltp->bestcount) - (db - i)) * sizeof(*bestsp));
1552 INT_MOD(ltp->bestcount, ARCH_CONVERT, -(db - i)); 1553 be32_add(&ltp->bestcount, -(db - i));
1553 xfs_dir2_leaf_log_tail(tp, lbp); 1554 xfs_dir2_leaf_log_tail(tp, lbp);
1554 xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); 1555 xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1);
1555 } else 1556 } else
1556 INT_SET(bestsp[db], ARCH_CONVERT, NULLDATAOFF); 1557 bestsp[db] = cpu_to_be16(NULLDATAOFF);
1557 } 1558 }
1558 /* 1559 /*
1559 * If the data block was not the first one, drop it. 1560 * If the data block was not the first one, drop it.
@@ -1604,7 +1605,7 @@ xfs_dir2_leaf_replace(
1604 */ 1605 */
1605 dep = (xfs_dir2_data_entry_t *) 1606 dep = (xfs_dir2_data_entry_t *)
1606 ((char *)dbp->data + 1607 ((char *)dbp->data +
1607 XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, INT_GET(lep->address, ARCH_CONVERT))); 1608 XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, be32_to_cpu(lep->address)));
1608 ASSERT(args->inumber != INT_GET(dep->inumber, ARCH_CONVERT)); 1609 ASSERT(args->inumber != INT_GET(dep->inumber, ARCH_CONVERT));
1609 /* 1610 /*
1610 * Put the new inode number in, log it. 1611 * Put the new inode number in, log it.
@@ -1645,11 +1646,11 @@ xfs_dir2_leaf_search_hash(
1645 * Note, the table cannot be empty, so we have to go through the loop. 1646 * Note, the table cannot be empty, so we have to go through the loop.
1646 * Binary search the leaf entries looking for our hash value. 1647 * Binary search the leaf entries looking for our hash value.
1647 */ 1648 */
1648 for (lep = leaf->ents, low = 0, high = INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1, 1649 for (lep = leaf->ents, low = 0, high = be16_to_cpu(leaf->hdr.count) - 1,
1649 hashwant = args->hashval; 1650 hashwant = args->hashval;
1650 low <= high; ) { 1651 low <= high; ) {
1651 mid = (low + high) >> 1; 1652 mid = (low + high) >> 1;
1652 if ((hash = INT_GET(lep[mid].hashval, ARCH_CONVERT)) == hashwant) 1653 if ((hash = be32_to_cpu(lep[mid].hashval)) == hashwant)
1653 break; 1654 break;
1654 if (hash < hashwant) 1655 if (hash < hashwant)
1655 low = mid + 1; 1656 low = mid + 1;
@@ -1660,7 +1661,7 @@ xfs_dir2_leaf_search_hash(
1660 * Found one, back up through all the equal hash values. 1661 * Found one, back up through all the equal hash values.
1661 */ 1662 */
1662 if (hash == hashwant) { 1663 if (hash == hashwant) {
1663 while (mid > 0 && INT_GET(lep[mid - 1].hashval, ARCH_CONVERT) == hashwant) { 1664 while (mid > 0 && be32_to_cpu(lep[mid - 1].hashval) == hashwant) {
1664 mid--; 1665 mid--;
1665 } 1666 }
1666 } 1667 }
@@ -1682,7 +1683,7 @@ xfs_dir2_leaf_trim_data(
1682 xfs_dabuf_t *lbp, /* leaf buffer */ 1683 xfs_dabuf_t *lbp, /* leaf buffer */
1683 xfs_dir2_db_t db) /* data block number */ 1684 xfs_dir2_db_t db) /* data block number */
1684{ 1685{
1685 xfs_dir2_data_off_t *bestsp; /* leaf bests table */ 1686 __be16 *bestsp; /* leaf bests table */
1686#ifdef DEBUG 1687#ifdef DEBUG
1687 xfs_dir2_data_t *data; /* data block structure */ 1688 xfs_dir2_data_t *data; /* data block structure */
1688#endif 1689#endif
@@ -1706,7 +1707,7 @@ xfs_dir2_leaf_trim_data(
1706 } 1707 }
1707#ifdef DEBUG 1708#ifdef DEBUG
1708 data = dbp->data; 1709 data = dbp->data;
1709 ASSERT(INT_GET(data->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC); 1710 ASSERT(be32_to_cpu(data->hdr.magic) == XFS_DIR2_DATA_MAGIC);
1710#endif 1711#endif
1711 /* this seems to be an error 1712 /* this seems to be an error
1712 * data is only valid if DEBUG is defined? 1713 * data is only valid if DEBUG is defined?
@@ -1715,9 +1716,9 @@ xfs_dir2_leaf_trim_data(
1715 1716
1716 leaf = lbp->data; 1717 leaf = lbp->data;
1717 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 1718 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf);
1718 ASSERT(INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) == 1719 ASSERT(be16_to_cpu(data->hdr.bestfree[0].length) ==
1719 mp->m_dirblksize - (uint)sizeof(data->hdr)); 1720 mp->m_dirblksize - (uint)sizeof(data->hdr));
1720 ASSERT(db == INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); 1721 ASSERT(db == be32_to_cpu(ltp->bestcount) - 1);
1721 /* 1722 /*
1722 * Get rid of the data block. 1723 * Get rid of the data block.
1723 */ 1724 */
@@ -1730,10 +1731,10 @@ xfs_dir2_leaf_trim_data(
1730 * Eliminate the last bests entry from the table. 1731 * Eliminate the last bests entry from the table.
1731 */ 1732 */
1732 bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); 1733 bestsp = XFS_DIR2_LEAF_BESTS_P(ltp);
1733 INT_MOD(ltp->bestcount, ARCH_CONVERT, -1); 1734 be32_add(&ltp->bestcount, -1);
1734 memmove(&bestsp[1], &bestsp[0], INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(*bestsp)); 1735 memmove(&bestsp[1], &bestsp[0], be32_to_cpu(ltp->bestcount) * sizeof(*bestsp));
1735 xfs_dir2_leaf_log_tail(tp, lbp); 1736 xfs_dir2_leaf_log_tail(tp, lbp);
1736 xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); 1737 xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1);
1737 return 0; 1738 return 0;
1738} 1739}
1739 1740
@@ -1805,7 +1806,7 @@ xfs_dir2_node_to_leaf(
1805 return 0; 1806 return 0;
1806 lbp = state->path.blk[0].bp; 1807 lbp = state->path.blk[0].bp;
1807 leaf = lbp->data; 1808 leaf = lbp->data;
1808 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); 1809 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
1809 /* 1810 /*
1810 * Read the freespace block. 1811 * Read the freespace block.
1811 */ 1812 */
@@ -1814,15 +1815,15 @@ xfs_dir2_node_to_leaf(
1814 return error; 1815 return error;
1815 } 1816 }
1816 free = fbp->data; 1817 free = fbp->data;
1817 ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); 1818 ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
1818 ASSERT(!free->hdr.firstdb); 1819 ASSERT(!free->hdr.firstdb);
1819 /* 1820 /*
1820 * Now see if the leafn and free data will fit in a leaf1. 1821 * Now see if the leafn and free data will fit in a leaf1.
1821 * If not, release the buffer and give up. 1822 * If not, release the buffer and give up.
1822 */ 1823 */
1823 if ((uint)sizeof(leaf->hdr) + 1824 if ((uint)sizeof(leaf->hdr) +
1824 (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT)) * (uint)sizeof(leaf->ents[0]) + 1825 (be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale)) * (uint)sizeof(leaf->ents[0]) +
1825 INT_GET(free->hdr.nvalid, ARCH_CONVERT) * (uint)sizeof(leaf->bests[0]) + 1826 be32_to_cpu(free->hdr.nvalid) * (uint)sizeof(leaf->bests[0]) +
1826 (uint)sizeof(leaf->tail) > 1827 (uint)sizeof(leaf->tail) >
1827 mp->m_dirblksize) { 1828 mp->m_dirblksize) {
1828 xfs_da_brelse(tp, fbp); 1829 xfs_da_brelse(tp, fbp);
@@ -1832,22 +1833,22 @@ xfs_dir2_node_to_leaf(
1832 * If the leaf has any stale entries in it, compress them out. 1833 * If the leaf has any stale entries in it, compress them out.
1833 * The compact routine will log the header. 1834 * The compact routine will log the header.
1834 */ 1835 */
1835 if (INT_GET(leaf->hdr.stale, ARCH_CONVERT)) 1836 if (be16_to_cpu(leaf->hdr.stale))
1836 xfs_dir2_leaf_compact(args, lbp); 1837 xfs_dir2_leaf_compact(args, lbp);
1837 else 1838 else
1838 xfs_dir2_leaf_log_header(tp, lbp); 1839 xfs_dir2_leaf_log_header(tp, lbp);
1839 INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, XFS_DIR2_LEAF1_MAGIC); 1840 leaf->hdr.info.magic = cpu_to_be16(XFS_DIR2_LEAF1_MAGIC);
1840 /* 1841 /*
1841 * Set up the leaf tail from the freespace block. 1842 * Set up the leaf tail from the freespace block.
1842 */ 1843 */
1843 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 1844 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf);
1844 INT_COPY(ltp->bestcount, free->hdr.nvalid, ARCH_CONVERT); 1845 ltp->bestcount = free->hdr.nvalid;
1845 /* 1846 /*
1846 * Set up the leaf bests table. 1847 * Set up the leaf bests table.
1847 */ 1848 */
1848 memcpy(XFS_DIR2_LEAF_BESTS_P(ltp), free->bests, 1849 memcpy(XFS_DIR2_LEAF_BESTS_P(ltp), free->bests,
1849 INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(leaf->bests[0])); 1850 be32_to_cpu(ltp->bestcount) * sizeof(leaf->bests[0]));
1850 xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); 1851 xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1);
1851 xfs_dir2_leaf_log_tail(tp, lbp); 1852 xfs_dir2_leaf_log_tail(tp, lbp);
1852 xfs_dir2_leaf_check(dp, lbp); 1853 xfs_dir2_leaf_check(dp, lbp);
1853 /* 1854 /*
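The binary search in xfs_dir2_leaf_search_hash keeps its shape through the conversion: probe on the byte-swapped hash value, then walk backwards so the caller starts at the first of any run of equal hashes (several names can share a hash value). The same shape over plain host-order integers, as a stand-alone sketch with made-up names:

#include <stdio.h>

static int hash_search(const unsigned int *ents, int count, unsigned int want)
{
    int low = 0, high = count - 1, mid = 0;
    unsigned int hash = 0;

    while (low <= high) {
        mid = (low + high) >> 1;
        hash = ents[mid];
        if (hash == want)
            break;
        if (hash < want)
            low = mid + 1;
        else
            high = mid - 1;
    }
    /* Found one: back up through all the equal hash values. */
    if (hash == want)
        while (mid > 0 && ents[mid - 1] == want)
            mid--;
    return mid;
}

int main(void)
{
    unsigned int ents[] = { 3, 7, 7, 7, 9, 12 };

    printf("%d\n", hash_search(ents, 6, 7));    /* prints 1, the first 7 */
    return 0;
}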
diff --git a/fs/xfs/xfs_dir2_leaf.h b/fs/xfs/xfs_dir2_leaf.h
index 1393993d61e9..f57ca1162412 100644
--- a/fs/xfs/xfs_dir2_leaf.h
+++ b/fs/xfs/xfs_dir2_leaf.h
@@ -46,23 +46,23 @@ typedef __uint32_t xfs_dir2_dataptr_t;
46 */ 46 */
47typedef struct xfs_dir2_leaf_hdr { 47typedef struct xfs_dir2_leaf_hdr {
48 xfs_da_blkinfo_t info; /* header for da routines */ 48 xfs_da_blkinfo_t info; /* header for da routines */
49 __uint16_t count; /* count of entries */ 49 __be16 count; /* count of entries */
50 __uint16_t stale; /* count of stale entries */ 50 __be16 stale; /* count of stale entries */
51} xfs_dir2_leaf_hdr_t; 51} xfs_dir2_leaf_hdr_t;
52 52
53/* 53/*
54 * Leaf block entry. 54 * Leaf block entry.
55 */ 55 */
56typedef struct xfs_dir2_leaf_entry { 56typedef struct xfs_dir2_leaf_entry {
57 xfs_dahash_t hashval; /* hash value of name */ 57 __be32 hashval; /* hash value of name */
58 xfs_dir2_dataptr_t address; /* address of data entry */ 58 __be32 address; /* address of data entry */
59} xfs_dir2_leaf_entry_t; 59} xfs_dir2_leaf_entry_t;
60 60
61/* 61/*
62 * Leaf block tail. 62 * Leaf block tail.
63 */ 63 */
64typedef struct xfs_dir2_leaf_tail { 64typedef struct xfs_dir2_leaf_tail {
65 __uint32_t bestcount; 65 __be32 bestcount;
66} xfs_dir2_leaf_tail_t; 66} xfs_dir2_leaf_tail_t;
67 67
68/* 68/*
@@ -105,11 +105,10 @@ xfs_dir2_leaf_tail_p(struct xfs_mount *mp, xfs_dir2_leaf_t *lp)
105 * Get address of the bests array in the single-leaf block. 105 * Get address of the bests array in the single-leaf block.
106 */ 106 */
107#define XFS_DIR2_LEAF_BESTS_P(ltp) xfs_dir2_leaf_bests_p(ltp) 107#define XFS_DIR2_LEAF_BESTS_P(ltp) xfs_dir2_leaf_bests_p(ltp)
108static inline xfs_dir2_data_off_t * 108static inline __be16 *
109xfs_dir2_leaf_bests_p(xfs_dir2_leaf_tail_t *ltp) 109xfs_dir2_leaf_bests_p(xfs_dir2_leaf_tail_t *ltp)
110{ 110{
111 return (xfs_dir2_data_off_t *) 111 return (__be16 *)ltp - be32_to_cpu(ltp->bestcount);
112 (ltp) - INT_GET((ltp)->bestcount, ARCH_CONVERT);
113} 112}
114 113
115/* 114/*
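The rewritten xfs_dir2_leaf_bests_p() relies on the bests array abutting the leaf tail at the end of the directory block, so subtracting bestcount __be16 slots from the tail pointer finds its start. An illustrative layout (block size and tail placement as defined elsewhere in the tree):

/* Illustrative layout of a LEAF1 block; the bests array ends exactly
 * where the tail begins, so it is addressed backwards from the tail:
 *
 *	+---------------------------+  offset 0
 *	| xfs_dir2_leaf_hdr_t       |
 *	| ents[0 .. count-1]        |
 *	| ... unused ...            |
 *	| bests[0 .. bestcount-1]   |  __be16 each
 *	| xfs_dir2_leaf_tail_t      |  end of dir block
 *	+---------------------------+
 */
static inline __be16 *
xfs_dir2_leaf_bests_p(xfs_dir2_leaf_tail_t *ltp)
{
	return (__be16 *)ltp - be32_to_cpu(ltp->bestcount);
}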
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index 641f8633d254..af556f16a0c7 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -76,7 +76,7 @@ xfs_dir2_free_log_bests(
76 xfs_dir2_free_t *free; /* freespace structure */ 76 xfs_dir2_free_t *free; /* freespace structure */
77 77
78 free = bp->data; 78 free = bp->data;
79 ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); 79 ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
80 xfs_da_log_buf(tp, bp, 80 xfs_da_log_buf(tp, bp,
81 (uint)((char *)&free->bests[first] - (char *)free), 81 (uint)((char *)&free->bests[first] - (char *)free),
82 (uint)((char *)&free->bests[last] - (char *)free + 82 (uint)((char *)&free->bests[last] - (char *)free +
@@ -94,7 +94,7 @@ xfs_dir2_free_log_header(
94 xfs_dir2_free_t *free; /* freespace structure */ 94 xfs_dir2_free_t *free; /* freespace structure */
95 95
96 free = bp->data; 96 free = bp->data;
97 ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); 97 ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
98 xfs_da_log_buf(tp, bp, (uint)((char *)&free->hdr - (char *)free), 98 xfs_da_log_buf(tp, bp, (uint)((char *)&free->hdr - (char *)free),
99 (uint)(sizeof(xfs_dir2_free_hdr_t) - 1)); 99 (uint)(sizeof(xfs_dir2_free_hdr_t) - 1));
100} 100}
@@ -114,14 +114,14 @@ xfs_dir2_leaf_to_node(
114 xfs_dabuf_t *fbp; /* freespace buffer */ 114 xfs_dabuf_t *fbp; /* freespace buffer */
115 xfs_dir2_db_t fdb; /* freespace block number */ 115 xfs_dir2_db_t fdb; /* freespace block number */
116 xfs_dir2_free_t *free; /* freespace structure */ 116 xfs_dir2_free_t *free; /* freespace structure */
117 xfs_dir2_data_off_t *from; /* pointer to freespace entry */ 117 __be16 *from; /* pointer to freespace entry */
118 int i; /* leaf freespace index */ 118 int i; /* leaf freespace index */
119 xfs_dir2_leaf_t *leaf; /* leaf structure */ 119 xfs_dir2_leaf_t *leaf; /* leaf structure */
120 xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ 120 xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */
121 xfs_mount_t *mp; /* filesystem mount point */ 121 xfs_mount_t *mp; /* filesystem mount point */
122 int n; /* count of live freespc ents */ 122 int n; /* count of live freespc ents */
123 xfs_dir2_data_off_t off; /* freespace entry value */ 123 xfs_dir2_data_off_t off; /* freespace entry value */
124 xfs_dir2_data_off_t *to; /* pointer to freespace entry */ 124 __be16 *to; /* pointer to freespace entry */
125 xfs_trans_t *tp; /* transaction pointer */ 125 xfs_trans_t *tp; /* transaction pointer */
126 126
127 xfs_dir2_trace_args_b("leaf_to_node", args, lbp); 127 xfs_dir2_trace_args_b("leaf_to_node", args, lbp);
@@ -149,28 +149,28 @@ xfs_dir2_leaf_to_node(
149 /* 149 /*
150 * Initialize the freespace block header. 150 * Initialize the freespace block header.
151 */ 151 */
152 INT_SET(free->hdr.magic, ARCH_CONVERT, XFS_DIR2_FREE_MAGIC); 152 free->hdr.magic = cpu_to_be32(XFS_DIR2_FREE_MAGIC);
153 free->hdr.firstdb = 0; 153 free->hdr.firstdb = 0;
154 ASSERT(INT_GET(ltp->bestcount, ARCH_CONVERT) <= (uint)dp->i_d.di_size / mp->m_dirblksize); 154 ASSERT(be32_to_cpu(ltp->bestcount) <= (uint)dp->i_d.di_size / mp->m_dirblksize);
155 INT_COPY(free->hdr.nvalid, ltp->bestcount, ARCH_CONVERT); 155 free->hdr.nvalid = ltp->bestcount;
156 /* 156 /*
157 * Copy freespace entries from the leaf block to the new block. 157 * Copy freespace entries from the leaf block to the new block.
158 * Count active entries. 158 * Count active entries.
159 */ 159 */
160 for (i = n = 0, from = XFS_DIR2_LEAF_BESTS_P(ltp), to = free->bests; 160 for (i = n = 0, from = XFS_DIR2_LEAF_BESTS_P(ltp), to = free->bests;
161 i < INT_GET(ltp->bestcount, ARCH_CONVERT); i++, from++, to++) { 161 i < be32_to_cpu(ltp->bestcount); i++, from++, to++) {
162 if ((off = INT_GET(*from, ARCH_CONVERT)) != NULLDATAOFF) 162 if ((off = be16_to_cpu(*from)) != NULLDATAOFF)
163 n++; 163 n++;
164 INT_SET(*to, ARCH_CONVERT, off); 164 *to = cpu_to_be16(off);
165 } 165 }
166 INT_SET(free->hdr.nused, ARCH_CONVERT, n); 166 free->hdr.nused = cpu_to_be32(n);
167 INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, XFS_DIR2_LEAFN_MAGIC); 167 leaf->hdr.info.magic = cpu_to_be16(XFS_DIR2_LEAFN_MAGIC);
168 /* 168 /*
169 * Log everything. 169 * Log everything.
170 */ 170 */
171 xfs_dir2_leaf_log_header(tp, lbp); 171 xfs_dir2_leaf_log_header(tp, lbp);
172 xfs_dir2_free_log_header(tp, fbp); 172 xfs_dir2_free_log_header(tp, fbp);
173 xfs_dir2_free_log_bests(tp, fbp, 0, INT_GET(free->hdr.nvalid, ARCH_CONVERT) - 1); 173 xfs_dir2_free_log_bests(tp, fbp, 0, be32_to_cpu(free->hdr.nvalid) - 1);
174 xfs_da_buf_done(fbp); 174 xfs_da_buf_done(fbp);
175 xfs_dir2_leafn_check(dp, lbp); 175 xfs_dir2_leafn_check(dp, lbp);
176 return 0; 176 return 0;
@@ -217,15 +217,15 @@ xfs_dir2_leafn_add(
217 * a compact. 217 * a compact.
218 */ 218 */
219 219
220 if (INT_GET(leaf->hdr.count, ARCH_CONVERT) == XFS_DIR2_MAX_LEAF_ENTS(mp)) { 220 if (be16_to_cpu(leaf->hdr.count) == XFS_DIR2_MAX_LEAF_ENTS(mp)) {
221 if (!leaf->hdr.stale) 221 if (!leaf->hdr.stale)
222 return XFS_ERROR(ENOSPC); 222 return XFS_ERROR(ENOSPC);
223 compact = INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1; 223 compact = be16_to_cpu(leaf->hdr.stale) > 1;
224 } else 224 } else
225 compact = 0; 225 compact = 0;
226 ASSERT(index == 0 || INT_GET(leaf->ents[index - 1].hashval, ARCH_CONVERT) <= args->hashval); 226 ASSERT(index == 0 || be32_to_cpu(leaf->ents[index - 1].hashval) <= args->hashval);
227 ASSERT(index == INT_GET(leaf->hdr.count, ARCH_CONVERT) || 227 ASSERT(index == be16_to_cpu(leaf->hdr.count) ||
228 INT_GET(leaf->ents[index].hashval, ARCH_CONVERT) >= args->hashval); 228 be32_to_cpu(leaf->ents[index].hashval) >= args->hashval);
229 229
230 if (args->justcheck) 230 if (args->justcheck)
231 return 0; 231 return 0;
@@ -242,7 +242,7 @@ xfs_dir2_leafn_add(
242 * Set impossible logging indices for this case. 242 * Set impossible logging indices for this case.
243 */ 243 */
244 else if (leaf->hdr.stale) { 244 else if (leaf->hdr.stale) {
245 lfloglow = INT_GET(leaf->hdr.count, ARCH_CONVERT); 245 lfloglow = be16_to_cpu(leaf->hdr.count);
246 lfloghigh = -1; 246 lfloghigh = -1;
247 } 247 }
248 /* 248 /*
@@ -250,12 +250,12 @@ xfs_dir2_leafn_add(
250 */ 250 */
251 if (!leaf->hdr.stale) { 251 if (!leaf->hdr.stale) {
252 lep = &leaf->ents[index]; 252 lep = &leaf->ents[index];
253 if (index < INT_GET(leaf->hdr.count, ARCH_CONVERT)) 253 if (index < be16_to_cpu(leaf->hdr.count))
254 memmove(lep + 1, lep, 254 memmove(lep + 1, lep,
255 (INT_GET(leaf->hdr.count, ARCH_CONVERT) - index) * sizeof(*lep)); 255 (be16_to_cpu(leaf->hdr.count) - index) * sizeof(*lep));
256 lfloglow = index; 256 lfloglow = index;
257 lfloghigh = INT_GET(leaf->hdr.count, ARCH_CONVERT); 257 lfloghigh = be16_to_cpu(leaf->hdr.count);
258 INT_MOD(leaf->hdr.count, ARCH_CONVERT, +1); 258 be16_add(&leaf->hdr.count, 1);
259 } 259 }
260 /* 260 /*
261 * There are stale entries. We'll use one for the new entry. 261 * There are stale entries. We'll use one for the new entry.
@@ -271,7 +271,7 @@ xfs_dir2_leafn_add(
271 */ 271 */
272 for (lowstale = index - 1; 272 for (lowstale = index - 1;
273 lowstale >= 0 && 273 lowstale >= 0 &&
274 INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) != 274 be32_to_cpu(leaf->ents[lowstale].address) !=
275 XFS_DIR2_NULL_DATAPTR; 275 XFS_DIR2_NULL_DATAPTR;
276 lowstale--) 276 lowstale--)
277 continue; 277 continue;
@@ -281,8 +281,8 @@ xfs_dir2_leafn_add(
281 * lowstale already found. 281 * lowstale already found.
282 */ 282 */
283 for (highstale = index; 283 for (highstale = index;
284 highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) && 284 highstale < be16_to_cpu(leaf->hdr.count) &&
285 INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) != 285 be32_to_cpu(leaf->ents[highstale].address) !=
286 XFS_DIR2_NULL_DATAPTR && 286 XFS_DIR2_NULL_DATAPTR &&
287 (lowstale < 0 || 287 (lowstale < 0 ||
288 index - lowstale - 1 >= highstale - index); 288 index - lowstale - 1 >= highstale - index);
@@ -294,9 +294,9 @@ xfs_dir2_leafn_add(
294 * Shift entries up toward the stale slot. 294 * Shift entries up toward the stale slot.
295 */ 295 */
296 if (lowstale >= 0 && 296 if (lowstale >= 0 &&
297 (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) || 297 (highstale == be16_to_cpu(leaf->hdr.count) ||
298 index - lowstale - 1 < highstale - index)) { 298 index - lowstale - 1 < highstale - index)) {
299 ASSERT(INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) == 299 ASSERT(be32_to_cpu(leaf->ents[lowstale].address) ==
300 XFS_DIR2_NULL_DATAPTR); 300 XFS_DIR2_NULL_DATAPTR);
301 ASSERT(index - lowstale - 1 >= 0); 301 ASSERT(index - lowstale - 1 >= 0);
302 if (index - lowstale - 1 > 0) 302 if (index - lowstale - 1 > 0)
@@ -312,7 +312,7 @@ xfs_dir2_leafn_add(
312 * Shift entries down toward the stale slot. 312 * Shift entries down toward the stale slot.
313 */ 313 */
314 else { 314 else {
315 ASSERT(INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) == 315 ASSERT(be32_to_cpu(leaf->ents[highstale].address) ==
316 XFS_DIR2_NULL_DATAPTR); 316 XFS_DIR2_NULL_DATAPTR);
317 ASSERT(highstale - index >= 0); 317 ASSERT(highstale - index >= 0);
318 if (highstale - index > 0) 318 if (highstale - index > 0)
@@ -323,13 +323,14 @@ xfs_dir2_leafn_add(
323 lfloglow = MIN(index, lfloglow); 323 lfloglow = MIN(index, lfloglow);
324 lfloghigh = MAX(highstale, lfloghigh); 324 lfloghigh = MAX(highstale, lfloghigh);
325 } 325 }
326 INT_MOD(leaf->hdr.stale, ARCH_CONVERT, -1); 326 be16_add(&leaf->hdr.stale, -1);
327 } 327 }
328 /* 328 /*
329 * Insert the new entry, log everything. 329 * Insert the new entry, log everything.
330 */ 330 */
331 INT_SET(lep->hashval, ARCH_CONVERT, args->hashval); 331 lep->hashval = cpu_to_be32(args->hashval);
332 INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_DB_OFF_TO_DATAPTR(mp, args->blkno, args->index)); 332 lep->address = cpu_to_be32(XFS_DIR2_DB_OFF_TO_DATAPTR(mp,
333 args->blkno, args->index));
333 xfs_dir2_leaf_log_header(tp, bp); 334 xfs_dir2_leaf_log_header(tp, bp);
334 xfs_dir2_leaf_log_ents(tp, bp, lfloglow, lfloghigh); 335 xfs_dir2_leaf_log_ents(tp, bp, lfloglow, lfloghigh);
335 xfs_dir2_leafn_check(dp, bp); 336 xfs_dir2_leafn_check(dp, bp);
@@ -352,17 +353,17 @@ xfs_dir2_leafn_check(
352 353
353 leaf = bp->data; 354 leaf = bp->data;
354 mp = dp->i_mount; 355 mp = dp->i_mount;
355 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); 356 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
356 ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); 357 ASSERT(be16_to_cpu(leaf->hdr.count) <= XFS_DIR2_MAX_LEAF_ENTS(mp));
357 for (i = stale = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); i++) { 358 for (i = stale = 0; i < be16_to_cpu(leaf->hdr.count); i++) {
358 if (i + 1 < INT_GET(leaf->hdr.count, ARCH_CONVERT)) { 359 if (i + 1 < be16_to_cpu(leaf->hdr.count)) {
359 ASSERT(INT_GET(leaf->ents[i].hashval, ARCH_CONVERT) <= 360 ASSERT(be32_to_cpu(leaf->ents[i].hashval) <=
360 INT_GET(leaf->ents[i + 1].hashval, ARCH_CONVERT)); 361 be32_to_cpu(leaf->ents[i + 1].hashval));
361 } 362 }
362 if (INT_GET(leaf->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) 363 if (be32_to_cpu(leaf->ents[i].address) == XFS_DIR2_NULL_DATAPTR)
363 stale++; 364 stale++;
364 } 365 }
365 ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == stale); 366 ASSERT(be16_to_cpu(leaf->hdr.stale) == stale);
366} 367}
367#endif /* DEBUG */ 368#endif /* DEBUG */
368 369
@@ -378,12 +379,12 @@ xfs_dir2_leafn_lasthash(
378 xfs_dir2_leaf_t *leaf; /* leaf structure */ 379 xfs_dir2_leaf_t *leaf; /* leaf structure */
379 380
380 leaf = bp->data; 381 leaf = bp->data;
381 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); 382 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
382 if (count) 383 if (count)
383 *count = INT_GET(leaf->hdr.count, ARCH_CONVERT); 384 *count = be16_to_cpu(leaf->hdr.count);
384 if (!leaf->hdr.count) 385 if (!leaf->hdr.count)
385 return 0; 386 return 0;
386 return INT_GET(leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); 387 return be32_to_cpu(leaf->ents[be16_to_cpu(leaf->hdr.count) - 1].hashval);
387} 388}
388 389
389/* 390/*
@@ -419,9 +420,9 @@ xfs_dir2_leafn_lookup_int(
419 tp = args->trans; 420 tp = args->trans;
420 mp = dp->i_mount; 421 mp = dp->i_mount;
421 leaf = bp->data; 422 leaf = bp->data;
422 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); 423 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
423#ifdef __KERNEL__ 424#ifdef __KERNEL__
424 ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) > 0); 425 ASSERT(be16_to_cpu(leaf->hdr.count) > 0);
425#endif 426#endif
426 xfs_dir2_leafn_check(dp, bp); 427 xfs_dir2_leafn_check(dp, bp);
427 /* 428 /*
@@ -443,7 +444,7 @@ xfs_dir2_leafn_lookup_int(
443 curdb = -1; 444 curdb = -1;
444 length = XFS_DIR2_DATA_ENTSIZE(args->namelen); 445 length = XFS_DIR2_DATA_ENTSIZE(args->namelen);
445 if ((free = (curbp ? curbp->data : NULL))) 446 if ((free = (curbp ? curbp->data : NULL)))
446 ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); 447 ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
447 } 448 }
448 /* 449 /*
449 * For others, it's a data block buffer, get the block number. 450 * For others, it's a data block buffer, get the block number.
@@ -456,17 +457,17 @@ xfs_dir2_leafn_lookup_int(
456 * Loop over leaf entries with the right hash value. 457 * Loop over leaf entries with the right hash value.
457 */ 458 */
458 for (lep = &leaf->ents[index]; 459 for (lep = &leaf->ents[index];
459 index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval; 460 index < be16_to_cpu(leaf->hdr.count) && be32_to_cpu(lep->hashval) == args->hashval;
460 lep++, index++) { 461 lep++, index++) {
461 /* 462 /*
462 * Skip stale leaf entries. 463 * Skip stale leaf entries.
463 */ 464 */
464 if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) 465 if (be32_to_cpu(lep->address) == XFS_DIR2_NULL_DATAPTR)
465 continue; 466 continue;
466 /* 467 /*
467 * Pull the data block number from the entry. 468 * Pull the data block number from the entry.
468 */ 469 */
469 newdb = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); 470 newdb = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address));
470 /* 471 /*
471 * For addname, we're looking for a place to put the new entry. 472 * For addname, we're looking for a place to put the new entry.
472 * We want to use a data block with an entry of equal 473 * We want to use a data block with an entry of equal
@@ -506,15 +507,15 @@ xfs_dir2_leafn_lookup_int(
506 } 507 }
507 curfdb = newfdb; 508 curfdb = newfdb;
508 free = curbp->data; 509 free = curbp->data;
509 ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == 510 ASSERT(be32_to_cpu(free->hdr.magic) ==
510 XFS_DIR2_FREE_MAGIC); 511 XFS_DIR2_FREE_MAGIC);
511 ASSERT((INT_GET(free->hdr.firstdb, ARCH_CONVERT) % 512 ASSERT((be32_to_cpu(free->hdr.firstdb) %
512 XFS_DIR2_MAX_FREE_BESTS(mp)) == 513 XFS_DIR2_MAX_FREE_BESTS(mp)) ==
513 0); 514 0);
514 ASSERT(INT_GET(free->hdr.firstdb, ARCH_CONVERT) <= curdb); 515 ASSERT(be32_to_cpu(free->hdr.firstdb) <= curdb);
515 ASSERT(curdb < 516 ASSERT(curdb <
516 INT_GET(free->hdr.firstdb, ARCH_CONVERT) + 517 be32_to_cpu(free->hdr.firstdb) +
517 INT_GET(free->hdr.nvalid, ARCH_CONVERT)); 518 be32_to_cpu(free->hdr.nvalid));
518 } 519 }
519 /* 520 /*
520 * Get the index for our entry. 521 * Get the index for our entry.
@@ -523,12 +524,12 @@ xfs_dir2_leafn_lookup_int(
523 /* 524 /*
524 * If it has room, return it. 525 * If it has room, return it.
525 */ 526 */
526 if (unlikely(INT_GET(free->bests[fi], ARCH_CONVERT) == NULLDATAOFF)) { 527 if (unlikely(be16_to_cpu(free->bests[fi]) == NULLDATAOFF)) {
527 XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int", 528 XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int",
528 XFS_ERRLEVEL_LOW, mp); 529 XFS_ERRLEVEL_LOW, mp);
529 return XFS_ERROR(EFSCORRUPTED); 530 return XFS_ERROR(EFSCORRUPTED);
530 } 531 }
531 if (INT_GET(free->bests[fi], ARCH_CONVERT) >= length) { 532 if (be16_to_cpu(free->bests[fi]) >= length) {
532 *indexp = index; 533 *indexp = index;
533 state->extravalid = 1; 534 state->extravalid = 1;
534 state->extrablk.bp = curbp; 535 state->extrablk.bp = curbp;
@@ -572,7 +573,7 @@ xfs_dir2_leafn_lookup_int(
572 */ 573 */
573 dep = (xfs_dir2_data_entry_t *) 574 dep = (xfs_dir2_data_entry_t *)
574 ((char *)curbp->data + 575 ((char *)curbp->data +
575 XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT))); 576 XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address)));
576 /* 577 /*
577 * Compare the entry, return it if it matches. 578 * Compare the entry, return it if it matches.
578 */ 579 */
@@ -619,7 +620,7 @@ xfs_dir2_leafn_lookup_int(
619 * Return the final index, that will be the insertion point. 620 * Return the final index, that will be the insertion point.
620 */ 621 */
621 *indexp = index; 622 *indexp = index;
622 ASSERT(index == INT_GET(leaf->hdr.count, ARCH_CONVERT) || args->oknoent); 623 ASSERT(index == be16_to_cpu(leaf->hdr.count) || args->oknoent);
623 return XFS_ERROR(ENOENT); 624 return XFS_ERROR(ENOENT);
624} 625}
625 626
@@ -657,12 +658,12 @@ xfs_dir2_leafn_moveents(
657 * destination leaf entries, open up a hole in the destination 658 * destination leaf entries, open up a hole in the destination
658 * to hold the new entries. 659 * to hold the new entries.
659 */ 660 */
660 if (start_d < INT_GET(leaf_d->hdr.count, ARCH_CONVERT)) { 661 if (start_d < be16_to_cpu(leaf_d->hdr.count)) {
661 memmove(&leaf_d->ents[start_d + count], &leaf_d->ents[start_d], 662 memmove(&leaf_d->ents[start_d + count], &leaf_d->ents[start_d],
662 (INT_GET(leaf_d->hdr.count, ARCH_CONVERT) - start_d) * 663 (be16_to_cpu(leaf_d->hdr.count) - start_d) *
663 sizeof(xfs_dir2_leaf_entry_t)); 664 sizeof(xfs_dir2_leaf_entry_t));
664 xfs_dir2_leaf_log_ents(tp, bp_d, start_d + count, 665 xfs_dir2_leaf_log_ents(tp, bp_d, start_d + count,
665 count + INT_GET(leaf_d->hdr.count, ARCH_CONVERT) - 1); 666 count + be16_to_cpu(leaf_d->hdr.count) - 1);
666 } 667 }
667 /* 668 /*
668 * If the source has stale leaves, count the ones in the copy range 669 * If the source has stale leaves, count the ones in the copy range
@@ -672,7 +673,7 @@ xfs_dir2_leafn_moveents(
672 int i; /* temp leaf index */ 673 int i; /* temp leaf index */
673 674
674 for (i = start_s, stale = 0; i < start_s + count; i++) { 675 for (i = start_s, stale = 0; i < start_s + count; i++) {
675 if (INT_GET(leaf_s->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) 676 if (be32_to_cpu(leaf_s->ents[i].address) == XFS_DIR2_NULL_DATAPTR)
676 stale++; 677 stale++;
677 } 678 }
678 } else 679 } else
@@ -687,7 +688,7 @@ xfs_dir2_leafn_moveents(
687 * If there are source entries after the ones we copied, 688 * If there are source entries after the ones we copied,
688 * delete the ones we copied by sliding the next ones down. 689 * delete the ones we copied by sliding the next ones down.
689 */ 690 */
690 if (start_s + count < INT_GET(leaf_s->hdr.count, ARCH_CONVERT)) { 691 if (start_s + count < be16_to_cpu(leaf_s->hdr.count)) {
691 memmove(&leaf_s->ents[start_s], &leaf_s->ents[start_s + count], 692 memmove(&leaf_s->ents[start_s], &leaf_s->ents[start_s + count],
692 count * sizeof(xfs_dir2_leaf_entry_t)); 693 count * sizeof(xfs_dir2_leaf_entry_t));
693 xfs_dir2_leaf_log_ents(tp, bp_s, start_s, start_s + count - 1); 694 xfs_dir2_leaf_log_ents(tp, bp_s, start_s, start_s + count - 1);
@@ -695,10 +696,10 @@ xfs_dir2_leafn_moveents(
695 /* 696 /*
696 * Update the headers and log them. 697 * Update the headers and log them.
697 */ 698 */
698 INT_MOD(leaf_s->hdr.count, ARCH_CONVERT, -(count)); 699 be16_add(&leaf_s->hdr.count, -(count));
699 INT_MOD(leaf_s->hdr.stale, ARCH_CONVERT, -(stale)); 700 be16_add(&leaf_s->hdr.stale, -(stale));
700 INT_MOD(leaf_d->hdr.count, ARCH_CONVERT, count); 701 be16_add(&leaf_d->hdr.count, count);
701 INT_MOD(leaf_d->hdr.stale, ARCH_CONVERT, stale); 702 be16_add(&leaf_d->hdr.stale, stale);
702 xfs_dir2_leaf_log_header(tp, bp_s); 703 xfs_dir2_leaf_log_header(tp, bp_s);
703 xfs_dir2_leaf_log_header(tp, bp_d); 704 xfs_dir2_leaf_log_header(tp, bp_d);
704 xfs_dir2_leafn_check(args->dp, bp_s); 705 xfs_dir2_leafn_check(args->dp, bp_s);
@@ -719,13 +720,13 @@ xfs_dir2_leafn_order(
719 720
720 leaf1 = leaf1_bp->data; 721 leaf1 = leaf1_bp->data;
721 leaf2 = leaf2_bp->data; 722 leaf2 = leaf2_bp->data;
722 ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); 723 ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
723 ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); 724 ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
724 if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0 && 725 if (be16_to_cpu(leaf1->hdr.count) > 0 &&
725 INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0 && 726 be16_to_cpu(leaf2->hdr.count) > 0 &&
726 (INT_GET(leaf2->ents[0].hashval, ARCH_CONVERT) < INT_GET(leaf1->ents[0].hashval, ARCH_CONVERT) || 727 (be32_to_cpu(leaf2->ents[0].hashval) < be32_to_cpu(leaf1->ents[0].hashval) ||
727 INT_GET(leaf2->ents[INT_GET(leaf2->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT) < 728 be32_to_cpu(leaf2->ents[be16_to_cpu(leaf2->hdr.count) - 1].hashval) <
728 INT_GET(leaf1->ents[INT_GET(leaf1->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT))) 729 be32_to_cpu(leaf1->ents[be16_to_cpu(leaf1->hdr.count) - 1].hashval)))
729 return 1; 730 return 1;
730 return 0; 731 return 0;
731} 732}
@@ -768,9 +769,9 @@ xfs_dir2_leafn_rebalance(
768 } 769 }
769 leaf1 = blk1->bp->data; 770 leaf1 = blk1->bp->data;
770 leaf2 = blk2->bp->data; 771 leaf2 = blk2->bp->data;
771 oldsum = INT_GET(leaf1->hdr.count, ARCH_CONVERT) + INT_GET(leaf2->hdr.count, ARCH_CONVERT); 772 oldsum = be16_to_cpu(leaf1->hdr.count) + be16_to_cpu(leaf2->hdr.count);
772#ifdef DEBUG 773#ifdef DEBUG
773 oldstale = INT_GET(leaf1->hdr.stale, ARCH_CONVERT) + INT_GET(leaf2->hdr.stale, ARCH_CONVERT); 774 oldstale = be16_to_cpu(leaf1->hdr.stale) + be16_to_cpu(leaf2->hdr.stale);
774#endif 775#endif
775 mid = oldsum >> 1; 776 mid = oldsum >> 1;
776 /* 777 /*
@@ -780,10 +781,10 @@ xfs_dir2_leafn_rebalance(
780 if (oldsum & 1) { 781 if (oldsum & 1) {
781 xfs_dahash_t midhash; /* middle entry hash value */ 782 xfs_dahash_t midhash; /* middle entry hash value */
782 783
783 if (mid >= INT_GET(leaf1->hdr.count, ARCH_CONVERT)) 784 if (mid >= be16_to_cpu(leaf1->hdr.count))
784 midhash = INT_GET(leaf2->ents[mid - INT_GET(leaf1->hdr.count, ARCH_CONVERT)].hashval, ARCH_CONVERT); 785 midhash = be32_to_cpu(leaf2->ents[mid - be16_to_cpu(leaf1->hdr.count)].hashval);
785 else 786 else
786 midhash = INT_GET(leaf1->ents[mid].hashval, ARCH_CONVERT); 787 midhash = be32_to_cpu(leaf1->ents[mid].hashval);
787 isleft = args->hashval <= midhash; 788 isleft = args->hashval <= midhash;
788 } 789 }
789 /* 790 /*
@@ -797,30 +798,30 @@ xfs_dir2_leafn_rebalance(
797 * Calculate moved entry count. Positive means left-to-right, 798 * Calculate moved entry count. Positive means left-to-right,
798 * negative means right-to-left. Then move the entries. 799 * negative means right-to-left. Then move the entries.
799 */ 800 */
800 count = INT_GET(leaf1->hdr.count, ARCH_CONVERT) - mid + (isleft == 0); 801 count = be16_to_cpu(leaf1->hdr.count) - mid + (isleft == 0);
801 if (count > 0) 802 if (count > 0)
802 xfs_dir2_leafn_moveents(args, blk1->bp, 803 xfs_dir2_leafn_moveents(args, blk1->bp,
803 INT_GET(leaf1->hdr.count, ARCH_CONVERT) - count, blk2->bp, 0, count); 804 be16_to_cpu(leaf1->hdr.count) - count, blk2->bp, 0, count);
804 else if (count < 0) 805 else if (count < 0)
805 xfs_dir2_leafn_moveents(args, blk2->bp, 0, blk1->bp, 806 xfs_dir2_leafn_moveents(args, blk2->bp, 0, blk1->bp,
806 INT_GET(leaf1->hdr.count, ARCH_CONVERT), count); 807 be16_to_cpu(leaf1->hdr.count), count);
807 ASSERT(INT_GET(leaf1->hdr.count, ARCH_CONVERT) + INT_GET(leaf2->hdr.count, ARCH_CONVERT) == oldsum); 808 ASSERT(be16_to_cpu(leaf1->hdr.count) + be16_to_cpu(leaf2->hdr.count) == oldsum);
808 ASSERT(INT_GET(leaf1->hdr.stale, ARCH_CONVERT) + INT_GET(leaf2->hdr.stale, ARCH_CONVERT) == oldstale); 809 ASSERT(be16_to_cpu(leaf1->hdr.stale) + be16_to_cpu(leaf2->hdr.stale) == oldstale);
809 /* 810 /*
810 * Mark whether we're inserting into the old or new leaf. 811 * Mark whether we're inserting into the old or new leaf.
811 */ 812 */
812 if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) < INT_GET(leaf2->hdr.count, ARCH_CONVERT)) 813 if (be16_to_cpu(leaf1->hdr.count) < be16_to_cpu(leaf2->hdr.count))
813 state->inleaf = swap; 814 state->inleaf = swap;
814 else if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > INT_GET(leaf2->hdr.count, ARCH_CONVERT)) 815 else if (be16_to_cpu(leaf1->hdr.count) > be16_to_cpu(leaf2->hdr.count))
815 state->inleaf = !swap; 816 state->inleaf = !swap;
816 else 817 else
817 state->inleaf = 818 state->inleaf =
818 swap ^ (blk1->index <= INT_GET(leaf1->hdr.count, ARCH_CONVERT)); 819 swap ^ (blk1->index <= be16_to_cpu(leaf1->hdr.count));
819 /* 820 /*
820 * Adjust the expected index for insertion. 821 * Adjust the expected index for insertion.
821 */ 822 */
822 if (!state->inleaf) 823 if (!state->inleaf)
823 blk2->index = blk1->index - INT_GET(leaf1->hdr.count, ARCH_CONVERT); 824 blk2->index = blk1->index - be16_to_cpu(leaf1->hdr.count);
824 825
825 /* 826 /*
826 * Finally sanity check just to make sure we are not returning a negative index 827 * Finally sanity check just to make sure we are not returning a negative index
@@ -867,7 +868,7 @@ xfs_dir2_leafn_remove(
867 tp = args->trans; 868 tp = args->trans;
868 mp = dp->i_mount; 869 mp = dp->i_mount;
869 leaf = bp->data; 870 leaf = bp->data;
870 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); 871 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
871 /* 872 /*
872 * Point to the entry we're removing. 873 * Point to the entry we're removing.
873 */ 874 */
@@ -875,17 +876,17 @@ xfs_dir2_leafn_remove(
875 /* 876 /*
876 * Extract the data block and offset from the entry. 877 * Extract the data block and offset from the entry.
877 */ 878 */
878 db = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); 879 db = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address));
879 ASSERT(dblk->blkno == db); 880 ASSERT(dblk->blkno == db);
880 off = XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT)); 881 off = XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address));
881 ASSERT(dblk->index == off); 882 ASSERT(dblk->index == off);
882 /* 883 /*
883 * Kill the leaf entry by marking it stale. 884 * Kill the leaf entry by marking it stale.
884 * Log the leaf block changes. 885 * Log the leaf block changes.
885 */ 886 */
886 INT_MOD(leaf->hdr.stale, ARCH_CONVERT, +1); 887 be16_add(&leaf->hdr.stale, 1);
887 xfs_dir2_leaf_log_header(tp, bp); 888 xfs_dir2_leaf_log_header(tp, bp);
888 INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR); 889 lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR);
889 xfs_dir2_leaf_log_ents(tp, bp, index, index); 890 xfs_dir2_leaf_log_ents(tp, bp, index, index);
890 /* 891 /*
891 * Make the data entry free. Keep track of the longest freespace 892 * Make the data entry free. Keep track of the longest freespace
@@ -894,7 +895,7 @@ xfs_dir2_leafn_remove(
894 dbp = dblk->bp; 895 dbp = dblk->bp;
895 data = dbp->data; 896 data = dbp->data;
896 dep = (xfs_dir2_data_entry_t *)((char *)data + off); 897 dep = (xfs_dir2_data_entry_t *)((char *)data + off);
897 longest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT); 898 longest = be16_to_cpu(data->hdr.bestfree[0].length);
898 needlog = needscan = 0; 899 needlog = needscan = 0;
899 xfs_dir2_data_make_free(tp, dbp, off, 900 xfs_dir2_data_make_free(tp, dbp, off,
900 XFS_DIR2_DATA_ENTSIZE(dep->namelen), &needlog, &needscan); 901 XFS_DIR2_DATA_ENTSIZE(dep->namelen), &needlog, &needscan);
@@ -911,7 +912,7 @@ xfs_dir2_leafn_remove(
911 * If the longest data block freespace changes, need to update 912 * If the longest data block freespace changes, need to update
912 * the corresponding freeblock entry. 913 * the corresponding freeblock entry.
913 */ 914 */
914 if (longest < INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) { 915 if (longest < be16_to_cpu(data->hdr.bestfree[0].length)) {
915 int error; /* error return value */ 916 int error; /* error return value */
916 xfs_dabuf_t *fbp; /* freeblock buffer */ 917 xfs_dabuf_t *fbp; /* freeblock buffer */
917 xfs_dir2_db_t fdb; /* freeblock block number */ 918 xfs_dir2_db_t fdb; /* freeblock block number */
@@ -929,15 +930,15 @@ xfs_dir2_leafn_remove(
929 return error; 930 return error;
930 } 931 }
931 free = fbp->data; 932 free = fbp->data;
932 ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); 933 ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
933 ASSERT(INT_GET(free->hdr.firstdb, ARCH_CONVERT) == 934 ASSERT(be32_to_cpu(free->hdr.firstdb) ==
934 XFS_DIR2_MAX_FREE_BESTS(mp) * 935 XFS_DIR2_MAX_FREE_BESTS(mp) *
935 (fdb - XFS_DIR2_FREE_FIRSTDB(mp))); 936 (fdb - XFS_DIR2_FREE_FIRSTDB(mp)));
936 /* 937 /*
937 * Calculate which entry we need to fix. 938 * Calculate which entry we need to fix.
938 */ 939 */
939 findex = XFS_DIR2_DB_TO_FDINDEX(mp, db); 940 findex = XFS_DIR2_DB_TO_FDINDEX(mp, db);
940 longest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT); 941 longest = be16_to_cpu(data->hdr.bestfree[0].length);
941 /* 942 /*
942 * If the data block is now empty we can get rid of it 943 * If the data block is now empty we can get rid of it
943 * (usually). 944 * (usually).
@@ -969,7 +970,7 @@ xfs_dir2_leafn_remove(
969 /* 970 /*
970 * One less used entry in the free table. 971 * One less used entry in the free table.
971 */ 972 */
 972 INT_MOD(free->hdr.nused, ARCH_CONVERT, -1); 973 be32_add(&free->hdr.nused, -1);
973 xfs_dir2_free_log_header(tp, fbp); 974 xfs_dir2_free_log_header(tp, fbp);
974 /* 975 /*
975 * If this was the last entry in the table, we can 976 * If this was the last entry in the table, we can
@@ -977,21 +978,21 @@ xfs_dir2_leafn_remove(
977 * entries at the end referring to non-existent 978 * entries at the end referring to non-existent
978 * data blocks, get those too. 979 * data blocks, get those too.
979 */ 980 */
980 if (findex == INT_GET(free->hdr.nvalid, ARCH_CONVERT) - 1) { 981 if (findex == be32_to_cpu(free->hdr.nvalid) - 1) {
981 int i; /* free entry index */ 982 int i; /* free entry index */
982 983
983 for (i = findex - 1; 984 for (i = findex - 1;
984 i >= 0 && INT_GET(free->bests[i], ARCH_CONVERT) == NULLDATAOFF; 985 i >= 0 && be16_to_cpu(free->bests[i]) == NULLDATAOFF;
985 i--) 986 i--)
986 continue; 987 continue;
987 INT_SET(free->hdr.nvalid, ARCH_CONVERT, i + 1); 988 free->hdr.nvalid = cpu_to_be32(i + 1);
988 logfree = 0; 989 logfree = 0;
989 } 990 }
990 /* 991 /*
991 * Not the last entry, just punch it out. 992 * Not the last entry, just punch it out.
992 */ 993 */
993 else { 994 else {
994 INT_SET(free->bests[findex], ARCH_CONVERT, NULLDATAOFF); 995 free->bests[findex] = cpu_to_be16(NULLDATAOFF);
995 logfree = 1; 996 logfree = 1;
996 } 997 }
997 /* 998 /*
@@ -1017,7 +1018,7 @@ xfs_dir2_leafn_remove(
1017 * the new value. 1018 * the new value.
1018 */ 1019 */
1019 else { 1020 else {
1020 INT_SET(free->bests[findex], ARCH_CONVERT, longest); 1021 free->bests[findex] = cpu_to_be16(longest);
1021 logfree = 1; 1022 logfree = 1;
1022 } 1023 }
1023 /* 1024 /*
@@ -1039,7 +1040,7 @@ xfs_dir2_leafn_remove(
1039 *rval = 1040 *rval =
1040 ((uint)sizeof(leaf->hdr) + 1041 ((uint)sizeof(leaf->hdr) +
1041 (uint)sizeof(leaf->ents[0]) * 1042 (uint)sizeof(leaf->ents[0]) *
1042 (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT))) < 1043 (be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale))) <
1043 mp->m_dir_magicpct; 1044 mp->m_dir_magicpct;
1044 return 0; 1045 return 0;
1045} 1046}
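The *rval computation in xfs_dir2_leafn_remove() flags the block for a join when its live payload falls below the mount's magic percentage. A worked sketch of that test, assuming the usual m_dir_magicpct initialization of 37% of the directory block size:

/* Sketch of the join-threshold test; assumes m_dir_magicpct was set
 * at mount time to (m_dirblksize * 37) / 100, e.g. 1515 bytes for a
 * 4096-byte directory block. */
int live  = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale);
int bytes = (uint)sizeof(leaf->hdr) + live * (uint)sizeof(leaf->ents[0]);

*rval = bytes < mp->m_dir_magicpct;	/* shrunk enough to try a join */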
@@ -1138,9 +1139,9 @@ xfs_dir2_leafn_toosmall(
1138 */ 1139 */
1139 blk = &state->path.blk[state->path.active - 1]; 1140 blk = &state->path.blk[state->path.active - 1];
1140 info = blk->bp->data; 1141 info = blk->bp->data;
1141 ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); 1142 ASSERT(be16_to_cpu(info->magic) == XFS_DIR2_LEAFN_MAGIC);
1142 leaf = (xfs_dir2_leaf_t *)info; 1143 leaf = (xfs_dir2_leaf_t *)info;
1143 count = INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT); 1144 count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale);
1144 bytes = (uint)sizeof(leaf->hdr) + count * (uint)sizeof(leaf->ents[0]); 1145 bytes = (uint)sizeof(leaf->hdr) + count * (uint)sizeof(leaf->ents[0]);
1145 if (bytes > (state->blocksize >> 1)) { 1146 if (bytes > (state->blocksize >> 1)) {
1146 /* 1147 /*
@@ -1160,7 +1161,7 @@ xfs_dir2_leafn_toosmall(
1160 * Make altpath point to the block we want to keep and 1161 * Make altpath point to the block we want to keep and
1161 * path point to the block we want to drop (this one). 1162 * path point to the block we want to drop (this one).
1162 */ 1163 */
1163 forward = info->forw; 1164 forward = (info->forw != 0);
1164 memcpy(&state->altpath, &state->path, sizeof(state->path)); 1165 memcpy(&state->altpath, &state->path, sizeof(state->path));
1165 error = xfs_da_path_shift(state, &state->altpath, forward, 0, 1166 error = xfs_da_path_shift(state, &state->altpath, forward, 0,
1166 &rval); 1167 &rval);
@@ -1176,9 +1177,9 @@ xfs_dir2_leafn_toosmall(
1176 * We prefer coalescing with the lower numbered sibling so as 1177 * We prefer coalescing with the lower numbered sibling so as
1177 * to shrink a directory over time. 1178 * to shrink a directory over time.
1178 */ 1179 */
1179 forward = INT_GET(info->forw, ARCH_CONVERT) < INT_GET(info->back, ARCH_CONVERT); 1180 forward = be32_to_cpu(info->forw) < be32_to_cpu(info->back);
1180 for (i = 0, bp = NULL; i < 2; forward = !forward, i++) { 1181 for (i = 0, bp = NULL; i < 2; forward = !forward, i++) {
1181 blkno = forward ?INT_GET( info->forw, ARCH_CONVERT) : INT_GET(info->back, ARCH_CONVERT); 1182 blkno = forward ? be32_to_cpu(info->forw) : be32_to_cpu(info->back);
1182 if (blkno == 0) 1183 if (blkno == 0)
1183 continue; 1184 continue;
1184 /* 1185 /*
@@ -1194,11 +1195,11 @@ xfs_dir2_leafn_toosmall(
1194 * Count bytes in the two blocks combined. 1195 * Count bytes in the two blocks combined.
1195 */ 1196 */
1196 leaf = (xfs_dir2_leaf_t *)info; 1197 leaf = (xfs_dir2_leaf_t *)info;
1197 count = INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT); 1198 count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale);
1198 bytes = state->blocksize - (state->blocksize >> 2); 1199 bytes = state->blocksize - (state->blocksize >> 2);
1199 leaf = bp->data; 1200 leaf = bp->data;
1200 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); 1201 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
1201 count += INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT); 1202 count += be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale);
1202 bytes -= count * (uint)sizeof(leaf->ents[0]); 1203 bytes -= count * (uint)sizeof(leaf->ents[0]);
1203 /* 1204 /*
1204 * Fits with at least 25% to spare. 1205 * Fits with at least 25% to spare.
@@ -1256,27 +1257,27 @@ xfs_dir2_leafn_unbalance(
1256 ASSERT(save_blk->magic == XFS_DIR2_LEAFN_MAGIC); 1257 ASSERT(save_blk->magic == XFS_DIR2_LEAFN_MAGIC);
1257 drop_leaf = drop_blk->bp->data; 1258 drop_leaf = drop_blk->bp->data;
1258 save_leaf = save_blk->bp->data; 1259 save_leaf = save_blk->bp->data;
1259 ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); 1260 ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
1260 ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); 1261 ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
1261 /* 1262 /*
1262 * If there are any stale leaf entries, take this opportunity 1263 * If there are any stale leaf entries, take this opportunity
1263 * to purge them. 1264 * to purge them.
1264 */ 1265 */
1265 if (INT_GET(drop_leaf->hdr.stale, ARCH_CONVERT)) 1266 if (drop_leaf->hdr.stale)
1266 xfs_dir2_leaf_compact(args, drop_blk->bp); 1267 xfs_dir2_leaf_compact(args, drop_blk->bp);
1267 if (INT_GET(save_leaf->hdr.stale, ARCH_CONVERT)) 1268 if (save_leaf->hdr.stale)
1268 xfs_dir2_leaf_compact(args, save_blk->bp); 1269 xfs_dir2_leaf_compact(args, save_blk->bp);
1269 /* 1270 /*
1270 * Move the entries from drop to the appropriate end of save. 1271 * Move the entries from drop to the appropriate end of save.
1271 */ 1272 */
1272 drop_blk->hashval = INT_GET(drop_leaf->ents[INT_GET(drop_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); 1273 drop_blk->hashval = be32_to_cpu(drop_leaf->ents[be16_to_cpu(drop_leaf->hdr.count) - 1].hashval);
1273 if (xfs_dir2_leafn_order(save_blk->bp, drop_blk->bp)) 1274 if (xfs_dir2_leafn_order(save_blk->bp, drop_blk->bp))
1274 xfs_dir2_leafn_moveents(args, drop_blk->bp, 0, save_blk->bp, 0, 1275 xfs_dir2_leafn_moveents(args, drop_blk->bp, 0, save_blk->bp, 0,
1275 INT_GET(drop_leaf->hdr.count, ARCH_CONVERT)); 1276 be16_to_cpu(drop_leaf->hdr.count));
1276 else 1277 else
1277 xfs_dir2_leafn_moveents(args, drop_blk->bp, 0, save_blk->bp, 1278 xfs_dir2_leafn_moveents(args, drop_blk->bp, 0, save_blk->bp,
1278 INT_GET(save_leaf->hdr.count, ARCH_CONVERT), INT_GET(drop_leaf->hdr.count, ARCH_CONVERT)); 1279 be16_to_cpu(save_leaf->hdr.count), be16_to_cpu(drop_leaf->hdr.count));
1279 save_blk->hashval = INT_GET(save_leaf->ents[INT_GET(save_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); 1280 save_blk->hashval = be32_to_cpu(save_leaf->ents[be16_to_cpu(save_leaf->hdr.count) - 1].hashval);
1280 xfs_dir2_leafn_check(args->dp, save_blk->bp); 1281 xfs_dir2_leafn_check(args->dp, save_blk->bp);
1281} 1282}
1282 1283
@@ -1378,7 +1379,7 @@ xfs_dir2_node_addname_int(
1378 xfs_mount_t *mp; /* filesystem mount point */ 1379 xfs_mount_t *mp; /* filesystem mount point */
1379 int needlog; /* need to log data header */ 1380 int needlog; /* need to log data header */
1380 int needscan; /* need to rescan data frees */ 1381 int needscan; /* need to rescan data frees */
1381 xfs_dir2_data_off_t *tagp; /* data entry tag pointer */ 1382 __be16 *tagp; /* data entry tag pointer */
1382 xfs_trans_t *tp; /* transaction pointer */ 1383 xfs_trans_t *tp; /* transaction pointer */
1383 1384
1384 dp = args->dp; 1385 dp = args->dp;
@@ -1397,7 +1398,7 @@ xfs_dir2_node_addname_int(
1397 */ 1398 */
1398 ifbno = fblk->blkno; 1399 ifbno = fblk->blkno;
1399 free = fbp->data; 1400 free = fbp->data;
1400 ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); 1401 ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
1401 findex = fblk->index; 1402 findex = fblk->index;
1402 /* 1403 /*
1403 * This means the free entry showed that the data block had 1404 * This means the free entry showed that the data block had
@@ -1405,10 +1406,10 @@ xfs_dir2_node_addname_int(
1405 * Use that data block. 1406 * Use that data block.
1406 */ 1407 */
1407 if (findex >= 0) { 1408 if (findex >= 0) {
1408 ASSERT(findex < INT_GET(free->hdr.nvalid, ARCH_CONVERT)); 1409 ASSERT(findex < be32_to_cpu(free->hdr.nvalid));
1409 ASSERT(INT_GET(free->bests[findex], ARCH_CONVERT) != NULLDATAOFF); 1410 ASSERT(be16_to_cpu(free->bests[findex]) != NULLDATAOFF);
1410 ASSERT(INT_GET(free->bests[findex], ARCH_CONVERT) >= length); 1411 ASSERT(be16_to_cpu(free->bests[findex]) >= length);
1411 dbno = INT_GET(free->hdr.firstdb, ARCH_CONVERT) + findex; 1412 dbno = be32_to_cpu(free->hdr.firstdb) + findex;
1412 } 1413 }
1413 /* 1414 /*
1414 * The data block looked at didn't have enough room. 1415 * The data block looked at didn't have enough room.
@@ -1481,20 +1482,20 @@ xfs_dir2_node_addname_int(
1481 continue; 1482 continue;
1482 } 1483 }
1483 free = fbp->data; 1484 free = fbp->data;
1484 ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); 1485 ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
1485 findex = 0; 1486 findex = 0;
1486 } 1487 }
1487 /* 1488 /*
1488 * Look at the current free entry. Is it good enough? 1489 * Look at the current free entry. Is it good enough?
1489 */ 1490 */
1490 if (INT_GET(free->bests[findex], ARCH_CONVERT) != NULLDATAOFF && 1491 if (be16_to_cpu(free->bests[findex]) != NULLDATAOFF &&
1491 INT_GET(free->bests[findex], ARCH_CONVERT) >= length) 1492 be16_to_cpu(free->bests[findex]) >= length)
1492 dbno = INT_GET(free->hdr.firstdb, ARCH_CONVERT) + findex; 1493 dbno = be32_to_cpu(free->hdr.firstdb) + findex;
1493 else { 1494 else {
1494 /* 1495 /*
1495 * Are we done with the freeblock? 1496 * Are we done with the freeblock?
1496 */ 1497 */
1497 if (++findex == INT_GET(free->hdr.nvalid, ARCH_CONVERT)) { 1498 if (++findex == be32_to_cpu(free->hdr.nvalid)) {
1498 /* 1499 /*
1499 * Drop the block. 1500 * Drop the block.
1500 */ 1501 */
@@ -1608,15 +1609,15 @@ xfs_dir2_node_addname_int(
1608 * its first slot as our empty slot. 1609 * its first slot as our empty slot.
1609 */ 1610 */
1610 free = fbp->data; 1611 free = fbp->data;
1611 INT_SET(free->hdr.magic, ARCH_CONVERT, XFS_DIR2_FREE_MAGIC); 1612 free->hdr.magic = cpu_to_be32(XFS_DIR2_FREE_MAGIC);
1612 INT_SET(free->hdr.firstdb, ARCH_CONVERT, 1613 free->hdr.firstdb = cpu_to_be32(
1613 (fbno - XFS_DIR2_FREE_FIRSTDB(mp)) * 1614 (fbno - XFS_DIR2_FREE_FIRSTDB(mp)) *
1614 XFS_DIR2_MAX_FREE_BESTS(mp)); 1615 XFS_DIR2_MAX_FREE_BESTS(mp));
1615 free->hdr.nvalid = 0; 1616 free->hdr.nvalid = 0;
1616 free->hdr.nused = 0; 1617 free->hdr.nused = 0;
1617 } else { 1618 } else {
1618 free = fbp->data; 1619 free = fbp->data;
1619 ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); 1620 ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
1620 } 1621 }
1621 1622
1622 /* 1623 /*
@@ -1627,20 +1628,20 @@ xfs_dir2_node_addname_int(
1627 * If it's after the end of the current entries in the 1628 * If it's after the end of the current entries in the
1628 * freespace block, extend that table. 1629 * freespace block, extend that table.
1629 */ 1630 */
1630 if (findex >= INT_GET(free->hdr.nvalid, ARCH_CONVERT)) { 1631 if (findex >= be32_to_cpu(free->hdr.nvalid)) {
1631 ASSERT(findex < XFS_DIR2_MAX_FREE_BESTS(mp)); 1632 ASSERT(findex < XFS_DIR2_MAX_FREE_BESTS(mp));
1632 INT_SET(free->hdr.nvalid, ARCH_CONVERT, findex + 1); 1633 free->hdr.nvalid = cpu_to_be32(findex + 1);
1633 /* 1634 /*
1634 * Tag new entry so nused will go up. 1635 * Tag new entry so nused will go up.
1635 */ 1636 */
1636 INT_SET(free->bests[findex], ARCH_CONVERT, NULLDATAOFF); 1637 free->bests[findex] = cpu_to_be16(NULLDATAOFF);
1637 } 1638 }
1638 /* 1639 /*
1639 * If this entry was for an empty data block 1640 * If this entry was for an empty data block
1640 * (this should always be true) then update the header. 1641 * (this should always be true) then update the header.
1641 */ 1642 */
1642 if (INT_GET(free->bests[findex], ARCH_CONVERT) == NULLDATAOFF) { 1643 if (be16_to_cpu(free->bests[findex]) == NULLDATAOFF) {
1643 INT_MOD(free->hdr.nused, ARCH_CONVERT, +1); 1644 be32_add(&free->hdr.nused, 1);
1644 xfs_dir2_free_log_header(tp, fbp); 1645 xfs_dir2_free_log_header(tp, fbp);
1645 } 1646 }
1646 /* 1647 /*
@@ -1649,7 +1650,7 @@ xfs_dir2_node_addname_int(
1649 * change again. 1650 * change again.
1650 */ 1651 */
1651 data = dbp->data; 1652 data = dbp->data;
1652 INT_COPY(free->bests[findex], data->hdr.bestfree[0].length, ARCH_CONVERT); 1653 free->bests[findex] = data->hdr.bestfree[0].length;
1653 logfree = 1; 1654 logfree = 1;
1654 } 1655 }
1655 /* 1656 /*
@@ -1677,12 +1678,12 @@ xfs_dir2_node_addname_int(
1677 data = dbp->data; 1678 data = dbp->data;
1678 logfree = 0; 1679 logfree = 0;
1679 } 1680 }
1680 ASSERT(INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) >= length); 1681 ASSERT(be16_to_cpu(data->hdr.bestfree[0].length) >= length);
1681 /* 1682 /*
1682 * Point to the existing unused space. 1683 * Point to the existing unused space.
1683 */ 1684 */
1684 dup = (xfs_dir2_data_unused_t *) 1685 dup = (xfs_dir2_data_unused_t *)
1685 ((char *)data + INT_GET(data->hdr.bestfree[0].offset, ARCH_CONVERT)); 1686 ((char *)data + be16_to_cpu(data->hdr.bestfree[0].offset));
1686 needscan = needlog = 0; 1687 needscan = needlog = 0;
1687 /* 1688 /*
1688 * Mark the first part of the unused space, inuse for us. 1689 * Mark the first part of the unused space, inuse for us.
@@ -1698,7 +1699,7 @@ xfs_dir2_node_addname_int(
1698 dep->namelen = args->namelen; 1699 dep->namelen = args->namelen;
1699 memcpy(dep->name, args->name, dep->namelen); 1700 memcpy(dep->name, args->name, dep->namelen);
1700 tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); 1701 tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep);
1701 INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)data)); 1702 *tagp = cpu_to_be16((char *)dep - (char *)data);
1702 xfs_dir2_data_log_entry(tp, dbp, dep); 1703 xfs_dir2_data_log_entry(tp, dbp, dep);
1703 /* 1704 /*
1704 * Rescan the block for bestfree if needed. 1705 * Rescan the block for bestfree if needed.
@@ -1713,8 +1714,8 @@ xfs_dir2_node_addname_int(
1713 /* 1714 /*
1714 * If the freespace entry is now wrong, update it. 1715 * If the freespace entry is now wrong, update it.
1715 */ 1716 */
1716 if (INT_GET(free->bests[findex], ARCH_CONVERT) != INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) { 1717 if (be16_to_cpu(free->bests[findex]) != be16_to_cpu(data->hdr.bestfree[0].length)) {
1717 INT_COPY(free->bests[findex], data->hdr.bestfree[0].length, ARCH_CONVERT); 1718 free->bests[findex] = data->hdr.bestfree[0].length;
1718 logfree = 1; 1719 logfree = 1;
1719 } 1720 }
1720 /* 1721 /*
@@ -1731,7 +1732,7 @@ xfs_dir2_node_addname_int(
1731 * Return the data block and offset in args, then drop the data block. 1732 * Return the data block and offset in args, then drop the data block.
1732 */ 1733 */
1733 args->blkno = (xfs_dablk_t)dbno; 1734 args->blkno = (xfs_dablk_t)dbno;
1734 args->index = INT_GET(*tagp, ARCH_CONVERT); 1735 args->index = be16_to_cpu(*tagp);
1735 xfs_da_buf_done(dbp); 1736 xfs_da_buf_done(dbp);
1736 return 0; 1737 return 0;
1737} 1738}
@@ -1900,10 +1901,10 @@ xfs_dir2_node_replace(
1900 * Point to the data entry. 1901 * Point to the data entry.
1901 */ 1902 */
1902 data = state->extrablk.bp->data; 1903 data = state->extrablk.bp->data;
1903 ASSERT(INT_GET(data->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC); 1904 ASSERT(be32_to_cpu(data->hdr.magic) == XFS_DIR2_DATA_MAGIC);
1904 dep = (xfs_dir2_data_entry_t *) 1905 dep = (xfs_dir2_data_entry_t *)
1905 ((char *)data + 1906 ((char *)data +
1906 XFS_DIR2_DATAPTR_TO_OFF(state->mp, INT_GET(lep->address, ARCH_CONVERT))); 1907 XFS_DIR2_DATAPTR_TO_OFF(state->mp, be32_to_cpu(lep->address)));
1907 ASSERT(inum != INT_GET(dep->inumber, ARCH_CONVERT)); 1908 ASSERT(inum != INT_GET(dep->inumber, ARCH_CONVERT));
1908 /* 1909 /*
1909 * Fill in the new inode number and log the entry. 1910 * Fill in the new inode number and log the entry.
@@ -1966,11 +1967,11 @@ xfs_dir2_node_trim_free(
1966 return 0; 1967 return 0;
1967 } 1968 }
1968 free = bp->data; 1969 free = bp->data;
1969 ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); 1970 ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
1970 /* 1971 /*
1971 * If there are used entries, there's nothing to do. 1972 * If there are used entries, there's nothing to do.
1972 */ 1973 */
1973 if (INT_GET(free->hdr.nused, ARCH_CONVERT) > 0) { 1974 if (be32_to_cpu(free->hdr.nused) > 0) {
1974 xfs_da_brelse(tp, bp); 1975 xfs_da_brelse(tp, bp);
1975 *rvalp = 0; 1976 *rvalp = 0;
1976 return 0; 1977 return 0;
diff --git a/fs/xfs/xfs_dir2_node.h b/fs/xfs/xfs_dir2_node.h
index 0ab8fbd59512..c7c870ee7857 100644
--- a/fs/xfs/xfs_dir2_node.h
+++ b/fs/xfs/xfs_dir2_node.h
@@ -41,15 +41,15 @@ struct xfs_trans;
41#define XFS_DIR2_FREE_MAGIC 0x58443246 /* XD2F */ 41#define XFS_DIR2_FREE_MAGIC 0x58443246 /* XD2F */
42 42
43typedef struct xfs_dir2_free_hdr { 43typedef struct xfs_dir2_free_hdr {
44 __uint32_t magic; /* XFS_DIR2_FREE_MAGIC */ 44 __be32 magic; /* XFS_DIR2_FREE_MAGIC */
45 __int32_t firstdb; /* db of first entry */ 45 __be32 firstdb; /* db of first entry */
46 __int32_t nvalid; /* count of valid entries */ 46 __be32 nvalid; /* count of valid entries */
47 __int32_t nused; /* count of used entries */ 47 __be32 nused; /* count of used entries */
48} xfs_dir2_free_hdr_t; 48} xfs_dir2_free_hdr_t;
49 49
50typedef struct xfs_dir2_free { 50typedef struct xfs_dir2_free {
51 xfs_dir2_free_hdr_t hdr; /* block header */ 51 xfs_dir2_free_hdr_t hdr; /* block header */
52 xfs_dir2_data_off_t bests[1]; /* best free counts */ 52 __be16 bests[1]; /* best free counts */
53 /* unused entries are -1 */ 53 /* unused entries are -1 */
54} xfs_dir2_free_t; 54} xfs_dir2_free_t;
55 55
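With the xfs_dir2_free_hdr_t fields annotated __be32 and bests[] __be16, sparse can flag any access that skips conversion. A hypothetical helper showing the resulting access discipline (the function name and its use are illustrative, not from this patch):

/* Hypothetical example of the post-annotation access pattern: every
 * on-disk load goes through be*_to_cpu(), so native and disk-endian
 * values can no longer be mixed silently. */
static int
xfs_dir2_free_fits(xfs_dir2_free_t *free, int findex, int length)
{
	if (be32_to_cpu(free->hdr.magic) != XFS_DIR2_FREE_MAGIC)
		return 0;
	if (findex >= be32_to_cpu(free->hdr.nvalid))
		return 0;
	return be16_to_cpu(free->bests[findex]) != NULLDATAOFF &&
	       be16_to_cpu(free->bests[findex]) >= length;
}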
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index ec8e7476c8b7..d98a41d1fe63 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -98,8 +98,8 @@ xfs_dir2_block_sfsize(
98 /* 98 /*
99 * Iterate over the block's data entries by using the leaf pointers. 99 * Iterate over the block's data entries by using the leaf pointers.
100 */ 100 */
101 for (i = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) { 101 for (i = 0; i < be32_to_cpu(btp->count); i++) {
102 if ((addr = INT_GET(blp[i].address, ARCH_CONVERT)) == XFS_DIR2_NULL_DATAPTR) 102 if ((addr = be32_to_cpu(blp[i].address)) == XFS_DIR2_NULL_DATAPTR)
103 continue; 103 continue;
104 /* 104 /*
105 * Calculate the pointer to the entry at hand. 105 * Calculate the pointer to the entry at hand.
@@ -220,8 +220,8 @@ xfs_dir2_block_to_sf(
220 * If it's unused, just skip over it. 220 * If it's unused, just skip over it.
221 */ 221 */
222 dup = (xfs_dir2_data_unused_t *)ptr; 222 dup = (xfs_dir2_data_unused_t *)ptr;
223 if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { 223 if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
224 ptr += INT_GET(dup->length, ARCH_CONVERT); 224 ptr += be16_to_cpu(dup->length);
225 continue; 225 continue;
226 } 226 }
227 dep = (xfs_dir2_data_entry_t *)ptr; 227 dep = (xfs_dir2_data_entry_t *)ptr;
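xfs_dir2_block_to_sf() above walks the data block by cursor, skipping unused regions by their on-disk length. A condensed sketch of that iteration, assuming ptr/endptr bound the data area as in the surrounding function:

/* Condensed sketch of the dir2 data-block walk: unused regions carry
 * XFS_DIR2_DATA_FREE_TAG and their own length, so the cursor advances
 * by be16_to_cpu(dup->length) over holes and by the computed entry
 * size over live entries. */
while (ptr < endptr) {
	xfs_dir2_data_unused_t	*dup = (xfs_dir2_data_unused_t *)ptr;
	xfs_dir2_data_entry_t	*dep;

	if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
		ptr += be16_to_cpu(dup->length);
		continue;
	}
	dep = (xfs_dir2_data_entry_t *)ptr;
	/* ... copy dep into the shortform entry ... */
	ptr += XFS_DIR2_DATA_ENTSIZE(dep->namelen);
}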
diff --git a/fs/xfs/xfs_dir_leaf.c b/fs/xfs/xfs_dir_leaf.c
index e83074016abb..ee88751c3be6 100644
--- a/fs/xfs/xfs_dir_leaf.c
+++ b/fs/xfs/xfs_dir_leaf.c
@@ -176,7 +176,7 @@ xfs_dir_shortform_addname(xfs_da_args_t *args)
176 ASSERT(dp->i_df.if_u1.if_data != NULL); 176 ASSERT(dp->i_df.if_u1.if_data != NULL);
177 sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; 177 sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data;
178 sfe = &sf->list[0]; 178 sfe = &sf->list[0];
179 for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { 179 for (i = sf->hdr.count-1; i >= 0; i--) {
180 if (sfe->namelen == args->namelen && 180 if (sfe->namelen == args->namelen &&
181 args->name[0] == sfe->name[0] && 181 args->name[0] == sfe->name[0] &&
182 memcmp(args->name, sfe->name, args->namelen) == 0) 182 memcmp(args->name, sfe->name, args->namelen) == 0)
@@ -193,7 +193,7 @@ xfs_dir_shortform_addname(xfs_da_args_t *args)
193 XFS_DIR_SF_PUT_DIRINO(&args->inumber, &sfe->inumber); 193 XFS_DIR_SF_PUT_DIRINO(&args->inumber, &sfe->inumber);
194 sfe->namelen = args->namelen; 194 sfe->namelen = args->namelen;
195 memcpy(sfe->name, args->name, sfe->namelen); 195 memcpy(sfe->name, args->name, sfe->namelen);
196 INT_MOD(sf->hdr.count, ARCH_CONVERT, +1); 196 sf->hdr.count++;
197 197
198 dp->i_d.di_size += size; 198 dp->i_d.di_size += size;
199 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); 199 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
@@ -227,7 +227,7 @@ xfs_dir_shortform_removename(xfs_da_args_t *args)
227 base = sizeof(xfs_dir_sf_hdr_t); 227 base = sizeof(xfs_dir_sf_hdr_t);
228 sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; 228 sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data;
229 sfe = &sf->list[0]; 229 sfe = &sf->list[0];
230 for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { 230 for (i = sf->hdr.count-1; i >= 0; i--) {
231 size = XFS_DIR_SF_ENTSIZE_BYENTRY(sfe); 231 size = XFS_DIR_SF_ENTSIZE_BYENTRY(sfe);
232 if (sfe->namelen == args->namelen && 232 if (sfe->namelen == args->namelen &&
233 sfe->name[0] == args->name[0] && 233 sfe->name[0] == args->name[0] &&
@@ -245,7 +245,7 @@ xfs_dir_shortform_removename(xfs_da_args_t *args)
245 memmove(&((char *)sf)[base], &((char *)sf)[base+size], 245 memmove(&((char *)sf)[base], &((char *)sf)[base+size],
246 dp->i_d.di_size - (base+size)); 246 dp->i_d.di_size - (base+size));
247 } 247 }
248 INT_MOD(sf->hdr.count, ARCH_CONVERT, -1); 248 sf->hdr.count--;
249 249
250 xfs_idata_realloc(dp, -size, XFS_DATA_FORK); 250 xfs_idata_realloc(dp, -size, XFS_DATA_FORK);
251 dp->i_d.di_size -= size; 251 dp->i_d.di_size -= size;
@@ -288,7 +288,7 @@ xfs_dir_shortform_lookup(xfs_da_args_t *args)
288 return(XFS_ERROR(EEXIST)); 288 return(XFS_ERROR(EEXIST));
289 } 289 }
290 sfe = &sf->list[0]; 290 sfe = &sf->list[0];
291 for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { 291 for (i = sf->hdr.count-1; i >= 0; i--) {
292 if (sfe->namelen == args->namelen && 292 if (sfe->namelen == args->namelen &&
293 sfe->name[0] == args->name[0] && 293 sfe->name[0] == args->name[0] &&
294 memcmp(args->name, sfe->name, args->namelen) == 0) { 294 memcmp(args->name, sfe->name, args->namelen) == 0) {
@@ -375,7 +375,7 @@ xfs_dir_shortform_to_leaf(xfs_da_args_t *iargs)
375 goto out; 375 goto out;
376 376
377 sfe = &sf->list[0]; 377 sfe = &sf->list[0];
378 for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { 378 for (i = 0; i < sf->hdr.count; i++) {
379 args.name = (char *)(sfe->name); 379 args.name = (char *)(sfe->name);
380 args.namelen = sfe->namelen; 380 args.namelen = sfe->namelen;
381 args.hashval = xfs_da_hashname((char *)(sfe->name), 381 args.hashval = xfs_da_hashname((char *)(sfe->name),
@@ -428,7 +428,7 @@ xfs_dir_shortform_getdents(xfs_inode_t *dp, uio_t *uio, int *eofp,
428 sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; 428 sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data;
429 cookhash = XFS_DA_COOKIE_HASH(mp, uio->uio_offset); 429 cookhash = XFS_DA_COOKIE_HASH(mp, uio->uio_offset);
430 want_entno = XFS_DA_COOKIE_ENTRY(mp, uio->uio_offset); 430 want_entno = XFS_DA_COOKIE_ENTRY(mp, uio->uio_offset);
431 nsbuf = INT_GET(sf->hdr.count, ARCH_CONVERT) + 2; 431 nsbuf = sf->hdr.count + 2;
432 sbsize = (nsbuf + 1) * sizeof(*sbuf); 432 sbsize = (nsbuf + 1) * sizeof(*sbuf);
433 sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP); 433 sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP);
434 434
@@ -460,8 +460,7 @@ xfs_dir_shortform_getdents(xfs_inode_t *dp, uio_t *uio, int *eofp,
460 /* 460 /*
461 * Scan the directory data for the rest of the entries. 461 * Scan the directory data for the rest of the entries.
462 */ 462 */
463 for (i = 0, sfe = &sf->list[0]; 463 for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
464 i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) {
465 464
466 if (unlikely( 465 if (unlikely(
467 ((char *)sfe < (char *)sf) || 466 ((char *)sfe < (char *)sf) ||
@@ -600,7 +599,7 @@ xfs_dir_shortform_replace(xfs_da_args_t *args)
600 } 599 }
601 ASSERT(args->namelen != 1 || args->name[0] != '.'); 600 ASSERT(args->namelen != 1 || args->name[0] != '.');
602 sfe = &sf->list[0]; 601 sfe = &sf->list[0];
603 for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { 602 for (i = sf->hdr.count-1; i >= 0; i--) {
604 if (sfe->namelen == args->namelen && 603 if (sfe->namelen == args->namelen &&
605 sfe->name[0] == args->name[0] && 604 sfe->name[0] == args->name[0] &&
606 memcmp(args->name, sfe->name, args->namelen) == 0) { 605 memcmp(args->name, sfe->name, args->namelen) == 0) {
@@ -644,7 +643,7 @@ xfs_dir_leaf_to_shortform(xfs_da_args_t *iargs)
644 ASSERT(bp != NULL); 643 ASSERT(bp != NULL);
645 memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount)); 644 memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount));
646 leaf = (xfs_dir_leafblock_t *)tmpbuffer; 645 leaf = (xfs_dir_leafblock_t *)tmpbuffer;
647 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); 646 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
648 memset(bp->data, 0, XFS_LBSIZE(dp->i_mount)); 647 memset(bp->data, 0, XFS_LBSIZE(dp->i_mount));
649 648
650 /* 649 /*
@@ -742,11 +741,13 @@ xfs_dir_leaf_to_node(xfs_da_args_t *args)
742 } 741 }
743 node = bp1->data; 742 node = bp1->data;
744 leaf = bp2->data; 743 leaf = bp2->data;
745 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); 744 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
746 INT_SET(node->btree[0].hashval, ARCH_CONVERT, INT_GET(leaf->entries[ INT_GET(leaf->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)); 745 node->btree[0].hashval = cpu_to_be32(
746 INT_GET(leaf->entries[
747 INT_GET(leaf->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT));
747 xfs_da_buf_done(bp2); 748 xfs_da_buf_done(bp2);
748 INT_SET(node->btree[0].before, ARCH_CONVERT, blkno); 749 node->btree[0].before = cpu_to_be32(blkno);
749 INT_SET(node->hdr.count, ARCH_CONVERT, 1); 750 node->hdr.count = cpu_to_be16(1);
750 xfs_da_log_buf(args->trans, bp1, 751 xfs_da_log_buf(args->trans, bp1,
751 XFS_DA_LOGRANGE(node, &node->btree[0], sizeof(node->btree[0]))); 752 XFS_DA_LOGRANGE(node, &node->btree[0], sizeof(node->btree[0])));
752 xfs_da_buf_done(bp1); 753 xfs_da_buf_done(bp1);
@@ -781,7 +782,7 @@ xfs_dir_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp)
781 leaf = bp->data; 782 leaf = bp->data;
782 memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount)); 783 memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount));
783 hdr = &leaf->hdr; 784 hdr = &leaf->hdr;
784 INT_SET(hdr->info.magic, ARCH_CONVERT, XFS_DIR_LEAF_MAGIC); 785 hdr->info.magic = cpu_to_be16(XFS_DIR_LEAF_MAGIC);
785 INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount)); 786 INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount));
786 if (!hdr->firstused) 787 if (!hdr->firstused)
787 INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount) - 1); 788 INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount) - 1);
@@ -860,7 +861,7 @@ xfs_dir_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args, int index)
860 int tablesize, entsize, sum, i, tmp, error; 861 int tablesize, entsize, sum, i, tmp, error;
861 862
862 leaf = bp->data; 863 leaf = bp->data;
863 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); 864 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
864 ASSERT((index >= 0) && (index <= INT_GET(leaf->hdr.count, ARCH_CONVERT))); 865 ASSERT((index >= 0) && (index <= INT_GET(leaf->hdr.count, ARCH_CONVERT)));
865 hdr = &leaf->hdr; 866 hdr = &leaf->hdr;
866 entsize = XFS_DIR_LEAF_ENTSIZE_BYNAME(args->namelen); 867 entsize = XFS_DIR_LEAF_ENTSIZE_BYNAME(args->namelen);
@@ -940,7 +941,7 @@ xfs_dir_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int index,
940 int tmp, i; 941 int tmp, i;
941 942
942 leaf = bp->data; 943 leaf = bp->data;
943 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); 944 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
944 hdr = &leaf->hdr; 945 hdr = &leaf->hdr;
945 ASSERT((mapindex >= 0) && (mapindex < XFS_DIR_LEAF_MAPSIZE)); 946 ASSERT((mapindex >= 0) && (mapindex < XFS_DIR_LEAF_MAPSIZE));
946 ASSERT((index >= 0) && (index <= INT_GET(hdr->count, ARCH_CONVERT))); 947 ASSERT((index >= 0) && (index <= INT_GET(hdr->count, ARCH_CONVERT)));
@@ -1097,8 +1098,8 @@ xfs_dir_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
1097 ASSERT(blk2->magic == XFS_DIR_LEAF_MAGIC); 1098 ASSERT(blk2->magic == XFS_DIR_LEAF_MAGIC);
1098 leaf1 = blk1->bp->data; 1099 leaf1 = blk1->bp->data;
1099 leaf2 = blk2->bp->data; 1100 leaf2 = blk2->bp->data;
1100 ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); 1101 ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
1101 ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); 1102 ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
1102 1103
1103 /* 1104 /*
1104 * Check ordering of blocks, reverse if it makes things simpler. 1105 * Check ordering of blocks, reverse if it makes things simpler.
@@ -1325,7 +1326,7 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action)
1325 */ 1326 */
1326 blk = &state->path.blk[ state->path.active-1 ]; 1327 blk = &state->path.blk[ state->path.active-1 ];
1327 info = blk->bp->data; 1328 info = blk->bp->data;
1328 ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); 1329 ASSERT(be16_to_cpu(info->magic) == XFS_DIR_LEAF_MAGIC);
1329 leaf = (xfs_dir_leafblock_t *)info; 1330 leaf = (xfs_dir_leafblock_t *)info;
1330 count = INT_GET(leaf->hdr.count, ARCH_CONVERT); 1331 count = INT_GET(leaf->hdr.count, ARCH_CONVERT);
1331 bytes = (uint)sizeof(xfs_dir_leaf_hdr_t) + 1332 bytes = (uint)sizeof(xfs_dir_leaf_hdr_t) +
@@ -1348,7 +1349,7 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action)
1348 * Make altpath point to the block we want to keep and 1349 * Make altpath point to the block we want to keep and
1349 * path point to the block we want to drop (this one). 1350 * path point to the block we want to drop (this one).
1350 */ 1351 */
1351 forward = info->forw; 1352 forward = (info->forw != 0);
1352 memcpy(&state->altpath, &state->path, sizeof(state->path)); 1353 memcpy(&state->altpath, &state->path, sizeof(state->path));
1353 error = xfs_da_path_shift(state, &state->altpath, forward, 1354 error = xfs_da_path_shift(state, &state->altpath, forward,
1354 0, &retval); 1355 0, &retval);
@@ -1369,12 +1370,12 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action)
1369 * We prefer coalescing with the lower numbered sibling so as 1370 * We prefer coalescing with the lower numbered sibling so as
1370 * to shrink a directory over time. 1371 * to shrink a directory over time.
1371 */ 1372 */
1372 forward = (INT_GET(info->forw, ARCH_CONVERT) < INT_GET(info->back, ARCH_CONVERT)); /* start with smaller blk num */ 1373 forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back)); /* start with smaller blk num */
1373 for (i = 0; i < 2; forward = !forward, i++) { 1374 for (i = 0; i < 2; forward = !forward, i++) {
1374 if (forward) 1375 if (forward)
1375 blkno = INT_GET(info->forw, ARCH_CONVERT); 1376 blkno = be32_to_cpu(info->forw);
1376 else 1377 else
1377 blkno = INT_GET(info->back, ARCH_CONVERT); 1378 blkno = be32_to_cpu(info->back);
1378 if (blkno == 0) 1379 if (blkno == 0)
1379 continue; 1380 continue;
1380 error = xfs_da_read_buf(state->args->trans, state->args->dp, 1381 error = xfs_da_read_buf(state->args->trans, state->args->dp,
@@ -1389,7 +1390,7 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action)
1389 bytes = state->blocksize - (state->blocksize>>2); 1390 bytes = state->blocksize - (state->blocksize>>2);
1390 bytes -= INT_GET(leaf->hdr.namebytes, ARCH_CONVERT); 1391 bytes -= INT_GET(leaf->hdr.namebytes, ARCH_CONVERT);
1391 leaf = bp->data; 1392 leaf = bp->data;
1392 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); 1393 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
1393 count += INT_GET(leaf->hdr.count, ARCH_CONVERT); 1394 count += INT_GET(leaf->hdr.count, ARCH_CONVERT);
1394 bytes -= INT_GET(leaf->hdr.namebytes, ARCH_CONVERT); 1395 bytes -= INT_GET(leaf->hdr.namebytes, ARCH_CONVERT);
1395 bytes -= count * ((uint)sizeof(xfs_dir_leaf_name_t) - 1); 1396 bytes -= count * ((uint)sizeof(xfs_dir_leaf_name_t) - 1);
@@ -1447,7 +1448,7 @@ xfs_dir_leaf_remove(xfs_trans_t *trans, xfs_dabuf_t *bp, int index)
1447 xfs_mount_t *mp; 1448 xfs_mount_t *mp;
1448 1449
1449 leaf = bp->data; 1450 leaf = bp->data;
1450 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); 1451 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
1451 hdr = &leaf->hdr; 1452 hdr = &leaf->hdr;
1452 mp = trans->t_mountp; 1453 mp = trans->t_mountp;
1453 ASSERT((INT_GET(hdr->count, ARCH_CONVERT) > 0) && (INT_GET(hdr->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8))); 1454 ASSERT((INT_GET(hdr->count, ARCH_CONVERT) > 0) && (INT_GET(hdr->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8)));
@@ -1599,8 +1600,8 @@ xfs_dir_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1599 ASSERT(save_blk->magic == XFS_DIR_LEAF_MAGIC); 1600 ASSERT(save_blk->magic == XFS_DIR_LEAF_MAGIC);
1600 drop_leaf = drop_blk->bp->data; 1601 drop_leaf = drop_blk->bp->data;
1601 save_leaf = save_blk->bp->data; 1602 save_leaf = save_blk->bp->data;
1602 ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); 1603 ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
1603 ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); 1604 ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
1604 drop_hdr = &drop_leaf->hdr; 1605 drop_hdr = &drop_leaf->hdr;
1605 save_hdr = &save_leaf->hdr; 1606 save_hdr = &save_leaf->hdr;
1606 1607
@@ -1695,7 +1696,7 @@ xfs_dir_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args, int *index)
1695 xfs_dahash_t hashval; 1696 xfs_dahash_t hashval;
1696 1697
1697 leaf = bp->data; 1698 leaf = bp->data;
1698 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); 1699 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
1699 ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) < (XFS_LBSIZE(args->dp->i_mount)/8)); 1700 ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) < (XFS_LBSIZE(args->dp->i_mount)/8));
1700 1701
1701 /* 1702 /*
@@ -1782,8 +1783,8 @@ xfs_dir_leaf_moveents(xfs_dir_leafblock_t *leaf_s, int start_s,
1782 /* 1783 /*
1783 * Set up environment. 1784 * Set up environment.
1784 */ 1785 */
1785 ASSERT(INT_GET(leaf_s->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); 1786 ASSERT(be16_to_cpu(leaf_s->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
1786 ASSERT(INT_GET(leaf_d->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); 1787 ASSERT(be16_to_cpu(leaf_d->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
1787 hdr_s = &leaf_s->hdr; 1788 hdr_s = &leaf_s->hdr;
1788 hdr_d = &leaf_d->hdr; 1789 hdr_d = &leaf_d->hdr;
1789 ASSERT((INT_GET(hdr_s->count, ARCH_CONVERT) > 0) && (INT_GET(hdr_s->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8))); 1790 ASSERT((INT_GET(hdr_s->count, ARCH_CONVERT) > 0) && (INT_GET(hdr_s->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8)));
@@ -1883,8 +1884,8 @@ xfs_dir_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp)
1883 1884
1884 leaf1 = leaf1_bp->data; 1885 leaf1 = leaf1_bp->data;
1885 leaf2 = leaf2_bp->data; 1886 leaf2 = leaf2_bp->data;
1886 ASSERT((INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) && 1887 ASSERT((be16_to_cpu(leaf1->hdr.info.magic) == XFS_DIR_LEAF_MAGIC) &&
1887 (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC)); 1888 (be16_to_cpu(leaf2->hdr.info.magic) == XFS_DIR_LEAF_MAGIC));
1888 if ((INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0) && 1889 if ((INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0) &&
1889 ((INT_GET(leaf2->entries[ 0 ].hashval, ARCH_CONVERT) < 1890 ((INT_GET(leaf2->entries[ 0 ].hashval, ARCH_CONVERT) <
1890 INT_GET(leaf1->entries[ 0 ].hashval, ARCH_CONVERT)) || 1891 INT_GET(leaf1->entries[ 0 ].hashval, ARCH_CONVERT)) ||
@@ -1904,7 +1905,7 @@ xfs_dir_leaf_lasthash(xfs_dabuf_t *bp, int *count)
1904 xfs_dir_leafblock_t *leaf; 1905 xfs_dir_leafblock_t *leaf;
1905 1906
1906 leaf = bp->data; 1907 leaf = bp->data;
1907 ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); 1908 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC);
1908 if (count) 1909 if (count)
1909 *count = INT_GET(leaf->hdr.count, ARCH_CONVERT); 1910 *count = INT_GET(leaf->hdr.count, ARCH_CONVERT);
1910 if (!leaf->hdr.count) 1911 if (!leaf->hdr.count)
@@ -1940,7 +1941,7 @@ xfs_dir_leaf_getdents_int(
1940 1941
1941 mp = dp->i_mount; 1942 mp = dp->i_mount;
1942 leaf = bp->data; 1943 leaf = bp->data;
1943 if (INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) { 1944 if (be16_to_cpu(leaf->hdr.info.magic) != XFS_DIR_LEAF_MAGIC) {
1944 *eobp = 1; 1945 *eobp = 1;
1945 return XFS_ERROR(ENOENT); /* XXX wrong code */ 1946 return XFS_ERROR(ENOENT); /* XXX wrong code */
1946 } 1947 }
@@ -1992,7 +1993,7 @@ xfs_dir_leaf_getdents_int(
1992 1993
1993 if (i == INT_GET(leaf->hdr.count, ARCH_CONVERT)) { 1994 if (i == INT_GET(leaf->hdr.count, ARCH_CONVERT)) {
1994 xfs_dir_trace_g_du("leaf: hash not found", dp, uio); 1995 xfs_dir_trace_g_du("leaf: hash not found", dp, uio);
1995 if (!INT_GET(leaf->hdr.info.forw, ARCH_CONVERT)) 1996 if (!leaf->hdr.info.forw)
1996 uio->uio_offset = 1997 uio->uio_offset =
1997 XFS_DA_MAKE_COOKIE(mp, 0, 0, XFS_DA_MAXHASH); 1998 XFS_DA_MAKE_COOKIE(mp, 0, 0, XFS_DA_MAXHASH);
1998 /* 1999 /*
@@ -2047,8 +2048,7 @@ xfs_dir_leaf_getdents_int(
2047 xfs_dir_trace_g_duc("leaf: middle cookie ", 2048 xfs_dir_trace_g_duc("leaf: middle cookie ",
2048 dp, uio, p.cook.o); 2049 dp, uio, p.cook.o);
2049 2050
2050 } else if ((thishash = INT_GET(leaf->hdr.info.forw, 2051 } else if ((thishash = be32_to_cpu(leaf->hdr.info.forw))) {
2051 ARCH_CONVERT))) {
2052 xfs_dabuf_t *bp2; 2052 xfs_dabuf_t *bp2;
2053 xfs_dir_leafblock_t *leaf2; 2053 xfs_dir_leafblock_t *leaf2;
2054 2054
@@ -2064,9 +2064,9 @@ xfs_dir_leaf_getdents_int(
2064 leaf2 = bp2->data; 2064 leaf2 = bp2->data;
2065 2065
2066 if (unlikely( 2066 if (unlikely(
2067 (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) 2067 (be16_to_cpu(leaf2->hdr.info.magic)
2068 != XFS_DIR_LEAF_MAGIC) 2068 != XFS_DIR_LEAF_MAGIC)
2069 || (INT_GET(leaf2->hdr.info.back, ARCH_CONVERT) 2069 || (be32_to_cpu(leaf2->hdr.info.back)
2070 != bno))) { /* GROT */ 2070 != bno))) { /* GROT */
2071 XFS_CORRUPTION_ERROR("xfs_dir_leaf_getdents_int(3)", 2071 XFS_CORRUPTION_ERROR("xfs_dir_leaf_getdents_int(3)",
2072 XFS_ERRLEVEL_LOW, mp, 2072 XFS_ERRLEVEL_LOW, mp,
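
The hunks above replace XFS's old INT_GET/INT_SET + ARCH_CONVERT macros with the kernel's typed byte-order helpers (be16_to_cpu, cpu_to_be32, and friends). A minimal userspace sketch of the same pattern, using ntohs/htons as stand-ins for the kernel helpers; the magic value here is illustrative only, not the real on-disk constant:

#include <stdint.h>
#include <assert.h>
#include <arpa/inet.h>		/* ntohs/htons stand in for the be16 helpers */

#define be16_to_cpu(x)	ntohs(x)
#define cpu_to_be16(x)	htons(x)

#define XFS_DIR_LEAF_MAGIC	0xd2f1	/* illustrative value only */

struct leaf_hdr {
	uint16_t magic;			/* stored big-endian on disk */
};

int main(void)
{
	struct leaf_hdr hdr;

	/* Writer: convert the CPU value to its on-disk form once. */
	hdr.magic = cpu_to_be16(XFS_DIR_LEAF_MAGIC);

	/* Reader: convert back before comparing, as the ASSERTs above do. */
	assert(be16_to_cpu(hdr.magic) == XFS_DIR_LEAF_MAGIC);
	return 0;
}
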
diff --git a/fs/xfs/xfs_dir_sf.h b/fs/xfs/xfs_dir_sf.h
index fe44c6f4d560..5b20b4d3f57d 100644
--- a/fs/xfs/xfs_dir_sf.h
+++ b/fs/xfs/xfs_dir_sf.h
@@ -35,19 +35,21 @@ typedef struct { __uint8_t i[sizeof(xfs_ino_t)]; } xfs_dir_ino_t;
35 * and the elements must be memcpy'd out into a work area to get correct 35
36 * alignment for the inode number fields. 36 * alignment for the inode number fields.
37 */ 37 */
38typedef struct xfs_dir_sf_hdr { /* constant-structure header block */
39 xfs_dir_ino_t parent; /* parent dir inode number */
40 __uint8_t count; /* count of active entries */
41} xfs_dir_sf_hdr_t;
42
43typedef struct xfs_dir_sf_entry {
44 xfs_dir_ino_t inumber; /* referenced inode number */
45 __uint8_t namelen; /* actual length of name (no NULL) */
46 __uint8_t name[1]; /* name */
47} xfs_dir_sf_entry_t;
48
38typedef struct xfs_dir_shortform { 49typedef struct xfs_dir_shortform {
39 struct xfs_dir_sf_hdr { /* constant-structure header block */ 50 xfs_dir_sf_hdr_t hdr;
40 xfs_dir_ino_t parent; /* parent dir inode number */ 51 xfs_dir_sf_entry_t list[1]; /* variable sized array */
41 __uint8_t count; /* count of active entries */
42 } hdr;
43 struct xfs_dir_sf_entry {
44 xfs_dir_ino_t inumber; /* referenced inode number */
45 __uint8_t namelen; /* actual length of name (no NULL) */
46 __uint8_t name[1]; /* name */
47 } list[1]; /* variable sized array */
48} xfs_dir_shortform_t; 52} xfs_dir_shortform_t;
49typedef struct xfs_dir_sf_hdr xfs_dir_sf_hdr_t;
50typedef struct xfs_dir_sf_entry xfs_dir_sf_entry_t;
51 53
52/* 54/*
53 * We generate this then sort it, so that readdirs are returned in 55 * We generate this then sort it, so that readdirs are returned in
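
The typedefs above are hoisted out of xfs_dir_shortform so the header and entry types can be named on their own. Because each entry is namelen bytes longer than its fixed part, the list can only be walked by advancing entry-by-entry; a userspace sketch of that traversal, where field widths and packing are simplified stand-ins for the kernel's and entsize() mirrors what XFS_DIR_SF_ENTSIZE_BYENTRY computes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint8_t i[8]; } xfs_dir_ino_t;

typedef struct xfs_dir_sf_hdr {
	xfs_dir_ino_t	parent;
	uint8_t		count;
} xfs_dir_sf_hdr_t;

typedef struct xfs_dir_sf_entry {
	xfs_dir_ino_t	inumber;
	uint8_t		namelen;
	uint8_t		name[1];	/* really namelen bytes long */
} xfs_dir_sf_entry_t;

/* Fixed part minus the name[1] placeholder byte, plus the real name. */
static size_t entsize(const xfs_dir_sf_entry_t *sfe)
{
	return sizeof(*sfe) - 1 + sfe->namelen;
}

static void walk(const xfs_dir_sf_hdr_t *hdr)
{
	const xfs_dir_sf_entry_t *sfe =
		(const xfs_dir_sf_entry_t *)(hdr + 1);
	int i;

	for (i = 0; i < hdr->count; i++) {
		printf("%.*s\n", (int)sfe->namelen, (const char *)sfe->name);
		sfe = (const xfs_dir_sf_entry_t *)
			((const char *)sfe + entsize(sfe));
	}
}

int main(void)
{
	/* Pack a header plus one entry ("foo") into a flat buffer. */
	uint8_t buf[64] = { 0 };
	xfs_dir_sf_hdr_t *hdr = (xfs_dir_sf_hdr_t *)buf;
	xfs_dir_sf_entry_t *sfe = (xfs_dir_sf_entry_t *)(hdr + 1);

	hdr->count = 1;
	sfe->namelen = 3;
	memcpy(sfe->name, "foo", 3);
	walk(hdr);
	return 0;
}
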
diff --git a/fs/xfs/xfs_dmapi.h b/fs/xfs/xfs_dmapi.h
index b4c7f2bc55a0..00b1540f8108 100644
--- a/fs/xfs/xfs_dmapi.h
+++ b/fs/xfs/xfs_dmapi.h
@@ -191,14 +191,4 @@ typedef enum {
191 191
192extern struct bhv_vfsops xfs_dmops; 192extern struct bhv_vfsops xfs_dmops;
193 193
194#ifdef CONFIG_XFS_DMAPI
195void xfs_dm_init(struct file_system_type *);
196void xfs_dm_exit(struct file_system_type *);
197#define XFS_DM_INIT(fstype) xfs_dm_init(fstype)
198#define XFS_DM_EXIT(fstype) xfs_dm_exit(fstype)
199#else
200#define XFS_DM_INIT(fstype)
201#define XFS_DM_EXIT(fstype)
202#endif
203
204#endif /* __XFS_DMAPI_H__ */ 194#endif /* __XFS_DMAPI_H__ */
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index b4d971b01588..56caa88713ab 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -462,6 +462,7 @@ xfs_fs_counts(
462{ 462{
463 unsigned long s; 463 unsigned long s;
464 464
465 xfs_icsb_sync_counters_lazy(mp);
465 s = XFS_SB_LOCK(mp); 466 s = XFS_SB_LOCK(mp);
466 cnt->freedata = mp->m_sb.sb_fdblocks; 467 cnt->freedata = mp->m_sb.sb_fdblocks;
467 cnt->freertx = mp->m_sb.sb_frextents; 468 cnt->freertx = mp->m_sb.sb_frextents;
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 8f3fae1aa98a..0024892841a3 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -138,8 +138,6 @@ xfs_ialloc_ag_alloc(
138 int version; /* inode version number to use */ 138 int version; /* inode version number to use */
139 int isaligned; /* inode allocation at stripe unit */ 139 int isaligned; /* inode allocation at stripe unit */
140 /* boundary */ 140 /* boundary */
141 xfs_dinode_core_t dic; /* a dinode_core to copy to new */
142 /* inodes */
143 141
144 args.tp = tp; 142 args.tp = tp;
145 args.mp = tp->t_mountp; 143 args.mp = tp->t_mountp;
@@ -250,10 +248,6 @@ xfs_ialloc_ag_alloc(
250 else 248 else
251 version = XFS_DINODE_VERSION_1; 249 version = XFS_DINODE_VERSION_1;
252 250
253 memset(&dic, 0, sizeof(xfs_dinode_core_t));
254 INT_SET(dic.di_magic, ARCH_CONVERT, XFS_DINODE_MAGIC);
255 INT_SET(dic.di_version, ARCH_CONVERT, version);
256
257 for (j = 0; j < nbufs; j++) { 251 for (j = 0; j < nbufs; j++) {
258 /* 252 /*
259 * Get the block. 253 * Get the block.
@@ -266,12 +260,13 @@ xfs_ialloc_ag_alloc(
266 ASSERT(fbuf); 260 ASSERT(fbuf);
267 ASSERT(!XFS_BUF_GETERROR(fbuf)); 261 ASSERT(!XFS_BUF_GETERROR(fbuf));
268 /* 262 /*
269 * Loop over the inodes in this buffer. 263 * Set initial values for the inodes in this buffer.
270 */ 264 */
271 265 xfs_biozero(fbuf, 0, ninodes << args.mp->m_sb.sb_inodelog);
272 for (i = 0; i < ninodes; i++) { 266 for (i = 0; i < ninodes; i++) {
273 free = XFS_MAKE_IPTR(args.mp, fbuf, i); 267 free = XFS_MAKE_IPTR(args.mp, fbuf, i);
274 memcpy(&(free->di_core), &dic, sizeof(xfs_dinode_core_t)); 268 INT_SET(free->di_core.di_magic, ARCH_CONVERT, XFS_DINODE_MAGIC);
269 INT_SET(free->di_core.di_version, ARCH_CONVERT, version);
275 INT_SET(free->di_next_unlinked, ARCH_CONVERT, NULLAGINO); 270 INT_SET(free->di_next_unlinked, ARCH_CONVERT, NULLAGINO);
276 xfs_ialloc_log_di(tp, fbuf, i, 271 xfs_ialloc_log_di(tp, fbuf, i,
277 XFS_DI_CORE_BITS | XFS_DI_NEXT_UNLINKED); 272 XFS_DI_CORE_BITS | XFS_DI_NEXT_UNLINKED);
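
Rather than memcpy'ing a zeroed template core into every new inode, the hunk above zeroes the whole buffer once (xfs_biozero) and then sets only the magic and version per inode. The shape of that change, as a small sketch with made-up field layout (the magic value 0x494e, "IN", is the real XFS one):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct dinode_core {			/* stand-in for xfs_dinode_core_t */
	uint16_t di_magic;
	uint8_t  di_version;
	uint8_t  pad[61];
};

#define DINODE_MAGIC	0x494e		/* "IN" */
#define NINODES		4

int main(void)
{
	struct dinode_core buf[NINODES];
	int i;

	/* One bulk zero of the buffer replaces the per-inode template copy. */
	memset(buf, 0, sizeof(buf));

	/* Then set just the fields that must be non-zero. */
	for (i = 0; i < NINODES; i++) {
		buf[i].di_magic = DINODE_MAGIC;
		buf[i].di_version = 1;
	}
	printf("initialised %d inodes\n", NINODES);
	return 0;
}
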
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 8e380a1fb79b..3ce35a6f700b 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -258,7 +258,7 @@ again:
258 goto finish_inode; 258 goto finish_inode;
259 259
260 } else if (vp != inode_vp) { 260 } else if (vp != inode_vp) {
261 struct inode *inode = LINVFS_GET_IP(inode_vp); 261 struct inode *inode = vn_to_inode(inode_vp);
262 262
263 /* The inode is being torn down, pause and 263 /* The inode is being torn down, pause and
264 * try again. 264 * try again.
@@ -495,7 +495,7 @@ retry:
495 if ((inode = iget_locked(XFS_MTOVFS(mp)->vfs_super, ino))) { 495 if ((inode = iget_locked(XFS_MTOVFS(mp)->vfs_super, ino))) {
496 xfs_inode_t *ip; 496 xfs_inode_t *ip;
497 497
498 vp = LINVFS_GET_VP(inode); 498 vp = vn_from_inode(inode);
499 if (inode->i_state & I_NEW) { 499 if (inode->i_state & I_NEW) {
500 vn_initialize(inode); 500 vn_initialize(inode);
501 error = xfs_iget_core(vp, mp, tp, ino, flags, 501 error = xfs_iget_core(vp, mp, tp, ino, flags,
@@ -617,7 +617,7 @@ xfs_iput_new(xfs_inode_t *ip,
617 uint lock_flags) 617 uint lock_flags)
618{ 618{
619 vnode_t *vp = XFS_ITOV(ip); 619 vnode_t *vp = XFS_ITOV(ip);
620 struct inode *inode = LINVFS_GET_IP(vp); 620 struct inode *inode = vn_to_inode(vp);
621 621
622 vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address); 622 vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address);
623 623
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 1d7f5a7e063e..88a517fad07b 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -76,16 +76,18 @@ STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
76 */ 76 */
77STATIC void 77STATIC void
78xfs_validate_extents( 78xfs_validate_extents(
79 xfs_bmbt_rec_t *ep, 79 xfs_ifork_t *ifp,
80 int nrecs, 80 int nrecs,
81 int disk, 81 int disk,
82 xfs_exntfmt_t fmt) 82 xfs_exntfmt_t fmt)
83{ 83{
84 xfs_bmbt_rec_t *ep;
84 xfs_bmbt_irec_t irec; 85 xfs_bmbt_irec_t irec;
85 xfs_bmbt_rec_t rec; 86 xfs_bmbt_rec_t rec;
86 int i; 87 int i;
87 88
88 for (i = 0; i < nrecs; i++) { 89 for (i = 0; i < nrecs; i++) {
90 ep = xfs_iext_get_ext(ifp, i);
89 rec.l0 = get_unaligned((__uint64_t*)&ep->l0); 91 rec.l0 = get_unaligned((__uint64_t*)&ep->l0);
90 rec.l1 = get_unaligned((__uint64_t*)&ep->l1); 92 rec.l1 = get_unaligned((__uint64_t*)&ep->l1);
91 if (disk) 93 if (disk)
@@ -94,11 +96,10 @@ xfs_validate_extents(
94 xfs_bmbt_get_all(&rec, &irec); 96 xfs_bmbt_get_all(&rec, &irec);
95 if (fmt == XFS_EXTFMT_NOSTATE) 97 if (fmt == XFS_EXTFMT_NOSTATE)
96 ASSERT(irec.br_state == XFS_EXT_NORM); 98 ASSERT(irec.br_state == XFS_EXT_NORM);
97 ep++;
98 } 99 }
99} 100}
100#else /* DEBUG */ 101#else /* DEBUG */
101#define xfs_validate_extents(ep, nrecs, disk, fmt) 102#define xfs_validate_extents(ifp, nrecs, disk, fmt)
102#endif /* DEBUG */ 103#endif /* DEBUG */
103 104
104/* 105/*
@@ -252,7 +253,8 @@ xfs_itobp(
252 xfs_inode_t *ip, 253 xfs_inode_t *ip,
253 xfs_dinode_t **dipp, 254 xfs_dinode_t **dipp,
254 xfs_buf_t **bpp, 255 xfs_buf_t **bpp,
255 xfs_daddr_t bno) 256 xfs_daddr_t bno,
257 uint imap_flags)
256{ 258{
257 xfs_buf_t *bp; 259 xfs_buf_t *bp;
258 int error; 260 int error;
@@ -268,10 +270,9 @@ xfs_itobp(
268 * inode on disk. 270 * inode on disk.
269 */ 271 */
270 imap.im_blkno = bno; 272 imap.im_blkno = bno;
271 error = xfs_imap(mp, tp, ip->i_ino, &imap, XFS_IMAP_LOOKUP); 273 if ((error = xfs_imap(mp, tp, ip->i_ino, &imap,
272 if (error != 0) { 274 XFS_IMAP_LOOKUP | imap_flags)))
273 return error; 275 return error;
274 }
275 276
276 /* 277 /*
277 * If the inode number maps to a block outside the bounds 278 * If the inode number maps to a block outside the bounds
@@ -335,9 +336,10 @@ xfs_itobp(
335 * (if DEBUG kernel) or the first inode in the buffer, otherwise. 336 * (if DEBUG kernel) or the first inode in the buffer, otherwise.
336 */ 337 */
337#ifdef DEBUG 338#ifdef DEBUG
338 ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog; 339 ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 :
340 (BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog);
339#else 341#else
340 ni = 1; 342 ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : 1;
341#endif 343#endif
342 for (i = 0; i < ni; i++) { 344 for (i = 0; i < ni; i++) {
343 int di_ok; 345 int di_ok;
@@ -504,7 +506,7 @@ xfs_iformat(
504 switch (INT_GET(dip->di_core.di_aformat, ARCH_CONVERT)) { 506 switch (INT_GET(dip->di_core.di_aformat, ARCH_CONVERT)) {
505 case XFS_DINODE_FMT_LOCAL: 507 case XFS_DINODE_FMT_LOCAL:
506 atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip); 508 atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
507 size = (int)INT_GET(atp->hdr.totsize, ARCH_CONVERT); 509 size = be16_to_cpu(atp->hdr.totsize);
508 error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size); 510 error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
509 break; 511 break;
510 case XFS_DINODE_FMT_EXTENTS: 512 case XFS_DINODE_FMT_EXTENTS:
@@ -597,7 +599,6 @@ xfs_iformat_extents(
597 xfs_bmbt_rec_t *ep, *dp; 599 xfs_bmbt_rec_t *ep, *dp;
598 xfs_ifork_t *ifp; 600 xfs_ifork_t *ifp;
599 int nex; 601 int nex;
600 int real_size;
601 int size; 602 int size;
602 int i; 603 int i;
603 604
@@ -619,23 +620,20 @@ xfs_iformat_extents(
619 return XFS_ERROR(EFSCORRUPTED); 620 return XFS_ERROR(EFSCORRUPTED);
620 } 621 }
621 622
622 real_size = 0; 623 ifp->if_real_bytes = 0;
623 if (nex == 0) 624 if (nex == 0)
624 ifp->if_u1.if_extents = NULL; 625 ifp->if_u1.if_extents = NULL;
625 else if (nex <= XFS_INLINE_EXTS) 626 else if (nex <= XFS_INLINE_EXTS)
626 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; 627 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
627 else { 628 else
628 ifp->if_u1.if_extents = kmem_alloc(size, KM_SLEEP); 629 xfs_iext_add(ifp, 0, nex);
629 ASSERT(ifp->if_u1.if_extents != NULL); 630
630 real_size = size;
631 }
632 ifp->if_bytes = size; 631 ifp->if_bytes = size;
633 ifp->if_real_bytes = real_size;
634 if (size) { 632 if (size) {
635 dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork); 633 dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
636 xfs_validate_extents(dp, nex, 1, XFS_EXTFMT_INODE(ip)); 634 xfs_validate_extents(ifp, nex, 1, XFS_EXTFMT_INODE(ip));
637 ep = ifp->if_u1.if_extents; 635 for (i = 0; i < nex; i++, dp++) {
638 for (i = 0; i < nex; i++, ep++, dp++) { 636 ep = xfs_iext_get_ext(ifp, i);
639 ep->l0 = INT_GET(get_unaligned((__uint64_t*)&dp->l0), 637 ep->l0 = INT_GET(get_unaligned((__uint64_t*)&dp->l0),
640 ARCH_CONVERT); 638 ARCH_CONVERT);
641 ep->l1 = INT_GET(get_unaligned((__uint64_t*)&dp->l1), 639 ep->l1 = INT_GET(get_unaligned((__uint64_t*)&dp->l1),
@@ -646,7 +644,7 @@ xfs_iformat_extents(
646 if (whichfork != XFS_DATA_FORK || 644 if (whichfork != XFS_DATA_FORK ||
647 XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE) 645 XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
648 if (unlikely(xfs_check_nostate_extents( 646 if (unlikely(xfs_check_nostate_extents(
649 ifp->if_u1.if_extents, nex))) { 647 ifp, 0, nex))) {
650 XFS_ERROR_REPORT("xfs_iformat_extents(2)", 648 XFS_ERROR_REPORT("xfs_iformat_extents(2)",
651 XFS_ERRLEVEL_LOW, 649 XFS_ERRLEVEL_LOW,
652 ip->i_mount); 650 ip->i_mount);
@@ -871,9 +869,8 @@ xfs_iread(
871 * return NULL as well. Set i_blkno to 0 so that xfs_itobp() will 869 * return NULL as well. Set i_blkno to 0 so that xfs_itobp() will
872 * know that this is a new incore inode. 870 * know that this is a new incore inode.
873 */ 871 */
874 error = xfs_itobp(mp, tp, ip, &dip, &bp, bno); 872 error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, 0);
875 873 if (error) {
876 if (error != 0) {
877 kmem_zone_free(xfs_inode_zone, ip); 874 kmem_zone_free(xfs_inode_zone, ip);
878 return error; 875 return error;
879 } 876 }
@@ -1015,6 +1012,7 @@ xfs_iread_extents(
1015{ 1012{
1016 int error; 1013 int error;
1017 xfs_ifork_t *ifp; 1014 xfs_ifork_t *ifp;
1015 xfs_extnum_t nextents;
1018 size_t size; 1016 size_t size;
1019 1017
1020 if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { 1018 if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
@@ -1022,26 +1020,24 @@ xfs_iread_extents(
1022 ip->i_mount); 1020 ip->i_mount);
1023 return XFS_ERROR(EFSCORRUPTED); 1021 return XFS_ERROR(EFSCORRUPTED);
1024 } 1022 }
1025 size = XFS_IFORK_NEXTENTS(ip, whichfork) * (uint)sizeof(xfs_bmbt_rec_t); 1023 nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
1024 size = nextents * sizeof(xfs_bmbt_rec_t);
1026 ifp = XFS_IFORK_PTR(ip, whichfork); 1025 ifp = XFS_IFORK_PTR(ip, whichfork);
1026
1027 /* 1027 /*
1028 * We know that the size is valid (it's checked in iformat_btree) 1028 * We know that the size is valid (it's checked in iformat_btree)
1029 */ 1029 */
1030 ifp->if_u1.if_extents = kmem_alloc(size, KM_SLEEP);
1031 ASSERT(ifp->if_u1.if_extents != NULL);
1032 ifp->if_lastex = NULLEXTNUM; 1030 ifp->if_lastex = NULLEXTNUM;
1033 ifp->if_bytes = ifp->if_real_bytes = (int)size; 1031 ifp->if_bytes = ifp->if_real_bytes = 0;
1034 ifp->if_flags |= XFS_IFEXTENTS; 1032 ifp->if_flags |= XFS_IFEXTENTS;
1033 xfs_iext_add(ifp, 0, nextents);
1035 error = xfs_bmap_read_extents(tp, ip, whichfork); 1034 error = xfs_bmap_read_extents(tp, ip, whichfork);
1036 if (error) { 1035 if (error) {
1037 kmem_free(ifp->if_u1.if_extents, size); 1036 xfs_iext_destroy(ifp);
1038 ifp->if_u1.if_extents = NULL;
1039 ifp->if_bytes = ifp->if_real_bytes = 0;
1040 ifp->if_flags &= ~XFS_IFEXTENTS; 1037 ifp->if_flags &= ~XFS_IFEXTENTS;
1041 return error; 1038 return error;
1042 } 1039 }
1043 xfs_validate_extents((xfs_bmbt_rec_t *)ifp->if_u1.if_extents, 1040 xfs_validate_extents(ifp, nextents, 0, XFS_EXTFMT_INODE(ip));
1044 XFS_IFORK_NEXTENTS(ip, whichfork), 0, XFS_EXTFMT_INODE(ip));
1045 return 0; 1041 return 0;
1046} 1042}
1047 1043
@@ -1376,10 +1372,10 @@ xfs_itrunc_trace(
1376 (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff), 1372 (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
1377 (void*)(unsigned long)(toss_finish & 0xffffffff), 1373 (void*)(unsigned long)(toss_finish & 0xffffffff),
1378 (void*)(unsigned long)current_cpu(), 1374 (void*)(unsigned long)current_cpu(),
1379 (void*)0, 1375 (void*)(unsigned long)current_pid(),
1380 (void*)0, 1376 (void*)NULL,
1381 (void*)0, 1377 (void*)NULL,
1382 (void*)0); 1378 (void*)NULL);
1383} 1379}
1384#else 1380#else
1385#define xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish) 1381#define xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
@@ -1397,6 +1393,16 @@ xfs_itrunc_trace(
1397 * calling into the buffer/page cache code and we can't hold the 1393 * calling into the buffer/page cache code and we can't hold the
1398 * inode lock when we do so. 1394 * inode lock when we do so.
1399 * 1395 *
1396 * We need to wait for any direct I/Os in flight to complete before we
1397 * proceed with the truncate. This is needed to prevent the extents
1398 * being read or written by the direct I/Os from being removed while the
1399 * I/O is in flight as there is no other method of synchronising
1400 * direct I/O with the truncate operation. Also, because we hold
1401 * the IOLOCK in exclusive mode, we prevent new direct I/Os from being
1402 * started until the truncate completes and drops the lock. Essentially,
1403 * the vn_iowait() call forms an I/O barrier that provides strict ordering
1404 * between direct I/Os and the truncate operation.
1405 *
1400 * The flags parameter can have either the value XFS_ITRUNC_DEFINITE 1406 * The flags parameter can have either the value XFS_ITRUNC_DEFINITE
1401 * or XFS_ITRUNC_MAYBE. The XFS_ITRUNC_MAYBE value should be used 1407 * or XFS_ITRUNC_MAYBE. The XFS_ITRUNC_MAYBE value should be used
1402 * in the case that the caller is locking things out of order and 1408 * in the case that the caller is locking things out of order and
@@ -1424,6 +1430,9 @@ xfs_itruncate_start(
1424 1430
1425 mp = ip->i_mount; 1431 mp = ip->i_mount;
1426 vp = XFS_ITOV(ip); 1432 vp = XFS_ITOV(ip);
1433
1434 vn_iowait(vp); /* wait for the completion of any pending DIOs */
1435
1427 /* 1436 /*
1428 * Call VOP_TOSS_PAGES() or VOP_FLUSHINVAL_PAGES() to get rid of pages and buffers 1437 * Call VOP_TOSS_PAGES() or VOP_FLUSHINVAL_PAGES() to get rid of pages and buffers
1429 * overlapping the region being removed. We have to use 1438 * overlapping the region being removed. We have to use
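
The new comment block in this hunk describes vn_iowait() as an I/O barrier: truncate drains all in-flight direct I/O while the exclusive IOLOCK keeps new I/O from starting. A minimal pthread sketch of that counter-and-wait shape; all names here are illustrative, not the kernel's:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  drained = PTHREAD_COND_INITIALIZER;
static int io_count;			/* in-flight direct I/Os */

static void io_start(void)		/* called as each DIO is issued */
{
	pthread_mutex_lock(&lock);
	io_count++;
	pthread_mutex_unlock(&lock);
}

static void io_end(void)		/* called as each DIO completes */
{
	pthread_mutex_lock(&lock);
	if (--io_count == 0)
		pthread_cond_broadcast(&drained);
	pthread_mutex_unlock(&lock);
}

static void iowait(void)		/* the vn_iowait()-style barrier */
{
	pthread_mutex_lock(&lock);
	while (io_count > 0)
		pthread_cond_wait(&drained, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	io_start();
	io_end();
	iowait();			/* returns at once: count is zero */
	printf("all I/O drained, safe to truncate\n");
	return 0;
}
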
@@ -1899,7 +1908,7 @@ xfs_iunlink(
1899 * Here we put the head pointer into our next pointer, 1908 * Here we put the head pointer into our next pointer,
1900 * and then we fall through to point the head at us. 1909 * and then we fall through to point the head at us.
1901 */ 1910 */
1902 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0); 1911 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
1903 if (error) { 1912 if (error) {
1904 return error; 1913 return error;
1905 } 1914 }
@@ -2008,7 +2017,7 @@ xfs_iunlink_remove(
2008 * of dealing with the buffer when there is no need to 2017 * of dealing with the buffer when there is no need to
2009 * change it. 2018 * change it.
2010 */ 2019 */
2011 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0); 2020 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
2012 if (error) { 2021 if (error) {
2013 cmn_err(CE_WARN, 2022 cmn_err(CE_WARN,
2014 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", 2023 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
@@ -2070,7 +2079,7 @@ xfs_iunlink_remove(
2070 * Now last_ibp points to the buffer previous to us on 2079 * Now last_ibp points to the buffer previous to us on
2071 * the unlinked list. Pull us from the list. 2080 * the unlinked list. Pull us from the list.
2072 */ 2081 */
2073 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0); 2082 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
2074 if (error) { 2083 if (error) {
2075 cmn_err(CE_WARN, 2084 cmn_err(CE_WARN,
2076 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", 2085 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
@@ -2476,92 +2485,6 @@ xfs_iroot_realloc(
2476 2485
2477 2486
2478/* 2487/*
2479 * This is called when the amount of space needed for if_extents
2480 * is increased or decreased. The change in size is indicated by
2481 * the number of extents that need to be added or deleted in the
2482 * ext_diff parameter.
2483 *
2484 * If the amount of space needed has decreased below the size of the
2485 * inline buffer, then switch to using the inline buffer. Otherwise,
2486 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
2487 * to what is needed.
2488 *
2489 * ip -- the inode whose if_extents area is changing
2490 * ext_diff -- the change in the number of extents, positive or negative,
2491 * requested for the if_extents array.
2492 */
2493void
2494xfs_iext_realloc(
2495 xfs_inode_t *ip,
2496 int ext_diff,
2497 int whichfork)
2498{
2499 int byte_diff;
2500 xfs_ifork_t *ifp;
2501 int new_size;
2502 uint rnew_size;
2503
2504 if (ext_diff == 0) {
2505 return;
2506 }
2507
2508 ifp = XFS_IFORK_PTR(ip, whichfork);
2509 byte_diff = ext_diff * (uint)sizeof(xfs_bmbt_rec_t);
2510 new_size = (int)ifp->if_bytes + byte_diff;
2511 ASSERT(new_size >= 0);
2512
2513 if (new_size == 0) {
2514 if (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext) {
2515 ASSERT(ifp->if_real_bytes != 0);
2516 kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
2517 }
2518 ifp->if_u1.if_extents = NULL;
2519 rnew_size = 0;
2520 } else if (new_size <= sizeof(ifp->if_u2.if_inline_ext)) {
2521 /*
2522 * If the valid extents can fit in if_inline_ext,
2523 * copy them from the malloc'd vector and free it.
2524 */
2525 if (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext) {
2526 /*
2527 * For now, empty files are format EXTENTS,
2528 * so the if_extents pointer is null.
2529 */
2530 if (ifp->if_u1.if_extents) {
2531 memcpy(ifp->if_u2.if_inline_ext,
2532 ifp->if_u1.if_extents, new_size);
2533 kmem_free(ifp->if_u1.if_extents,
2534 ifp->if_real_bytes);
2535 }
2536 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
2537 }
2538 rnew_size = 0;
2539 } else {
2540 rnew_size = new_size;
2541 if ((rnew_size & (rnew_size - 1)) != 0)
2542 rnew_size = xfs_iroundup(rnew_size);
2543 /*
2544 * Stuck with malloc/realloc.
2545 */
2546 if (ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext) {
2547 ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
2548 kmem_alloc(rnew_size, KM_SLEEP);
2549 memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
2550 sizeof(ifp->if_u2.if_inline_ext));
2551 } else if (rnew_size != ifp->if_real_bytes) {
2552 ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
2553 kmem_realloc(ifp->if_u1.if_extents,
2554 rnew_size,
2555 ifp->if_real_bytes,
2556 KM_NOFS);
2557 }
2558 }
2559 ifp->if_real_bytes = rnew_size;
2560 ifp->if_bytes = new_size;
2561}
2562
2563
2564/*
2565 * This is called when the amount of space needed for if_data 2488 * This is called when the amount of space needed for if_data
2566 * is increased or decreased. The change in size is indicated by 2489 * is increased or decreased. The change in size is indicated by
2567 * the number of bytes that need to be added or deleted in the 2490 * the number of bytes that need to be added or deleted in the
@@ -2720,12 +2643,11 @@ xfs_idestroy_fork(
2720 ifp->if_real_bytes = 0; 2643 ifp->if_real_bytes = 0;
2721 } 2644 }
2722 } else if ((ifp->if_flags & XFS_IFEXTENTS) && 2645 } else if ((ifp->if_flags & XFS_IFEXTENTS) &&
2723 (ifp->if_u1.if_extents != NULL) && 2646 ((ifp->if_flags & XFS_IFEXTIREC) ||
2724 (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)) { 2647 ((ifp->if_u1.if_extents != NULL) &&
2648 (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
2725 ASSERT(ifp->if_real_bytes != 0); 2649 ASSERT(ifp->if_real_bytes != 0);
2726 kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes); 2650 xfs_iext_destroy(ifp);
2727 ifp->if_u1.if_extents = NULL;
2728 ifp->if_real_bytes = 0;
2729 } 2651 }
2730 ASSERT(ifp->if_u1.if_extents == NULL || 2652 ASSERT(ifp->if_u1.if_extents == NULL ||
2731 ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext); 2653 ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
@@ -2814,7 +2736,7 @@ xfs_iunpin(
2814 2736
2815 /* make sync come back and flush this inode */ 2737 /* make sync come back and flush this inode */
2816 if (vp) { 2738 if (vp) {
2817 struct inode *inode = LINVFS_GET_IP(vp); 2739 struct inode *inode = vn_to_inode(vp);
2818 2740
2819 if (!(inode->i_state & I_NEW)) 2741 if (!(inode->i_state & I_NEW))
2820 mark_inode_dirty_sync(inode); 2742 mark_inode_dirty_sync(inode);
@@ -2902,16 +2824,15 @@ xfs_iextents_copy(
2902 * the delayed ones. There must be at least one 2824 * the delayed ones. There must be at least one
2903 * non-delayed extent. 2825 * non-delayed extent.
2904 */ 2826 */
2905 ep = ifp->if_u1.if_extents;
2906 dest_ep = buffer; 2827 dest_ep = buffer;
2907 copied = 0; 2828 copied = 0;
2908 for (i = 0; i < nrecs; i++) { 2829 for (i = 0; i < nrecs; i++) {
2830 ep = xfs_iext_get_ext(ifp, i);
2909 start_block = xfs_bmbt_get_startblock(ep); 2831 start_block = xfs_bmbt_get_startblock(ep);
2910 if (ISNULLSTARTBLOCK(start_block)) { 2832 if (ISNULLSTARTBLOCK(start_block)) {
2911 /* 2833 /*
2912 * It's a delayed allocation extent, so skip it. 2834 * It's a delayed allocation extent, so skip it.
2913 */ 2835 */
2914 ep++;
2915 continue; 2836 continue;
2916 } 2837 }
2917 2838
@@ -2921,11 +2842,10 @@ xfs_iextents_copy(
2921 put_unaligned(INT_GET(ep->l1, ARCH_CONVERT), 2842 put_unaligned(INT_GET(ep->l1, ARCH_CONVERT),
2922 (__uint64_t*)&dest_ep->l1); 2843 (__uint64_t*)&dest_ep->l1);
2923 dest_ep++; 2844 dest_ep++;
2924 ep++;
2925 copied++; 2845 copied++;
2926 } 2846 }
2927 ASSERT(copied != 0); 2847 ASSERT(copied != 0);
2928 xfs_validate_extents(buffer, copied, 1, XFS_EXTFMT_INODE(ip)); 2848 xfs_validate_extents(ifp, copied, 1, XFS_EXTFMT_INODE(ip));
2929 2849
2930 return (copied * (uint)sizeof(xfs_bmbt_rec_t)); 2850 return (copied * (uint)sizeof(xfs_bmbt_rec_t));
2931} 2851}
@@ -2995,8 +2915,10 @@ xfs_iflush_fork(
2995 case XFS_DINODE_FMT_EXTENTS: 2915 case XFS_DINODE_FMT_EXTENTS:
2996 ASSERT((ifp->if_flags & XFS_IFEXTENTS) || 2916 ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
2997 !(iip->ili_format.ilf_fields & extflag[whichfork])); 2917 !(iip->ili_format.ilf_fields & extflag[whichfork]));
2998 ASSERT((ifp->if_u1.if_extents != NULL) || (ifp->if_bytes == 0)); 2918 ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) ||
2999 ASSERT((ifp->if_u1.if_extents == NULL) || (ifp->if_bytes > 0)); 2919 (ifp->if_bytes == 0));
2920 ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) ||
2921 (ifp->if_bytes > 0));
3000 if ((iip->ili_format.ilf_fields & extflag[whichfork]) && 2922 if ((iip->ili_format.ilf_fields & extflag[whichfork]) &&
3001 (ifp->if_bytes > 0)) { 2923 (ifp->if_bytes > 0)) {
3002 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); 2924 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
@@ -3114,8 +3036,8 @@ xfs_iflush(
3114 /* 3036 /*
3115 * Get the buffer containing the on-disk inode. 3037 * Get the buffer containing the on-disk inode.
3116 */ 3038 */
3117 error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0); 3039 error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0);
3118 if (error != 0) { 3040 if (error) {
3119 xfs_ifunlock(ip); 3041 xfs_ifunlock(ip);
3120 return error; 3042 return error;
3121 } 3043 }
@@ -3610,7 +3532,7 @@ xfs_iaccess(
3610{ 3532{
3611 int error; 3533 int error;
3612 mode_t orgmode = mode; 3534 mode_t orgmode = mode;
3613 struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip)); 3535 struct inode *inode = vn_to_inode(XFS_ITOV(ip));
3614 3536
3615 if (mode & S_IWUSR) { 3537 if (mode & S_IWUSR) {
3616 umode_t imode = inode->i_mode; 3538 umode_t imode = inode->i_mode;
@@ -3704,3 +3626,1100 @@ xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra)
3704 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL); 3626 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
3705} 3627}
3706#endif 3628#endif
3629
3630/*
3631 * Return a pointer to the extent record at file index idx.
3632 */
3633xfs_bmbt_rec_t *
3634xfs_iext_get_ext(
3635 xfs_ifork_t *ifp, /* inode fork pointer */
3636 xfs_extnum_t idx) /* index of target extent */
3637{
3638 ASSERT(idx >= 0);
3639 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
3640 return ifp->if_u1.if_ext_irec->er_extbuf;
3641 } else if (ifp->if_flags & XFS_IFEXTIREC) {
3642 xfs_ext_irec_t *erp; /* irec pointer */
3643 int erp_idx = 0; /* irec index */
3644 xfs_extnum_t page_idx = idx; /* ext index in target list */
3645
3646 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3647 return &erp->er_extbuf[page_idx];
3648 } else if (ifp->if_bytes) {
3649 return &ifp->if_u1.if_extents[idx];
3650 } else {
3651 return NULL;
3652 }
3653}
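
With xfs_iext_get_ext() in place, callers index extents through the accessor instead of walking a raw if_extents pointer (the ep++ loops removed above), so the same loop works whether the fork stores its records inline, in a direct list, or behind the indirection array. The idiom, reduced to a userspace sketch with stand-in types:

#include <stdio.h>

typedef struct { unsigned long long l0, l1; } bmbt_rec_t;

typedef struct {
	bmbt_rec_t	*extents;	/* stand-in for one of the layouts */
	int		nextents;
} ifork_t;

/* Stand-in for xfs_iext_get_ext(): hides how the records are stored. */
static bmbt_rec_t *iext_get_ext(ifork_t *ifp, int idx)
{
	return (idx < ifp->nextents) ? &ifp->extents[idx] : NULL;
}

int main(void)
{
	bmbt_rec_t recs[3] = { { 1, 2 }, { 3, 4 }, { 5, 6 } };
	ifork_t ifp = { recs, 3 };
	int i;

	for (i = 0; i < ifp.nextents; i++) {
		bmbt_rec_t *ep = iext_get_ext(&ifp, i);
		printf("extent %d: %llu %llu\n", i, ep->l0, ep->l1);
	}
	return 0;
}
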
3654
3655/*
3656 * Insert new item(s) into the extent records for incore inode
3657 * fork 'ifp'. 'count' new items are inserted at index 'idx'.
3658 */
3659void
3660xfs_iext_insert(
3661 xfs_ifork_t *ifp, /* inode fork pointer */
3662 xfs_extnum_t idx, /* starting index of new items */
3663 xfs_extnum_t count, /* number of inserted items */
3664 xfs_bmbt_irec_t *new) /* items to insert */
3665{
3666 xfs_bmbt_rec_t *ep; /* extent record pointer */
3667 xfs_extnum_t i; /* extent record index */
3668
3669 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3670 xfs_iext_add(ifp, idx, count);
3671 for (i = idx; i < idx + count; i++, new++) {
3672 ep = xfs_iext_get_ext(ifp, i);
3673 xfs_bmbt_set_all(ep, new);
3674 }
3675}
3676
3677/*
3678 * This is called when the amount of space required for incore file
3679 * extents needs to be increased. The ext_diff parameter stores the
3680 * number of new extents being added and the idx parameter contains
3681 * the extent index where the new extents will be added. If the new
3682 * extents are being appended, then we just need to (re)allocate and
3683 * initialize the space. Otherwise, if the new extents are being
3684 * inserted into the middle of the existing entries, a bit more work
3685 * is required to make room for the new extents to be inserted. The
3686 * caller is responsible for filling in the new extent entries upon
3687 * return.
3688 */
3689void
3690xfs_iext_add(
3691 xfs_ifork_t *ifp, /* inode fork pointer */
3692 xfs_extnum_t idx, /* index to begin adding exts */
3693 int ext_diff) /* number of extents to add */
3694{
3695 int byte_diff; /* new bytes being added */
3696 int new_size; /* size of extents after adding */
3697 xfs_extnum_t nextents; /* number of extents in file */
3698
3699 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3700 ASSERT((idx >= 0) && (idx <= nextents));
3701 byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
3702 new_size = ifp->if_bytes + byte_diff;
3703 /*
3704 * If the new number of extents (nextents + ext_diff)
3705 * fits inside the inode, then continue to use the inline
3706 * extent buffer.
3707 */
3708 if (nextents + ext_diff <= XFS_INLINE_EXTS) {
3709 if (idx < nextents) {
3710 memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
3711 &ifp->if_u2.if_inline_ext[idx],
3712 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
3713 memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
3714 }
3715 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3716 ifp->if_real_bytes = 0;
3717 ifp->if_lastex = nextents + ext_diff;
3718 }
3719 /*
3720 * Otherwise use a linear (direct) extent list.
3721 * If the extents are currently inside the inode,
3722 * xfs_iext_realloc_direct will switch us from
3723 * inline to direct extent allocation mode.
3724 */
3725 else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
3726 xfs_iext_realloc_direct(ifp, new_size);
3727 if (idx < nextents) {
3728 memmove(&ifp->if_u1.if_extents[idx + ext_diff],
3729 &ifp->if_u1.if_extents[idx],
3730 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
3731 memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
3732 }
3733 }
3734 /* Indirection array */
3735 else {
3736 xfs_ext_irec_t *erp;
3737 int erp_idx = 0;
3738 int page_idx = idx;
3739
3740 ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
3741 if (ifp->if_flags & XFS_IFEXTIREC) {
3742 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
3743 } else {
3744 xfs_iext_irec_init(ifp);
3745 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3746 erp = ifp->if_u1.if_ext_irec;
3747 }
3748 /* Extents fit in target extent page */
3749 if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
3750 if (page_idx < erp->er_extcount) {
3751 memmove(&erp->er_extbuf[page_idx + ext_diff],
3752 &erp->er_extbuf[page_idx],
3753 (erp->er_extcount - page_idx) *
3754 sizeof(xfs_bmbt_rec_t));
3755 memset(&erp->er_extbuf[page_idx], 0, byte_diff);
3756 }
3757 erp->er_extcount += ext_diff;
3758 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3759 }
3760 /* Insert a new extent page */
3761 else if (erp) {
3762 xfs_iext_add_indirect_multi(ifp,
3763 erp_idx, page_idx, ext_diff);
3764 }
3765 /*
3766 * If extent(s) are being appended to the last page in
3767 * the indirection array and the new extent(s) don't fit
3768 * in the page, then erp is NULL and erp_idx is set to
3769 * the next index needed in the indirection array.
3770 */
3771 else {
3772 int count = ext_diff;
3773
3774 while (count) {
3775 erp = xfs_iext_irec_new(ifp, erp_idx);
3776 erp->er_extcount = count;
3777 count -= MIN(count, (int)XFS_LINEAR_EXTS);
3778 if (count) {
3779 erp_idx++;
3780 }
3781 }
3782 }
3783 }
3784 ifp->if_bytes = new_size;
3785}
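
xfs_iext_add() above chooses among three layouts by the resulting extent count: the inline buffer inside the inode, a single reallocated (direct) list, or the indirection array. The decision alone, as a sketch with assumed threshold values (in the kernel XFS_LINEAR_EXTS is derived from XFS_IEXT_BUFSZ / sizeof(xfs_bmbt_rec_t)):

#include <stdio.h>

#define XFS_INLINE_EXTS	2	/* assumed: extents that fit in the inode */
#define XFS_LINEAR_EXTS	256	/* assumed: extents per direct buffer */

enum ext_layout { EXT_INLINE, EXT_DIRECT, EXT_INDIRECT };

static enum ext_layout layout_for(int nextents)
{
	if (nextents <= XFS_INLINE_EXTS)
		return EXT_INLINE;
	if (nextents <= XFS_LINEAR_EXTS)
		return EXT_DIRECT;
	return EXT_INDIRECT;
}

int main(void)
{
	printf("%d\n", layout_for(2));		/* EXT_INLINE (0) */
	printf("%d\n", layout_for(100));	/* EXT_DIRECT (1) */
	printf("%d\n", layout_for(5000));	/* EXT_INDIRECT (2) */
	return 0;
}
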
3786
3787/*
3788 * This is called when incore extents are being added to the indirection
3789 * array and the new extents do not fit in the target extent list. The
3790 * erp_idx parameter contains the irec index for the target extent list
3791 * in the indirection array, and the idx parameter contains the extent
3792 * index within the list. The number of extents being added is stored
3793 * in the count parameter.
3794 *
3795 * |-------| |-------|
3796 * | | | | idx - number of extents before idx
3797 * | idx | | count |
3798 * | | | | count - number of extents being inserted at idx
3799 * |-------| |-------|
3800 * | count | | nex2 | nex2 - number of extents after idx + count
3801 * |-------| |-------|
3802 */
3803void
3804xfs_iext_add_indirect_multi(
3805 xfs_ifork_t *ifp, /* inode fork pointer */
3806 int erp_idx, /* target extent irec index */
3807 xfs_extnum_t idx, /* index within target list */
3808 int count) /* new extents being added */
3809{
3810 int byte_diff; /* new bytes being added */
3811 xfs_ext_irec_t *erp; /* pointer to irec entry */
3812 xfs_extnum_t ext_diff; /* number of extents to add */
3813 xfs_extnum_t ext_cnt; /* new extents still needed */
3814 xfs_extnum_t nex2; /* extents after idx + count */
3815 xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */
3816 int nlists; /* number of irec's (lists) */
3817
3818 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3819 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3820 nex2 = erp->er_extcount - idx;
3821 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3822
3823 /*
3824 * Save second part of target extent list
3825 * (all extents past idx + count) */
3826 if (nex2) {
3827 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3828 nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_SLEEP);
3829 memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
3830 erp->er_extcount -= nex2;
3831 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
3832 memset(&erp->er_extbuf[idx], 0, byte_diff);
3833 }
3834
3835 /*
3836 * Add the new extents to the end of the target
3837 * list, then allocate new irec record(s) and
3838 * extent buffer(s) as needed to store the rest
3839 * of the new extents.
3840 */
3841 ext_cnt = count;
3842 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
3843 if (ext_diff) {
3844 erp->er_extcount += ext_diff;
3845 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3846 ext_cnt -= ext_diff;
3847 }
3848 while (ext_cnt) {
3849 erp_idx++;
3850 erp = xfs_iext_irec_new(ifp, erp_idx);
3851 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
3852 erp->er_extcount = ext_diff;
3853 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3854 ext_cnt -= ext_diff;
3855 }
3856
3857 /* Add nex2 extents back to indirection array */
3858 if (nex2) {
3859 xfs_extnum_t ext_avail;
3860 int i;
3861
3862 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3863 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
3864 i = 0;
3865 /*
3866 * If nex2 extents fit in the current page, append
3867 * nex2_ep after the new extents.
3868 */
3869 if (nex2 <= ext_avail) {
3870 i = erp->er_extcount;
3871 }
3872 /*
3873 * Otherwise, check if space is available in the
3874 * next page.
3875 */
3876 else if ((erp_idx < nlists - 1) &&
3877 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
3878 ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
3879 erp_idx++;
3880 erp++;
3881 /* Create a hole for nex2 extents */
3882 memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
3883 erp->er_extcount * sizeof(xfs_bmbt_rec_t));
3884 }
3885 /*
3886 * Final choice, create a new extent page for
3887 * nex2 extents.
3888 */
3889 else {
3890 erp_idx++;
3891 erp = xfs_iext_irec_new(ifp, erp_idx);
3892 }
3893 memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
3894 kmem_free(nex2_ep, byte_diff);
3895 erp->er_extcount += nex2;
3896 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
3897 }
3898}
3899
3900/*
3901 * This is called when the amount of space required for incore file
3902 * extents needs to be decreased. The ext_diff parameter stores the
3903 * number of extents to be removed and the idx parameter contains
3904 * the extent index where the extents will be removed from.
3905 *
3906 * If the amount of space needed has decreased below the linear
3907 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
3908 * extent array. Otherwise, use kmem_realloc() to adjust the
3909 * size to what is needed.
3910 */
3911void
3912xfs_iext_remove(
3913 xfs_ifork_t *ifp, /* inode fork pointer */
3914 xfs_extnum_t idx, /* index to begin removing exts */
3915 int ext_diff) /* number of extents to remove */
3916{
3917 xfs_extnum_t nextents; /* number of extents in file */
3918 int new_size; /* size of extents after removal */
3919
3920 ASSERT(ext_diff > 0);
3921 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3922 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
3923
3924 if (new_size == 0) {
3925 xfs_iext_destroy(ifp);
3926 } else if (ifp->if_flags & XFS_IFEXTIREC) {
3927 xfs_iext_remove_indirect(ifp, idx, ext_diff);
3928 } else if (ifp->if_real_bytes) {
3929 xfs_iext_remove_direct(ifp, idx, ext_diff);
3930 } else {
3931 xfs_iext_remove_inline(ifp, idx, ext_diff);
3932 }
3933 ifp->if_bytes = new_size;
3934}
3935
3936/*
3937 * This removes ext_diff extents from the inline buffer, beginning
3938 * at extent index idx.
3939 */
3940void
3941xfs_iext_remove_inline(
3942 xfs_ifork_t *ifp, /* inode fork pointer */
3943 xfs_extnum_t idx, /* index to begin removing exts */
3944 int ext_diff) /* number of extents to remove */
3945{
3946 int nextents; /* number of extents in file */
3947
3948 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3949 ASSERT(idx < XFS_INLINE_EXTS);
3950 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3951 ASSERT(((nextents - ext_diff) > 0) &&
3952 (nextents - ext_diff) < XFS_INLINE_EXTS);
3953
3954 if (idx + ext_diff < nextents) {
3955 memmove(&ifp->if_u2.if_inline_ext[idx],
3956 &ifp->if_u2.if_inline_ext[idx + ext_diff],
3957 (nextents - (idx + ext_diff)) *
3958 sizeof(xfs_bmbt_rec_t));
3959 memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
3960 0, ext_diff * sizeof(xfs_bmbt_rec_t));
3961 } else {
3962 memset(&ifp->if_u2.if_inline_ext[idx], 0,
3963 ext_diff * sizeof(xfs_bmbt_rec_t));
3964 }
3965}
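
xfs_iext_remove_inline() uses the standard remove-from-middle idiom: memmove the tail down over the removed records, then memset the vacated slots at the end. The same idiom on a plain array:

#include <stdio.h>
#include <string.h>

int main(void)
{
	int a[6] = { 10, 20, 30, 40, 50, 60 };
	int n = 6, idx = 1, diff = 2;	/* remove a[1] and a[2] */
	int i;

	/* Slide the tail down over the removed elements... */
	memmove(&a[idx], &a[idx + diff],
		(n - (idx + diff)) * sizeof(a[0]));
	/* ...and zero the now-unused slots at the end. */
	memset(&a[n - diff], 0, diff * sizeof(a[0]));

	for (i = 0; i < n; i++)
		printf("%d ", a[i]);	/* 10 40 50 60 0 0 */
	printf("\n");
	return 0;
}
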
3966
3967/*
3968 * This removes ext_diff extents from a linear (direct) extent list,
3969 * beginning at extent index idx. If the extents are being removed
3970 * from the end of the list (i.e. truncate) then we just need to re-
3971 * allocate the list to remove the extra space. Otherwise, if the
3972 * extents are being removed from the middle of the existing extent
3973 * entries, then we first need to move the extent records beginning
3974 * at idx + ext_diff up in the list to overwrite the records being
3975 * removed, then remove the extra space via kmem_realloc.
3976 */
3977void
3978xfs_iext_remove_direct(
3979 xfs_ifork_t *ifp, /* inode fork pointer */
3980 xfs_extnum_t idx, /* index to begin removing exts */
3981 int ext_diff) /* number of extents to remove */
3982{
3983 xfs_extnum_t nextents; /* number of extents in file */
3984 int new_size; /* size of extents after removal */
3985
3986 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3987 new_size = ifp->if_bytes -
3988 (ext_diff * sizeof(xfs_bmbt_rec_t));
3989 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3990
3991 if (new_size == 0) {
3992 xfs_iext_destroy(ifp);
3993 return;
3994 }
3995 /* Move extents up in the list (if needed) */
3996 if (idx + ext_diff < nextents) {
3997 memmove(&ifp->if_u1.if_extents[idx],
3998 &ifp->if_u1.if_extents[idx + ext_diff],
3999 (nextents - (idx + ext_diff)) *
4000 sizeof(xfs_bmbt_rec_t));
4001 }
4002 memset(&ifp->if_u1.if_extents[nextents - ext_diff],
4003 0, ext_diff * sizeof(xfs_bmbt_rec_t));
4004 /*
4005 * Reallocate the direct extent list. If the extents
4006 * will fit inside the inode then xfs_iext_realloc_direct
4007 * will switch from direct to inline extent allocation
4008 * mode for us.
4009 */
4010 xfs_iext_realloc_direct(ifp, new_size);
4011 ifp->if_bytes = new_size;
4012}
4013
4014/*
4015 * This is called when incore extents are being removed from the
4016 * indirection array and the extents being removed span multiple extent
4017 * buffers. The idx parameter contains the file extent index where we
4018 * want to begin removing extents, and the count parameter contains
4019 * how many extents need to be removed.
4020 *
4021 * |-------| |-------|
4022 * | nex1 | | | nex1 - number of extents before idx
4023 * |-------| | count |
4024 * | | | | count - number of extents being removed at idx
4025 * | count | |-------|
4026 * | | | nex2 | nex2 - number of extents after idx + count
4027 * |-------| |-------|
4028 */
4029void
4030xfs_iext_remove_indirect(
4031 xfs_ifork_t *ifp, /* inode fork pointer */
4032 xfs_extnum_t idx, /* index to begin removing extents */
4033 int count) /* number of extents to remove */
4034{
4035 xfs_ext_irec_t *erp; /* indirection array pointer */
4036 int erp_idx = 0; /* indirection array index */
4037 xfs_extnum_t ext_cnt; /* extents left to remove */
4038 xfs_extnum_t ext_diff; /* extents to remove in current list */
4039 xfs_extnum_t nex1; /* number of extents before idx */
4040 xfs_extnum_t nex2; /* extents after idx + count */
4041 int nlists; /* entries in indirecton array */
4042 int page_idx = idx; /* index in target extent list */
4043
4044 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4045 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
4046 ASSERT(erp != NULL);
4047 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4048 nex1 = page_idx;
4049 ext_cnt = count;
4050 while (ext_cnt) {
4051 nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
4052 ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
4053 /*
4054 * Check for deletion of entire list;
4055 * xfs_iext_irec_remove() updates extent offsets.
4056 */
4057 if (ext_diff == erp->er_extcount) {
4058 xfs_iext_irec_remove(ifp, erp_idx);
4059 ext_cnt -= ext_diff;
4060 nex1 = 0;
4061 if (ext_cnt) {
4062 ASSERT(erp_idx < ifp->if_real_bytes /
4063 XFS_IEXT_BUFSZ);
4064 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4065 nex1 = 0;
4066 continue;
4067 } else {
4068 break;
4069 }
4070 }
4071 /* Move extents up (if needed) */
4072 if (nex2) {
4073 memmove(&erp->er_extbuf[nex1],
4074 &erp->er_extbuf[nex1 + ext_diff],
4075 nex2 * sizeof(xfs_bmbt_rec_t));
4076 }
4077 /* Zero out rest of page */
4078 memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
4079 ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
4080 /* Update remaining counters */
4081 erp->er_extcount -= ext_diff;
4082 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
4083 ext_cnt -= ext_diff;
4084 nex1 = 0;
4085 erp_idx++;
4086 erp++;
4087 }
4088 ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
4089 xfs_iext_irec_compact(ifp);
4090}
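/*
 * Illustrative sketch (not part of this patch): the nex1/ext_diff/nex2
 * bookkeeping pictured above, replayed in userspace with plain ints in
 * place of the xfs types. The buffer capacity and starting values are
 * invented for the example; the real code also unlinks emptied buffers
 * via xfs_iext_irec_remove().
 */
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	int extcount[] = { 4, 4, 4 };	/* per-buffer er_extcount */
	int nex1 = 2;			/* extents kept before idx */
	int ext_cnt = 6;		/* extents to remove in total */
	int i = 0;

	while (ext_cnt) {
		/* extents removed from this buffer this pass */
		int ext_diff = MIN(ext_cnt, extcount[i] - nex1);
		/* extents left after the removed range in this buffer */
		int nex2 = MAX(extcount[i] - (nex1 + ext_cnt), 0);

		printf("buf %d: keep %d, drop %d, tail %d\n",
		       i, nex1, ext_diff, nex2);
		extcount[i] -= ext_diff;
		ext_cnt -= ext_diff;
		nex1 = 0;	/* later buffers are trimmed from the front */
		i++;
	}
	return 0;
}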
4091
4092/*
4093 * Create, destroy, or resize a linear (direct) block of extents.
4094 */
4095void
4096xfs_iext_realloc_direct(
4097 xfs_ifork_t *ifp, /* inode fork pointer */
4098 int new_size) /* new size of extents */
4099{
4100 int rnew_size; /* real new size of extents */
4101
4102 rnew_size = new_size;
4103
4104 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
4105 ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
4106 (new_size != ifp->if_real_bytes)));
4107
4108 /* Free extent records */
4109 if (new_size == 0) {
4110 xfs_iext_destroy(ifp);
4111 }
4112 /* Resize direct extent list and zero any new bytes */
4113 else if (ifp->if_real_bytes) {
4114 /* Check if extents will fit inside the inode */
4115 if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
4116 xfs_iext_direct_to_inline(ifp, new_size /
4117 (uint)sizeof(xfs_bmbt_rec_t));
4118 ifp->if_bytes = new_size;
4119 return;
4120 }
4121 if ((new_size & (new_size - 1)) != 0) {
4122 rnew_size = xfs_iroundup(new_size);
4123 }
4124 if (rnew_size != ifp->if_real_bytes) {
4125 ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
4126 kmem_realloc(ifp->if_u1.if_extents,
4127 rnew_size,
4128 ifp->if_real_bytes,
4129 KM_SLEEP);
4130 }
4131 if (rnew_size > ifp->if_real_bytes) {
4132 memset(&ifp->if_u1.if_extents[ifp->if_bytes /
4133 (uint)sizeof(xfs_bmbt_rec_t)], 0,
4134 rnew_size - ifp->if_real_bytes);
4135 }
4136 }
4137 /*
4138 * Switch from the inline extent buffer to a direct
4139 * extent list. Be sure to include the inline extent
4140 * bytes in new_size.
4141 */
4142 else {
4143 new_size += ifp->if_bytes;
4144 if ((new_size & (new_size - 1)) != 0) {
4145 rnew_size = xfs_iroundup(new_size);
4146 }
4147 xfs_iext_inline_to_direct(ifp, rnew_size);
4148 }
4149 ifp->if_real_bytes = rnew_size;
4150 ifp->if_bytes = new_size;
4151}
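/*
 * Illustrative sketch (not part of this patch): the (x & (x - 1)) test
 * used above, plus a generic stand-in for xfs_iroundup(), which lives
 * elsewhere in this file. The sizes are arbitrary sample values.
 */
#include <stdio.h>

static unsigned int roundup_pow2(unsigned int v)	/* assumes v >= 1 */
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int sizes[] = { 16, 48, 64, 100 };

	for (int i = 0; i < 4; i++) {
		unsigned int n = sizes[i];

		/* x & (x - 1) is zero exactly for powers of two */
		if ((n & (n - 1)) != 0)
			printf("%u rounds up to %u\n", n, roundup_pow2(n));
		else
			printf("%u is already a power of two\n", n);
	}
	return 0;
}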
4152
4153/*
4154 * Switch from linear (direct) extent records to inline buffer.
4155 */
4156void
4157xfs_iext_direct_to_inline(
4158 xfs_ifork_t *ifp, /* inode fork pointer */
4159 xfs_extnum_t nextents) /* number of extents in file */
4160{
4161 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
4162 ASSERT(nextents <= XFS_INLINE_EXTS);
4163 /*
4164 * The inline buffer was zeroed when we switched
4165 * from inline to direct extent allocation mode,
4166 * so we don't need to clear it here.
4167 */
4168 memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
4169 nextents * sizeof(xfs_bmbt_rec_t));
4170 kmem_free(ifp->if_u1.if_extents, KM_SLEEP);
4171 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
4172 ifp->if_real_bytes = 0;
4173}
4174
4175/*
4176 * Switch from inline buffer to linear (direct) extent records.
4177 * new_size should already be rounded up to the next power of 2
4178 * by the caller (when appropriate), so use new_size as it is.
4179 * However, since new_size may be rounded up, we can't update
4180 * if_bytes here. It is the caller's responsibility to update
4181 * if_bytes upon return.
4182 */
4183void
4184xfs_iext_inline_to_direct(
4185 xfs_ifork_t *ifp, /* inode fork pointer */
4186 int new_size) /* new size of extent list, in bytes */
4187{
4188 ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
4189 kmem_alloc(new_size, KM_SLEEP);
4190 memset(ifp->if_u1.if_extents, 0, new_size);
4191 if (ifp->if_bytes) {
4192 memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
4193 ifp->if_bytes);
4194 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
4195 sizeof(xfs_bmbt_rec_t));
4196 }
4197 ifp->if_real_bytes = new_size;
4198}
4199
4200/*
4201 * Resize an extent indirection array to new_size bytes.
4202 */
4203void
4204xfs_iext_realloc_indirect(
4205 xfs_ifork_t *ifp, /* inode fork pointer */
4206 int new_size) /* new indirection array size */
4207{
4208 int nlists; /* number of irec's (ex lists) */
4209 int size; /* current indirection array size */
4210
4211 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4212 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4213 size = nlists * sizeof(xfs_ext_irec_t);
4214 ASSERT(ifp->if_real_bytes);
4215 ASSERT((new_size >= 0) && (new_size != size));
4216 if (new_size == 0) {
4217 xfs_iext_destroy(ifp);
4218 } else {
4219 ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
4220 kmem_realloc(ifp->if_u1.if_ext_irec,
4221 new_size, size, KM_SLEEP);
4222 }
4223}
4224
4225/*
4226 * Switch from indirection array to linear (direct) extent allocations.
4227 */
4228void
4229xfs_iext_indirect_to_direct(
4230 xfs_ifork_t *ifp) /* inode fork pointer */
4231{
4232 xfs_bmbt_rec_t *ep; /* extent record pointer */
4233 xfs_extnum_t nextents; /* number of extents in file */
4234 int size; /* size of file extents */
4235
4236 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4237 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4238 ASSERT(nextents <= XFS_LINEAR_EXTS);
4239 size = nextents * sizeof(xfs_bmbt_rec_t);
4240
4241 xfs_iext_irec_compact_full(ifp);
4242 ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);
4243
4244 ep = ifp->if_u1.if_ext_irec->er_extbuf;
4245 kmem_free(ifp->if_u1.if_ext_irec, sizeof(xfs_ext_irec_t));
4246 ifp->if_flags &= ~XFS_IFEXTIREC;
4247 ifp->if_u1.if_extents = ep;
4248 ifp->if_bytes = size;
4249 if (nextents < XFS_LINEAR_EXTS) {
4250 xfs_iext_realloc_direct(ifp, size);
4251 }
4252}
4253
4254/*
4255 * Free incore file extents.
4256 */
4257void
4258xfs_iext_destroy(
4259 xfs_ifork_t *ifp) /* inode fork pointer */
4260{
4261 if (ifp->if_flags & XFS_IFEXTIREC) {
4262 int erp_idx;
4263 int nlists;
4264
4265 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4266 for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
4267 xfs_iext_irec_remove(ifp, erp_idx);
4268 }
4269 ifp->if_flags &= ~XFS_IFEXTIREC;
4270 } else if (ifp->if_real_bytes) {
4271 kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
4272 } else if (ifp->if_bytes) {
4273 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
4274 sizeof(xfs_bmbt_rec_t));
4275 }
4276 ifp->if_u1.if_extents = NULL;
4277 ifp->if_real_bytes = 0;
4278 ifp->if_bytes = 0;
4279}
4280
4281/*
4282 * Return a pointer to the extent record for file system block bno.
4283 */
4284xfs_bmbt_rec_t * /* pointer to found extent record */
4285xfs_iext_bno_to_ext(
4286 xfs_ifork_t *ifp, /* inode fork pointer */
4287 xfs_fileoff_t bno, /* block number to search for */
4288 xfs_extnum_t *idxp) /* index of target extent */
4289{
4290 xfs_bmbt_rec_t *base; /* pointer to first extent */
4291 xfs_filblks_t blockcount = 0; /* number of blocks in extent */
4292 xfs_bmbt_rec_t *ep = NULL; /* pointer to target extent */
4293 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */
4294 int high; /* upper boundary in search */
4295 xfs_extnum_t idx = 0; /* index of target extent */
4296 int low; /* lower boundary in search */
4297 xfs_extnum_t nextents; /* number of file extents */
4298 xfs_fileoff_t startoff = 0; /* start offset of extent */
4299
4300 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4301 if (nextents == 0) {
4302 *idxp = 0;
4303 return NULL;
4304 }
4305 low = 0;
4306 if (ifp->if_flags & XFS_IFEXTIREC) {
4307 /* Find target extent list */
4308 int erp_idx = 0;
4309 erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
4310 base = erp->er_extbuf;
4311 high = erp->er_extcount - 1;
4312 } else {
4313 base = ifp->if_u1.if_extents;
4314 high = nextents - 1;
4315 }
4316 /* Binary search extent records */
4317 while (low <= high) {
4318 idx = (low + high) >> 1;
4319 ep = base + idx;
4320 startoff = xfs_bmbt_get_startoff(ep);
4321 blockcount = xfs_bmbt_get_blockcount(ep);
4322 if (bno < startoff) {
4323 high = idx - 1;
4324 } else if (bno >= startoff + blockcount) {
4325 low = idx + 1;
4326 } else {
4327 /* Convert back to file-based extent index */
4328 if (ifp->if_flags & XFS_IFEXTIREC) {
4329 idx += erp->er_extoff;
4330 }
4331 *idxp = idx;
4332 return ep;
4333 }
4334 }
4335 /* Convert back to file-based extent index */
4336 if (ifp->if_flags & XFS_IFEXTIREC) {
4337 idx += erp->er_extoff;
4338 }
4339 if (bno >= startoff + blockcount) {
4340 if (++idx == nextents) {
4341 ep = NULL;
4342 } else {
4343 ep = xfs_iext_get_ext(ifp, idx);
4344 }
4345 }
4346 *idxp = idx;
4347 return ep;
4348}
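/*
 * Illustrative sketch (not part of this patch): the containment-based
 * binary search above, reduced to a userspace helper. The extent map
 * below is invented; -1 plays the role of the NULL/hole result.
 */
#include <stdio.h>

struct ext { unsigned long startoff, len; };	/* [startoff, startoff+len) */

static int bno_to_ext(const struct ext *base, int n, unsigned long bno)
{
	int low = 0, high = n - 1;

	while (low <= high) {
		int idx = (low + high) >> 1;

		if (bno < base[idx].startoff)
			high = idx - 1;		/* search left half */
		else if (bno >= base[idx].startoff + base[idx].len)
			low = idx + 1;		/* search right half */
		else
			return idx;	/* bno falls inside this extent */
	}
	return -1;		/* bno lands in a hole between extents */
}

int main(void)
{
	struct ext map[] = { { 0, 8 }, { 16, 4 }, { 32, 16 } };

	printf("%d %d %d\n",
	       bno_to_ext(map, 3, 5),	/* 0: inside the first extent */
	       bno_to_ext(map, 3, 17),	/* 1: inside the second extent */
	       bno_to_ext(map, 3, 10));	/* -1: hole between extents */
	return 0;
}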
4349
4350/*
4351 * Return a pointer to the indirection array entry containing the
4352 * extent record for filesystem block bno. Store the index of the
4353 * target irec in *erp_idxp.
4354 */
4355xfs_ext_irec_t * /* pointer to found extent record */
4356xfs_iext_bno_to_irec(
4357 xfs_ifork_t *ifp, /* inode fork pointer */
4358 xfs_fileoff_t bno, /* block number to search for */
4359 int *erp_idxp) /* irec index of target ext list */
4360{
4361 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */
4362 xfs_ext_irec_t *erp_next; /* next indirection array entry */
4363 int erp_idx; /* indirection array index */
4364 int nlists; /* number of extent irec's (lists) */
4365 int high; /* binary search upper limit */
4366 int low; /* binary search lower limit */
4367
4368 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4369 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4370 erp_idx = 0;
4371 low = 0;
4372 high = nlists - 1;
4373 while (low <= high) {
4374 erp_idx = (low + high) >> 1;
4375 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4376 erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
4377 if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
4378 high = erp_idx - 1;
4379 } else if (erp_next && bno >=
4380 xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
4381 low = erp_idx + 1;
4382 } else {
4383 break;
4384 }
4385 }
4386 *erp_idxp = erp_idx;
4387 return erp;
4388}
4389
4390/*
4391 * Return a pointer to the indirection array entry containing the
4392 * extent record at file extent index *idxp. Store the index of the
4393 * target irec in *erp_idxp and store the page index of the target
4394 * extent record in *idxp.
4395 */
4396xfs_ext_irec_t *
4397xfs_iext_idx_to_irec(
4398 xfs_ifork_t *ifp, /* inode fork pointer */
4399 xfs_extnum_t *idxp, /* extent index (file -> page) */
4400 int *erp_idxp, /* pointer to target irec */
4401 int realloc) /* new bytes were just added */
4402{
4403 xfs_ext_irec_t *prev; /* pointer to previous irec */
4404 xfs_ext_irec_t *erp = NULL; /* pointer to current irec */
4405 int erp_idx; /* indirection array index */
4406 int nlists; /* number of irec's (ex lists) */
4407 int high; /* binary search upper limit */
4408 int low; /* binary search lower limit */
4409 xfs_extnum_t page_idx = *idxp; /* extent index in target list */
4410
4411 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4412 ASSERT(page_idx >= 0 && page_idx <=
4413 ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
4414 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4415 erp_idx = 0;
4416 low = 0;
4417 high = nlists - 1;
4418
4419 /* Binary search extent irec's */
4420 while (low <= high) {
4421 erp_idx = (low + high) >> 1;
4422 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4423 prev = erp_idx > 0 ? erp - 1 : NULL;
4424 if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
4425 realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
4426 high = erp_idx - 1;
4427 } else if (page_idx > erp->er_extoff + erp->er_extcount ||
4428 (page_idx == erp->er_extoff + erp->er_extcount &&
4429 !realloc)) {
4430 low = erp_idx + 1;
4431 } else if (page_idx == erp->er_extoff + erp->er_extcount &&
4432 erp->er_extcount == XFS_LINEAR_EXTS) {
4433 ASSERT(realloc);
4434 page_idx = 0;
4435 erp_idx++;
4436 erp = erp_idx < nlists ? erp + 1 : NULL;
4437 break;
4438 } else {
4439 page_idx -= erp->er_extoff;
4440 break;
4441 }
4442 }
4443 *idxp = page_idx;
4444 *erp_idxp = erp_idx;
4445 return erp;
4446}
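/*
 * Illustrative sketch (not part of this patch): mapping a file-wide
 * extent index to (irec, index-within-buffer) with the er_extoff and
 * er_extcount fields, as the function above does. The real code binary
 * searches; a linear scan keeps the example short. Values are invented.
 */
#include <stdio.h>

struct irec { int extoff, extcount; };

int main(void)
{
	struct irec irecs[] = { { 0, 512 }, { 512, 300 }, { 812, 41 } };
	int file_idx = 700;	/* extent index across the whole fork */

	for (int i = 0; i < 3; i++) {
		if (file_idx >= irecs[i].extoff &&
		    file_idx < irecs[i].extoff + irecs[i].extcount) {
			/* prints "irec 1, page index 188" */
			printf("irec %d, page index %d\n",
			       i, file_idx - irecs[i].extoff);
			break;
		}
	}
	return 0;
}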
4447
4448/*
4449 * Allocate and initialize an indirection array once the space needed
4450 * for incore extents increases above XFS_IEXT_BUFSZ.
4451 */
4452void
4453xfs_iext_irec_init(
4454 xfs_ifork_t *ifp) /* inode fork pointer */
4455{
4456 xfs_ext_irec_t *erp; /* indirection array pointer */
4457 xfs_extnum_t nextents; /* number of extents in file */
4458
4459 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
4460 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4461 ASSERT(nextents <= XFS_LINEAR_EXTS);
4462
4463 erp = (xfs_ext_irec_t *)
4464 kmem_alloc(sizeof(xfs_ext_irec_t), KM_SLEEP);
4465
4466 if (nextents == 0) {
4467 ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
4468 kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
4469 } else if (!ifp->if_real_bytes) {
4470 xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
4471 } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
4472 xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
4473 }
4474 erp->er_extbuf = ifp->if_u1.if_extents;
4475 erp->er_extcount = nextents;
4476 erp->er_extoff = 0;
4477
4478 ifp->if_flags |= XFS_IFEXTIREC;
4479 ifp->if_real_bytes = XFS_IEXT_BUFSZ;
4480 ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
4481 ifp->if_u1.if_ext_irec = erp;
4482
4483 return;
4484}
4485
4486/*
4487 * Allocate and initialize a new entry in the indirection array.
4488 */
4489xfs_ext_irec_t *
4490xfs_iext_irec_new(
4491 xfs_ifork_t *ifp, /* inode fork pointer */
4492 int erp_idx) /* index for new irec */
4493{
4494 xfs_ext_irec_t *erp; /* indirection array pointer */
4495 int i; /* loop counter */
4496 int nlists; /* number of irec's (ex lists) */
4497
4498 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4499 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4500
4501 /* Resize indirection array */
4502 xfs_iext_realloc_indirect(ifp, ++nlists *
4503 sizeof(xfs_ext_irec_t));
4504 /*
4505 * Move records down in the array so the
4506 * new page can use erp_idx.
4507 */
4508 erp = ifp->if_u1.if_ext_irec;
4509 for (i = nlists - 1; i > erp_idx; i--) {
4510 memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
4511 }
4512 ASSERT(i == erp_idx);
4513
4514 /* Initialize new extent record */
4515 erp = ifp->if_u1.if_ext_irec;
4516 erp[erp_idx].er_extbuf = (xfs_bmbt_rec_t *)
4517 kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
4518 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
4519 memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
4520 erp[erp_idx].er_extcount = 0;
4521 erp[erp_idx].er_extoff = erp_idx > 0 ?
4522 erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
4523 return &erp[erp_idx];
4524}
4525
4526/*
4527 * Remove a record from the indirection array.
4528 */
4529void
4530xfs_iext_irec_remove(
4531 xfs_ifork_t *ifp, /* inode fork pointer */
4532 int erp_idx) /* irec index to remove */
4533{
4534 xfs_ext_irec_t *erp; /* indirection array pointer */
4535 int i; /* loop counter */
4536 int nlists; /* number of irec's (ex lists) */
4537
4538 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4539 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4540 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4541 if (erp->er_extbuf) {
4542 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
4543 -erp->er_extcount);
4544 kmem_free(erp->er_extbuf, XFS_IEXT_BUFSZ);
4545 }
4546 /* Compact extent records */
4547 erp = ifp->if_u1.if_ext_irec;
4548 for (i = erp_idx; i < nlists - 1; i++) {
4549 memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
4550 }
4551 /*
4552 * Manually free the last extent record from the indirection
4553 * array. A call to xfs_iext_realloc_indirect() with a size
4554 * of zero would result in a call to xfs_iext_destroy() which
4555 * would in turn call this function again, creating a nasty
4556 * infinite loop.
4557 */
4558 if (--nlists) {
4559 xfs_iext_realloc_indirect(ifp,
4560 nlists * sizeof(xfs_ext_irec_t));
4561 } else {
4562 kmem_free(ifp->if_u1.if_ext_irec,
4563 sizeof(xfs_ext_irec_t));
4564 }
4565 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
4566}
4567
4568/*
4569 * This is called to clean up large amounts of unused memory allocated
4570 * by the indirection array. Before compacting anything though, verify
4571 * that the indirection array is still needed and switch back to the
4572 * linear extent list (or even the inline buffer) if possible. The
4573 * compaction policy is as follows:
4574 *
4575 * Full Compaction: Extents fit into a single page (or inline buffer)
4576 * Full Compaction: Extents occupy less than one eighth of allocated space
4577 * Partial Compaction: Extents occupy between one eighth and one half of allocated space
4578 * No Compaction: Extents occupy at least one half of allocated space
4579 */
4580void
4581xfs_iext_irec_compact(
4582 xfs_ifork_t *ifp) /* inode fork pointer */
4583{
4584 xfs_extnum_t nextents; /* number of extents in file */
4585 int nlists; /* number of irec's (ex lists) */
4586
4587 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4588 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4589 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4590
4591 if (nextents == 0) {
4592 xfs_iext_destroy(ifp);
4593 } else if (nextents <= XFS_INLINE_EXTS) {
4594 xfs_iext_indirect_to_direct(ifp);
4595 xfs_iext_direct_to_inline(ifp, nextents);
4596 } else if (nextents <= XFS_LINEAR_EXTS) {
4597 xfs_iext_indirect_to_direct(ifp);
4598 } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 3) {
4599 xfs_iext_irec_compact_full(ifp);
4600 } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
4601 xfs_iext_irec_compact_pages(ifp);
4602 }
4603}
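/*
 * Illustrative sketch (not part of this patch): the >> 3 and >> 1 in the
 * function above are the one-eighth and one-half occupancy thresholds
 * from the policy comment. The buffer size and counts are invented.
 */
#include <stdio.h>

#define LINEAR_EXTS 256		/* stand-in for XFS_LINEAR_EXTS */

int main(void)
{
	int nlists = 8;				/* allocated extent buffers */
	int capacity = nlists * LINEAR_EXTS;	/* extents that would fit */
	int nextents = 200;			/* extents actually in use */

	if (nextents < (capacity >> 3))
		printf("full compaction: below 1/8 of %d\n", capacity);
	else if (nextents < (capacity >> 1))
		printf("page compaction: below 1/2 of %d\n", capacity);
	else
		printf("no compaction\n");
	return 0;
}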
4604
4605/*
4606 * Combine extents from neighboring extent pages.
4607 */
4608void
4609xfs_iext_irec_compact_pages(
4610 xfs_ifork_t *ifp) /* inode fork pointer */
4611{
4612 xfs_ext_irec_t *erp, *erp_next;/* pointers to irec entries */
4613 int erp_idx = 0; /* indirection array index */
4614 int nlists; /* number of irec's (ex lists) */
4615
4616 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4617 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4618 while (erp_idx < nlists - 1) {
4619 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4620 erp_next = erp + 1;
4621 if (erp_next->er_extcount <=
4622 (XFS_LINEAR_EXTS - erp->er_extcount)) {
4623 memmove(&erp->er_extbuf[erp->er_extcount],
4624 erp_next->er_extbuf, erp_next->er_extcount *
4625 sizeof(xfs_bmbt_rec_t));
4626 erp->er_extcount += erp_next->er_extcount;
4627 /*
4628 * Free page before removing extent record
4629 * so er_extoffs don't get modified in
4630 * xfs_iext_irec_remove.
4631 */
4632 kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ);
4633 erp_next->er_extbuf = NULL;
4634 xfs_iext_irec_remove(ifp, erp_idx + 1);
4635 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4636 } else {
4637 erp_idx++;
4638 }
4639 }
4640}
4641
4642/*
4643 * Fully compact the extent records managed by the indirection array.
4644 */
4645void
4646xfs_iext_irec_compact_full(
4647 xfs_ifork_t *ifp) /* inode fork pointer */
4648{
4649 xfs_bmbt_rec_t *ep, *ep_next; /* extent record pointers */
4650 xfs_ext_irec_t *erp, *erp_next; /* extent irec pointers */
4651 int erp_idx = 0; /* extent irec index */
4652 int ext_avail; /* empty entries in ex list */
4653 int ext_diff; /* number of exts to add */
4654 int nlists; /* number of irec's (ex lists) */
4655
4656 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4657 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4658 erp = ifp->if_u1.if_ext_irec;
4659 ep = &erp->er_extbuf[erp->er_extcount];
4660 erp_next = erp + 1;
4661 ep_next = erp_next->er_extbuf;
4662 while (erp_idx < nlists - 1) {
4663 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
4664 ext_diff = MIN(ext_avail, erp_next->er_extcount);
4665 memcpy(ep, ep_next, ext_diff * sizeof(xfs_bmbt_rec_t));
4666 erp->er_extcount += ext_diff;
4667 erp_next->er_extcount -= ext_diff;
4668 /* Remove next page */
4669 if (erp_next->er_extcount == 0) {
4670 /*
4671 * Free page before removing extent record
4672 * so er_extoffs don't get modified in
4673 * xfs_iext_irec_remove.
4674 */
4675 kmem_free(erp_next->er_extbuf,
4676 XFS_IEXT_BUFSZ);
4677 erp_next->er_extbuf = NULL;
4678 xfs_iext_irec_remove(ifp, erp_idx + 1);
4679 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4680 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4681 /* Update next page */
4682 } else {
4683 /* Move rest of page up to become next new page */
4684 memmove(erp_next->er_extbuf, ep_next,
4685 erp_next->er_extcount * sizeof(xfs_bmbt_rec_t));
4686 ep_next = erp_next->er_extbuf;
4687 memset(&ep_next[erp_next->er_extcount], 0,
4688 (XFS_LINEAR_EXTS - erp_next->er_extcount) *
4689 sizeof(xfs_bmbt_rec_t));
4690 }
4691 if (erp->er_extcount == XFS_LINEAR_EXTS) {
4692 erp_idx++;
4693 if (erp_idx < nlists)
4694 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4695 else
4696 break;
4697 }
4698 ep = &erp->er_extbuf[erp->er_extcount];
4699 erp_next = erp + 1;
4700 ep_next = erp_next->er_extbuf;
4701 }
4702}
4703
4704/*
4705 * This is called to update the er_extoff field in the indirection
4706 * array when extents have been added or removed from one of the
4707 * extent lists. erp_idx contains the irec index to begin updating
4708 * at and ext_diff contains the number of extents that were added
4709 * or removed.
4710 */
4711void
4712xfs_iext_irec_update_extoffs(
4713 xfs_ifork_t *ifp, /* inode fork pointer */
4714 int erp_idx, /* irec index to update */
4715 int ext_diff) /* number of new extents */
4716{
4717 int i; /* loop counter */
4718 int nlists; /* number of irec's (ex lists) */
4719
4720 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4721 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4722 for (i = erp_idx; i < nlists; i++) {
4723 ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
4724 }
4725}
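/*
 * Illustrative sketch (not part of this patch): the er_extoff fix-up
 * performed above. After 4 extents vanish from the buffer at index 1,
 * every later buffer's offset slides down by 4. Values are invented.
 */
#include <stdio.h>

int main(void)
{
	int extoff[] = { 0, 512, 812, 1024 };	/* per-irec er_extoff */
	int erp_idx = 2;	/* first irec after the modified one */
	int ext_diff = -4;	/* negative: extents were removed */

	for (int i = erp_idx; i < 4; i++)
		extoff[i] += ext_diff;
	for (int i = 0; i < 4; i++)
		printf("irec %d: er_extoff %d\n", i, extoff[i]);
	return 0;
}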
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 1cfbcf18ce86..39ef9c36ea55 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -25,10 +25,37 @@
 #define XFS_ATTR_FORK 1
 
 /*
+ * The following xfs_ext_irec_t struct introduces a second (top) level
+ * to the in-core extent allocation scheme. These structs are allocated
+ * in a contiguous block, creating an indirection array where each entry
+ * (irec) contains a pointer to a buffer of in-core extent records which
+ * it manages. Each extent buffer is 4k in size, since 4k is the system
+ * page size on Linux i386 and systems with larger page sizes don't seem
+ * to gain much, if anything, by using their native page size as the
+ * extent buffer size. Also, using 4k extent buffers everywhere provides
+ * a consistent interface for CXFS across different platforms.
+ *
+ * There is currently no limit on the number of irec's (extent lists)
+ * allowed, so heavily fragmented files may require an indirection array
+ * which spans multiple system pages of memory. The number of extents
+ * which would require this amount of contiguous memory is very large
+ * and should not cause problems in the foreseeable future. However,
+ * if the memory needed for the contiguous array ever becomes a problem,
+ * it is possible that a third level of indirection may be required.
+ */
+typedef struct xfs_ext_irec {
+	xfs_bmbt_rec_t *er_extbuf;	/* block of extent records */
+	xfs_extnum_t er_extoff;		/* extent offset in file */
+	xfs_extnum_t er_extcount;	/* number of extents in page/block */
+} xfs_ext_irec_t;
+
+/*
  * File incore extent information, present for each of data & attr forks.
  */
-#define XFS_INLINE_EXTS	2
-#define XFS_INLINE_DATA	32
+#define XFS_IEXT_BUFSZ		4096
+#define XFS_LINEAR_EXTS		(XFS_IEXT_BUFSZ / (uint)sizeof(xfs_bmbt_rec_t))
+#define XFS_INLINE_EXTS		2
+#define XFS_INLINE_DATA		32
 typedef struct xfs_ifork {
 	int if_bytes;		/* bytes in if_u1 */
 	int if_real_bytes;	/* bytes allocated in if_u1 */
@@ -39,6 +66,7 @@ typedef struct xfs_ifork {
 	xfs_extnum_t if_lastex;	/* last if_extents used */
 	union {
 		xfs_bmbt_rec_t *if_extents;	/* linear map file exts */
+		xfs_ext_irec_t *if_ext_irec;	/* irec map file exts */
 		char *if_data;			/* inline file data */
 	} if_u1;
 	union {
@@ -61,20 +89,16 @@ typedef struct xfs_ifork {
 /*
  * Per-fork incore inode flags.
  */
-#define XFS_IFINLINE	0x0001	/* Inline data is read in */
-#define XFS_IFEXTENTS	0x0002	/* All extent pointers are read in */
-#define XFS_IFBROOT	0x0004	/* i_broot points to the bmap b-tree root */
+#define XFS_IFINLINE	0x01	/* Inline data is read in */
+#define XFS_IFEXTENTS	0x02	/* All extent pointers are read in */
+#define XFS_IFBROOT	0x04	/* i_broot points to the bmap b-tree root */
+#define XFS_IFEXTIREC	0x08	/* Indirection array of extent blocks */
 
 /*
- * Flags for xfs_imap() and xfs_dilocate().
+ * Flags for xfs_itobp(), xfs_imap() and xfs_dilocate().
  */
 #define XFS_IMAP_LOOKUP		0x1
-
-/*
- * Maximum number of extent pointers in if_u1.if_extents.
- */
-#define XFS_MAX_INCORE_EXTENTS	32768
-
+#define XFS_IMAP_BULKSTAT	0x2
 
 #ifdef __KERNEL__
 struct bhv_desc;
@@ -398,7 +422,7 @@ int xfs_finish_reclaim_all(struct xfs_mount *, int);
  */
 int xfs_itobp(struct xfs_mount *, struct xfs_trans *,
 		xfs_inode_t *, xfs_dinode_t **, struct xfs_buf **,
-		xfs_daddr_t);
+		xfs_daddr_t, uint);
 int xfs_iread(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
 		xfs_inode_t **, xfs_daddr_t);
 int xfs_iread_extents(struct xfs_trans *, xfs_inode_t *, int);
@@ -440,6 +464,32 @@ xfs_inode_t *xfs_vtoi(struct vnode *vp);
 
 void xfs_synchronize_atime(xfs_inode_t *);
 
+xfs_bmbt_rec_t *xfs_iext_get_ext(xfs_ifork_t *, xfs_extnum_t);
+void xfs_iext_insert(xfs_ifork_t *, xfs_extnum_t, xfs_extnum_t,
+		xfs_bmbt_irec_t *);
+void xfs_iext_add(xfs_ifork_t *, xfs_extnum_t, int);
+void xfs_iext_add_indirect_multi(xfs_ifork_t *, int, xfs_extnum_t, int);
+void xfs_iext_remove(xfs_ifork_t *, xfs_extnum_t, int);
+void xfs_iext_remove_inline(xfs_ifork_t *, xfs_extnum_t, int);
+void xfs_iext_remove_direct(xfs_ifork_t *, xfs_extnum_t, int);
+void xfs_iext_remove_indirect(xfs_ifork_t *, xfs_extnum_t, int);
+void xfs_iext_realloc_direct(xfs_ifork_t *, int);
+void xfs_iext_realloc_indirect(xfs_ifork_t *, int);
+void xfs_iext_indirect_to_direct(xfs_ifork_t *);
+void xfs_iext_direct_to_inline(xfs_ifork_t *, xfs_extnum_t);
+void xfs_iext_inline_to_direct(xfs_ifork_t *, int);
+void xfs_iext_destroy(xfs_ifork_t *);
+xfs_bmbt_rec_t *xfs_iext_bno_to_ext(xfs_ifork_t *, xfs_fileoff_t, int *);
+xfs_ext_irec_t *xfs_iext_bno_to_irec(xfs_ifork_t *, xfs_fileoff_t, int *);
+xfs_ext_irec_t *xfs_iext_idx_to_irec(xfs_ifork_t *, xfs_extnum_t *, int *, int);
+void xfs_iext_irec_init(xfs_ifork_t *);
+xfs_ext_irec_t *xfs_iext_irec_new(xfs_ifork_t *, int);
+void xfs_iext_irec_remove(xfs_ifork_t *, int);
+void xfs_iext_irec_compact(xfs_ifork_t *);
+void xfs_iext_irec_compact_pages(xfs_ifork_t *);
+void xfs_iext_irec_compact_full(xfs_ifork_t *);
+void xfs_iext_irec_update_extoffs(xfs_ifork_t *, int, int);
+
 #define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount))
 
 #ifdef DEBUG
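/*
 * Illustrative sketch (not part of this patch): the arithmetic behind
 * the new constants. It assumes an on-disk bmbt record is two 64-bit
 * words (16 bytes), which is what the 4k buffer is divided by.
 */
#include <stdio.h>
#include <stdint.h>

struct rec { uint64_t l0, l1; };	/* assumed xfs_bmbt_rec_t layout */

int main(void)
{
	unsigned int bufsz = 4096;	/* XFS_IEXT_BUFSZ */
	unsigned int linear = bufsz / (unsigned int)sizeof(struct rec);

	/* 4096 / 16 = 256 extents per buffer (XFS_LINEAR_EXTS) */
	printf("extents per buffer: %u\n", linear);
	/* a million-extent fork needs ceil(1000000 / 256) = 3907 irecs */
	printf("irecs for 1M extents: %u\n",
	       (1000000u + linear - 1) / linear);
	return 0;
}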
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 788917f355c4..d5dfedcb8922 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -76,7 +76,7 @@ xfs_iomap_enter_trace(
 		(void *)((unsigned long)count),
 		(void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
 		(void *)((unsigned long)(io->io_new_size & 0xffffffff)),
-		(void *)NULL,
+		(void *)((unsigned long)current_pid()),
 		(void *)NULL,
 		(void *)NULL,
 		(void *)NULL,
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index c59450e1be40..32247b6bfee7 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -562,7 +562,8 @@ xfs_bulkstat(
 				if (bp)
 					xfs_buf_relse(bp);
 				error = xfs_itobp(mp, NULL, ip,
-						&dip, &bp, bno);
+						&dip, &bp, bno,
+						XFS_IMAP_BULKSTAT);
 				if (!error)
 					clustidx = ip->i_boffset / mp->m_sb.sb_inodesize;
 				kmem_zone_free(xfs_inode_zone, ip);
@@ -570,6 +571,8 @@ xfs_bulkstat(
 					mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK,
 					XFS_RANDOM_BULKSTAT_READ_CHUNK)) {
 				bp = NULL;
+				ubleft = 0;
+				rval = error;
 				break;
 			}
 		}
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 7d46cbd6a07a..add13f507ed2 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -3249,7 +3249,7 @@ xlog_recover_process_iunlinks(
 			 * next inode in the bucket.
 			 */
 			error = xfs_itobp(mp, NULL, ip, &dip,
-					&ibp, 0);
+					&ibp, 0, 0);
 			ASSERT(error || (dip != NULL));
 		}
 
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 62188ea392c7..20e8abc16d18 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -51,11 +51,32 @@ STATIC int xfs_uuid_mount(xfs_mount_t *);
 STATIC void xfs_uuid_unmount(xfs_mount_t *mp);
 STATIC void xfs_unmountfs_wait(xfs_mount_t *);
 
+
+#ifdef HAVE_PERCPU_SB
+STATIC void xfs_icsb_destroy_counters(xfs_mount_t *);
+STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t, int);
+STATIC void xfs_icsb_sync_counters(xfs_mount_t *);
+STATIC int xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t,
+		int, int);
+STATIC int xfs_icsb_modify_counters_locked(xfs_mount_t *, xfs_sb_field_t,
+		int, int);
+STATIC int xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
+
+#else
+
+#define xfs_icsb_destroy_counters(mp) do { } while (0)
+#define xfs_icsb_balance_counter(mp, a, b) do { } while (0)
+#define xfs_icsb_sync_counters(mp) do { } while (0)
+#define xfs_icsb_modify_counters(mp, a, b, c) do { } while (0)
+#define xfs_icsb_modify_counters_locked(mp, a, b, c) do { } while (0)
+
+#endif
+
 static const struct {
 	short offset;
 	short type;	/* 0 = integer
 			 * 1 = binary / string (no translation)
 			 */
 } xfs_sb_info[] = {
 	{ offsetof(xfs_sb_t, sb_magicnum), 0 },
 	{ offsetof(xfs_sb_t, sb_blocksize), 0 },
@@ -113,7 +134,11 @@ xfs_mount_init(void)
 {
 	xfs_mount_t *mp;
 
-	mp = kmem_zalloc(sizeof(*mp), KM_SLEEP);
+	mp = kmem_zalloc(sizeof(xfs_mount_t), KM_SLEEP);
+
+	if (xfs_icsb_init_counters(mp)) {
+		mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
+	}
 
 	AIL_LOCKINIT(&mp->m_ail_lock, "xfs_ail");
 	spinlock_init(&mp->m_sb_lock, "xfs_sb");
@@ -136,8 +161,8 @@ xfs_mount_init(void)
  */
 void
 xfs_mount_free(
 	xfs_mount_t *mp,
 	int remove_bhv)
 {
 	if (mp->m_ihash)
 		xfs_ihash_free(mp);
@@ -177,6 +202,7 @@ xfs_mount_free(
 		VFS_REMOVEBHV(vfsp, &mp->m_bhv);
 	}
 
+	xfs_icsb_destroy_counters(mp);
 	kmem_free(mp, sizeof(xfs_mount_t));
 }
 
@@ -242,9 +268,12 @@ xfs_mount_validate_sb(
 	    sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
 	    sbp->sb_inodesize < XFS_DINODE_MIN_SIZE ||
 	    sbp->sb_inodesize > XFS_DINODE_MAX_SIZE ||
+	    sbp->sb_inodelog < XFS_DINODE_MIN_LOG ||
+	    sbp->sb_inodelog > XFS_DINODE_MAX_LOG ||
+	    (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog) ||
 	    (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) ||
 	    (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) ||
-	    sbp->sb_imax_pct > 100)) {
+	    (sbp->sb_imax_pct > 100 || sbp->sb_imax_pct < 1))) {
 		cmn_err(CE_WARN, "XFS: SB sanity check 1 failed");
 		XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(3)",
 			XFS_ERRLEVEL_LOW, mp, sbp);
@@ -527,6 +556,10 @@ xfs_readsb(xfs_mount_t *mp)
 		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
 	}
 
+	xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
+	xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
+	xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
+
 	mp->m_sb_bp = bp;
 	xfs_buf_relse(bp);
 	ASSERT(XFS_BUF_VALUSEMA(bp) > 0);
@@ -1154,6 +1187,9 @@ xfs_unmountfs_writesb(xfs_mount_t *mp)
 	sbp = xfs_getsb(mp, 0);
 	if (!(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY ||
 		XFS_FORCED_SHUTDOWN(mp))) {
+
+		xfs_icsb_sync_counters(mp);
+
 		/*
 		 * mark shared-readonly if desired
 		 */
@@ -1227,7 +1263,6 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
 
 	xfs_trans_log_buf(tp, bp, first, last);
 }
-
 /*
  * xfs_mod_incore_sb_unlocked() is a utility routine common used to apply
  * a delta to a specified field in the in-core superblock. Simply
@@ -1237,7 +1272,7 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
  *
  * The SB_LOCK must be held when this routine is called.
  */
-STATIC int
+int
 xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field,
 	int delta, int rsvd)
 {
@@ -1406,9 +1441,26 @@ xfs_mod_incore_sb(xfs_mount_t *mp, xfs_sb_field_t field, int delta, int rsvd)
 	unsigned long s;
 	int status;
 
-	s = XFS_SB_LOCK(mp);
-	status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
-	XFS_SB_UNLOCK(mp, s);
+	/* check for per-cpu counters */
+	switch (field) {
+#ifdef HAVE_PERCPU_SB
+	case XFS_SBS_ICOUNT:
+	case XFS_SBS_IFREE:
+	case XFS_SBS_FDBLOCKS:
+		if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
+			status = xfs_icsb_modify_counters(mp, field,
+							delta, rsvd);
+			break;
+		}
+		/* FALLTHROUGH */
+#endif
+	default:
+		s = XFS_SB_LOCK(mp);
+		status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
+		XFS_SB_UNLOCK(mp, s);
+		break;
+	}
+
 	return status;
 }
 
@@ -1445,8 +1497,26 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
 		 * from the loop so we'll fall into the undo loop
 		 * below.
 		 */
-		status = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
-				msbp->msb_delta, rsvd);
+		switch (msbp->msb_field) {
+#ifdef HAVE_PERCPU_SB
+		case XFS_SBS_ICOUNT:
+		case XFS_SBS_IFREE:
+		case XFS_SBS_FDBLOCKS:
+			if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
+				status = xfs_icsb_modify_counters_locked(mp,
+							msbp->msb_field,
+							msbp->msb_delta, rsvd);
+				break;
+			}
+			/* FALLTHROUGH */
+#endif
+		default:
+			status = xfs_mod_incore_sb_unlocked(mp,
+						msbp->msb_field,
+						msbp->msb_delta, rsvd);
+			break;
+		}
+
 		if (status != 0) {
 			break;
 		}
@@ -1463,8 +1533,28 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
 	if (status != 0) {
 		msbp--;
 		while (msbp >= msb) {
-			status = xfs_mod_incore_sb_unlocked(mp,
-				msbp->msb_field, -(msbp->msb_delta), rsvd);
+			switch (msbp->msb_field) {
+#ifdef HAVE_PERCPU_SB
+			case XFS_SBS_ICOUNT:
+			case XFS_SBS_IFREE:
+			case XFS_SBS_FDBLOCKS:
+				if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
+					status =
+					    xfs_icsb_modify_counters_locked(mp,
+						msbp->msb_field,
+						-(msbp->msb_delta),
+						rsvd);
+					break;
+				}
+				/* FALLTHROUGH */
+#endif
+			default:
+				status = xfs_mod_incore_sb_unlocked(mp,
+						msbp->msb_field,
+						-(msbp->msb_delta),
+						rsvd);
+				break;
+			}
 			ASSERT(status == 0);
 			msbp--;
 		}
@@ -1577,3 +1667,525 @@ xfs_mount_log_sbunit(
 	xfs_mod_sb(tp, fields);
 	xfs_trans_commit(tp, 0, NULL);
 }
1670
1671
1672#ifdef HAVE_PERCPU_SB
1673/*
1674 * Per-cpu incore superblock counters
1675 *
1676 * Simple concept, difficult implementation
1677 *
1678 * Basically, replace the incore superblock counters with a distributed per-cpu
1679 * counter for contended fields (e.g. free block count).
1680 *
1681 * Difficulties arise in that the incore sb is used for ENOSPC checking, and
1682 * hence needs to be accurately read when we are running low on space. Hence
1683 * there is a method to enable and disable the per-cpu counters based on how
1684 * much "stuff" is available in them.
1685 *
1686 * Basically, a counter is enabled if there is enough free resource to justify
1687 * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local
1688 * ENOSPC), then we disable the counters to synchronise all callers and
1689 * re-distribute the available resources.
1690 *
1691 * If, once we redistributed the available resources, we still get a failure,
1692 * we disable the per-cpu counter and go through the slow path.
1693 *
1694 * The slow path is the current xfs_mod_incore_sb() function. This means that
1695 * when we disable a per-cpu counter, we need to drain its resources back to
1696 * the global superblock. We do this after disabling the counter to prevent
1697 * more threads from queueing up on the counter.
1698 *
1699 * Essentially, this means that we still need a lock in the fast path to enable
1700 * synchronisation between the global counters and the per-cpu counters. This
1701 * is not a problem because the lock will be local to a CPU almost all the time
1702 * and have little contention except when we get to ENOSPC conditions.
1703 *
1704 * Basically, this lock becomes a barrier that enables us to lock out the fast
1705 * path while we do things like enabling and disabling counters and
1706 * synchronising the counters.
1707 *
1708 * Locking rules:
1709 *
1710 * 1. XFS_SB_LOCK() before picking up per-cpu locks
1711 * 2. per-cpu locks always picked up via for_each_online_cpu() order
1712 * 3. accurate counter sync requires XFS_SB_LOCK + per cpu locks
1713 * 4. modifying per-cpu counters requires holding per-cpu lock
1714 * 5. modifying global counters requires holding XFS_SB_LOCK
1715 * 6. enabling or disabling a counter requires holding the XFS_SB_LOCK
1716 * and _none_ of the per-cpu locks.
1717 *
1718 * Disabled counters are only ever re-enabled by a balance operation
1719 * that results in more free resources per CPU than a given threshold.
1720 * To ensure counters don't remain disabled, they are rebalanced when
1721 * the global resource goes above a higher threshold (i.e. some hysteresis
1722 * is present to prevent thrashing).
1723 */
1724
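/*
 * Illustrative sketch (not part of this patch): the enable/disable idea
 * above in miniature. Arrays stand in for the per-cpu allocation, one
 * int for a bit of m_icsb_counters, and all locking is elided.
 */
#include <stdio.h>

#define NCPUS 4

static long pcp[NCPUS];	/* per-cpu shares of one counter */
static long global;	/* the incore superblock copy */
static int disabled;	/* counter currently disabled? */

/* Fold every per-cpu share back into the global value, then disable. */
static void disable_counter(void)
{
	disabled = 1;
	for (int c = 0; c < NCPUS; c++) {
		global += pcp[c];
		pcp[c] = 0;
	}
}

/* Spread the global value back out; the first cpu gets the residual. */
static void balance_counter(void)
{
	long share = global / NCPUS, resid = global % NCPUS;

	for (int c = 0; c < NCPUS; c++)
		pcp[c] = share + (c == 0 ? resid : 0);
	global = 0;
	disabled = 0;
}

int main(void)
{
	global = 1003;
	balance_counter();
	for (int c = 0; c < NCPUS; c++)
		printf("cpu%d: %ld\n", c, pcp[c]);	/* 253 250 250 250 */
	disable_counter();
	printf("global %ld, disabled %d\n", global, disabled);
	return 0;
}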
1725/*
1726 * hot-plug CPU notifier support.
1727 *
1728 * We cannot use the hotcpu_register() function because it does
1729 * not allow notifier instances. We need a notifier per filesystem
1730 * as we need to be able to identify the filesystem to balance
1731 * the counters out. This is achieved by having a notifier block
1732 * embedded in the xfs_mount_t and doing pointer magic to get the
1733 * mount pointer from the notifier block address.
1734 */
1735STATIC int
1736xfs_icsb_cpu_notify(
1737 struct notifier_block *nfb,
1738 unsigned long action,
1739 void *hcpu)
1740{
1741 xfs_icsb_cnts_t *cntp;
1742 xfs_mount_t *mp;
1743 int s;
1744
1745 mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
1746 cntp = (xfs_icsb_cnts_t *)
1747 per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
1748 switch (action) {
1749 case CPU_UP_PREPARE:
1750 /* Easy Case - initialize the area and locks, and
1751 * then rebalance when online does everything else for us. */
1752 memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
1753 break;
1754 case CPU_ONLINE:
1755 xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
1756 xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
1757 xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
1758 break;
1759 case CPU_DEAD:
1760 /* Disable all the counters, then fold the dead cpu's
1761 * count into the total on the global superblock and
1762 * re-enable the counters. */
1763 s = XFS_SB_LOCK(mp);
1764 xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
1765 xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
1766 xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);
1767
1768 mp->m_sb.sb_icount += cntp->icsb_icount;
1769 mp->m_sb.sb_ifree += cntp->icsb_ifree;
1770 mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;
1771
1772 memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
1773
1774 xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, XFS_ICSB_SB_LOCKED);
1775 xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, XFS_ICSB_SB_LOCKED);
1776 xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, XFS_ICSB_SB_LOCKED);
1777 XFS_SB_UNLOCK(mp, s);
1778 break;
1779 }
1780
1781 return NOTIFY_OK;
1782}
1783
1784int
1785xfs_icsb_init_counters(
1786 xfs_mount_t *mp)
1787{
1788 xfs_icsb_cnts_t *cntp;
1789 int i;
1790
1791 mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
1792 if (mp->m_sb_cnts == NULL)
1793 return -ENOMEM;
1794
1795 mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
1796 mp->m_icsb_notifier.priority = 0;
1797 register_cpu_notifier(&mp->m_icsb_notifier);
1798
1799 for_each_online_cpu(i) {
1800 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
1801 memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
1802 }
1803 /*
1804 * start with all counters disabled so that the
1805 * initial balance kicks us off correctly
1806 */
1807 mp->m_icsb_counters = -1;
1808 return 0;
1809}
1810
1811STATIC void
1812xfs_icsb_destroy_counters(
1813 xfs_mount_t *mp)
1814{
1815 if (mp->m_sb_cnts) {
1816 unregister_cpu_notifier(&mp->m_icsb_notifier);
1817 free_percpu(mp->m_sb_cnts);
1818 }
1819}
1820
1821STATIC inline void
1822xfs_icsb_lock_cntr(
1823 xfs_icsb_cnts_t *icsbp)
1824{
1825 while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) {
1826 ndelay(1000);
1827 }
1828}
1829
1830STATIC inline void
1831xfs_icsb_unlock_cntr(
1832 xfs_icsb_cnts_t *icsbp)
1833{
1834 clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags);
1835}
1836
1837
1838STATIC inline void
1839xfs_icsb_lock_all_counters(
1840 xfs_mount_t *mp)
1841{
1842 xfs_icsb_cnts_t *cntp;
1843 int i;
1844
1845 for_each_online_cpu(i) {
1846 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
1847 xfs_icsb_lock_cntr(cntp);
1848 }
1849}
1850
1851STATIC inline void
1852xfs_icsb_unlock_all_counters(
1853 xfs_mount_t *mp)
1854{
1855 xfs_icsb_cnts_t *cntp;
1856 int i;
1857
1858 for_each_online_cpu(i) {
1859 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
1860 xfs_icsb_unlock_cntr(cntp);
1861 }
1862}
1863
1864STATIC void
1865xfs_icsb_count(
1866 xfs_mount_t *mp,
1867 xfs_icsb_cnts_t *cnt,
1868 int flags)
1869{
1870 xfs_icsb_cnts_t *cntp;
1871 int i;
1872
1873 memset(cnt, 0, sizeof(xfs_icsb_cnts_t));
1874
1875 if (!(flags & XFS_ICSB_LAZY_COUNT))
1876 xfs_icsb_lock_all_counters(mp);
1877
1878 for_each_online_cpu(i) {
1879 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
1880 cnt->icsb_icount += cntp->icsb_icount;
1881 cnt->icsb_ifree += cntp->icsb_ifree;
1882 cnt->icsb_fdblocks += cntp->icsb_fdblocks;
1883 }
1884
1885 if (!(flags & XFS_ICSB_LAZY_COUNT))
1886 xfs_icsb_unlock_all_counters(mp);
1887}
1888
1889STATIC int
1890xfs_icsb_counter_disabled(
1891 xfs_mount_t *mp,
1892 xfs_sb_field_t field)
1893{
1894 ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
1895 return test_bit(field, &mp->m_icsb_counters);
1896}
1897
1898STATIC int
1899xfs_icsb_disable_counter(
1900 xfs_mount_t *mp,
1901 xfs_sb_field_t field)
1902{
1903 xfs_icsb_cnts_t cnt;
1904
1905 ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
1906
1907 xfs_icsb_lock_all_counters(mp);
1908 if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
1909 /* drain back to superblock */
1910
1911 xfs_icsb_count(mp, &cnt, XFS_ICSB_SB_LOCKED|XFS_ICSB_LAZY_COUNT);
1912 switch(field) {
1913 case XFS_SBS_ICOUNT:
1914 mp->m_sb.sb_icount = cnt.icsb_icount;
1915 break;
1916 case XFS_SBS_IFREE:
1917 mp->m_sb.sb_ifree = cnt.icsb_ifree;
1918 break;
1919 case XFS_SBS_FDBLOCKS:
1920 mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
1921 break;
1922 default:
1923 BUG();
1924 }
1925 }
1926
1927 xfs_icsb_unlock_all_counters(mp);
1928
1929 return 0;
1930}
1931
1932STATIC void
1933xfs_icsb_enable_counter(
1934 xfs_mount_t *mp,
1935 xfs_sb_field_t field,
1936 uint64_t count,
1937 uint64_t resid)
1938{
1939 xfs_icsb_cnts_t *cntp;
1940 int i;
1941
1942 ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
1943
1944 xfs_icsb_lock_all_counters(mp);
1945 for_each_online_cpu(i) {
1946 cntp = per_cpu_ptr(mp->m_sb_cnts, i);
1947 switch (field) {
1948 case XFS_SBS_ICOUNT:
1949 cntp->icsb_icount = count + resid;
1950 break;
1951 case XFS_SBS_IFREE:
1952 cntp->icsb_ifree = count + resid;
1953 break;
1954 case XFS_SBS_FDBLOCKS:
1955 cntp->icsb_fdblocks = count + resid;
1956 break;
1957 default:
1958 BUG();
1959 break;
1960 }
1961 resid = 0;
1962 }
1963 clear_bit(field, &mp->m_icsb_counters);
1964 xfs_icsb_unlock_all_counters(mp);
1965}
1966
1967STATIC void
1968xfs_icsb_sync_counters_int(
1969 xfs_mount_t *mp,
1970 int flags)
1971{
1972 xfs_icsb_cnts_t cnt;
1973 int s;
1974
1975 /* Pass 1: lock all counters */
1976 if ((flags & XFS_ICSB_SB_LOCKED) == 0)
1977 s = XFS_SB_LOCK(mp);
1978
1979 xfs_icsb_count(mp, &cnt, flags);
1980
1981 /* Pass 2: update mp->m_sb fields with the summed counts */
1982 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
1983 mp->m_sb.sb_icount = cnt.icsb_icount;
1984 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
1985 mp->m_sb.sb_ifree = cnt.icsb_ifree;
1986 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
1987 mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
1988
1989 if ((flags & XFS_ICSB_SB_LOCKED) == 0)
1990 XFS_SB_UNLOCK(mp, s);
1991}
1992
1993/*
1994 * Accurate update of per-cpu counters to incore superblock
1995 */
1996STATIC void
1997xfs_icsb_sync_counters(
1998 xfs_mount_t *mp)
1999{
2000 xfs_icsb_sync_counters_int(mp, 0);
2001}
2002
2003/*
2004 * lazy addition used for things like df, background sb syncs, etc
2005 */
2006void
2007xfs_icsb_sync_counters_lazy(
2008 xfs_mount_t *mp)
2009{
2010 xfs_icsb_sync_counters_int(mp, XFS_ICSB_LAZY_COUNT);
2011}
2012
2013/*
2014 * Balance and enable/disable counters as necessary.
2015 *
2016 * Thresholds for re-enabling counters are somewhat magic.
2017 * Inode counts are chosen to match a single on-disk inode
2018 * allocation chunk per CPU, and the free block count is kept
2019 * far enough above zero that we aren't likely to thrash
2020 * when we get near ENOSPC.
2021 */
2022#define XFS_ICSB_INO_CNTR_REENABLE 64
2023#define XFS_ICSB_FDBLK_CNTR_REENABLE 512
2024STATIC void
2025xfs_icsb_balance_counter(
2026 xfs_mount_t *mp,
2027 xfs_sb_field_t field,
2028 int flags)
2029{
2030 uint64_t count, resid = 0;
2031 int weight = num_online_cpus();
2032 int s;
2033
2034 if (!(flags & XFS_ICSB_SB_LOCKED))
2035 s = XFS_SB_LOCK(mp);
2036
2037 /* disable counter and sync counter */
2038 xfs_icsb_disable_counter(mp, field);
2039
2040 /* update counters - first CPU gets residual */
2041 switch (field) {
2042 case XFS_SBS_ICOUNT:
2043 count = mp->m_sb.sb_icount;
2044 resid = do_div(count, weight);
2045 if (count < XFS_ICSB_INO_CNTR_REENABLE)
2046 goto out;
2047 break;
2048 case XFS_SBS_IFREE:
2049 count = mp->m_sb.sb_ifree;
2050 resid = do_div(count, weight);
2051 if (count < XFS_ICSB_INO_CNTR_REENABLE)
2052 goto out;
2053 break;
2054 case XFS_SBS_FDBLOCKS:
2055 count = mp->m_sb.sb_fdblocks;
2056 resid = do_div(count, weight);
2057 if (count < XFS_ICSB_FDBLK_CNTR_REENABLE)
2058 goto out;
2059 break;
2060 default:
2061 BUG();
2062 break;
2063 }
2064
2065 xfs_icsb_enable_counter(mp, field, count, resid);
2066out:
2067 if (!(flags & XFS_ICSB_SB_LOCKED))
2068 XFS_SB_UNLOCK(mp, s);
2069}
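/*
 * Illustrative sketch (not part of this patch): the count/resid split
 * above. do_div() divides a 64-bit value in place and hands back the
 * remainder; plain / and % stand in for it here. Numbers are invented.
 */
#include <stdio.h>
#include <stdint.h>

#define INO_CNTR_REENABLE 64	/* mirrors XFS_ICSB_INO_CNTR_REENABLE */

int main(void)
{
	uint64_t count = 1000;		/* global sb_icount */
	unsigned int weight = 3;	/* num_online_cpus() */
	uint64_t resid = count % weight;

	count /= weight;	/* do_div(count, weight) leaves the quotient */
	if (count < INO_CNTR_REENABLE)
		printf("below threshold, leave disabled\n");
	else
		printf("per cpu %llu, first cpu gets +%llu\n",
		       (unsigned long long)count,
		       (unsigned long long)resid);
	return 0;
}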
2070
2071STATIC int
2072xfs_icsb_modify_counters_int(
2073 xfs_mount_t *mp,
2074 xfs_sb_field_t field,
2075 int delta,
2076 int rsvd,
2077 int flags)
2078{
2079 xfs_icsb_cnts_t *icsbp;
2080 long long lcounter; /* long counter for 64 bit fields */
2081 int cpu, s, locked = 0;
2082 int ret = 0, balance_done = 0;
2083
2084again:
2085 cpu = get_cpu();
2086 icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu);
2087 xfs_icsb_lock_cntr(icsbp);
2088 if (unlikely(xfs_icsb_counter_disabled(mp, field)))
2089 goto slow_path;
2090
2091 switch (field) {
2092 case XFS_SBS_ICOUNT:
2093 lcounter = icsbp->icsb_icount;
2094 lcounter += delta;
2095 if (unlikely(lcounter < 0))
2096 goto slow_path;
2097 icsbp->icsb_icount = lcounter;
2098 break;
2099
2100 case XFS_SBS_IFREE:
2101 lcounter = icsbp->icsb_ifree;
2102 lcounter += delta;
2103 if (unlikely(lcounter < 0))
2104 goto slow_path;
2105 icsbp->icsb_ifree = lcounter;
2106 break;
2107
2108 case XFS_SBS_FDBLOCKS:
2109 BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);
2110
2111 lcounter = icsbp->icsb_fdblocks;
2112 lcounter += delta;
2113 if (unlikely(lcounter < 0))
2114 goto slow_path;
2115 icsbp->icsb_fdblocks = lcounter;
2116 break;
2117 default:
2118 BUG();
2119 break;
2120 }
2121 xfs_icsb_unlock_cntr(icsbp);
2122 put_cpu();
2123 if (locked)
2124 XFS_SB_UNLOCK(mp, s);
2125 return 0;
2126
2127 /*
2128 * The slow path needs to be run with the SBLOCK
2129 * held so that we prevent other threads from
2130 * attempting to run this path at the same time.
2131 * This provides exclusion for the balancing code,
2132 * and exclusive fallback if the balance does not
2133 * provide enough resources to continue in an unlocked
2134 * manner.
2135 */
2136slow_path:
2137 xfs_icsb_unlock_cntr(icsbp);
2138 put_cpu();
2139
2140 /* need to hold the superblock lock in case we need
2141 * to disable a counter */
2142 if (!(flags & XFS_ICSB_SB_LOCKED)) {
2143 s = XFS_SB_LOCK(mp);
2144 locked = 1;
2145 flags |= XFS_ICSB_SB_LOCKED;
2146 }
2147 if (!balance_done) {
2148 xfs_icsb_balance_counter(mp, field, flags);
2149 balance_done = 1;
2150 goto again;
2151 } else {
2152 /*
2153 * we might not have enough on this local
2154 * cpu to allocate for a bulk request.
2155 * We need to drain this field from all CPUs
2156 * and disable the counter fastpath
2157 */
2158 xfs_icsb_disable_counter(mp, field);
2159 }
2160
2161 ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
2162
2163 if (locked)
2164 XFS_SB_UNLOCK(mp, s);
2165 return ret;
2166}
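/*
 * Illustrative sketch (not part of this patch): the shape of the retry
 * above - try the local share, rebalance once, then fall back to the
 * shared pool. The pools, amounts, and rebalance rule are invented,
 * and the real locking is elided.
 */
#include <stdio.h>

static long fast_pool = 10;	/* this cpu's share of the counter */
static long global_pool = 100;	/* everyone else's, behind the lock */

static int alloc_blocks(long want)
{
	int balanced = 0;
again:
	if (fast_pool >= want) {		/* fast path */
		fast_pool -= want;
		return 0;
	}
	if (!balanced) {			/* slow path, first pass */
		fast_pool += global_pool / 2;	/* crude rebalance */
		global_pool -= global_pool / 2;
		balanced = 1;
		goto again;
	}
	if (global_pool + fast_pool >= want) {	/* disabled-counter path */
		global_pool += fast_pool - want;
		fast_pool = 0;
		return 0;
	}
	return -1;				/* local ENOSPC */
}

int main(void)
{
	/* succeeds on the retry after one rebalance: prints "0 20 50" */
	printf("%d %ld %ld\n", alloc_blocks(40), fast_pool, global_pool);
	return 0;
}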
2167
2168STATIC int
2169xfs_icsb_modify_counters(
2170 xfs_mount_t *mp,
2171 xfs_sb_field_t field,
2172 int delta,
2173 int rsvd)
2174{
2175 return xfs_icsb_modify_counters_int(mp, field, delta, rsvd, 0);
2176}
2177
2178/*
2179 * Called when superblock is already locked
2180 */
2181STATIC int
2182xfs_icsb_modify_counters_locked(
2183 xfs_mount_t *mp,
2184 xfs_sb_field_t field,
2185 int delta,
2186 int rsvd)
2187{
2188 return xfs_icsb_modify_counters_int(mp, field, delta,
2189 rsvd, XFS_ICSB_SB_LOCKED);
2190}
2191#endif
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index cd3cf9613a00..ebd73960e9db 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -267,6 +267,34 @@ typedef struct xfs_ioops {
 #define XFS_IODONE(vfsp) \
 	(*(mp)->m_io_ops.xfs_iodone)(vfsp)
 
+#ifdef HAVE_PERCPU_SB
+
+/*
+ * Valid per-cpu incore superblock counters. Note that if you add new
+ * counters, you may need to define new counter disabled bit field
+ * descriptors, as there are more possible fields in the superblock
+ * than can fit in a bitfield on a 32 bit platform. The XFS_SBS_*
+ * values for the current counters just fit.
+ */
+typedef struct xfs_icsb_cnts {
+	uint64_t icsb_fdblocks;
+	uint64_t icsb_ifree;
+	uint64_t icsb_icount;
+	unsigned long icsb_flags;
+} xfs_icsb_cnts_t;
+
+#define XFS_ICSB_FLAG_LOCK	(1 << 0)	/* counter lock bit */
+
+#define XFS_ICSB_SB_LOCKED	(1 << 0)	/* sb already locked */
+#define XFS_ICSB_LAZY_COUNT	(1 << 1)	/* accuracy not needed */
+
+extern int xfs_icsb_init_counters(struct xfs_mount *);
+extern void xfs_icsb_sync_counters_lazy(struct xfs_mount *);
+
+#else
+#define xfs_icsb_init_counters(mp) (0)
+#define xfs_icsb_sync_counters_lazy(mp) do { } while (0)
+#endif
 
 typedef struct xfs_mount {
 	bhv_desc_t m_bhv;	/* vfs xfs behavior */
@@ -372,6 +400,11 @@ typedef struct xfs_mount {
 	struct xfs_qmops m_qm_ops;	/* vector of XQM ops */
 	struct xfs_ioops m_io_ops;	/* vector of I/O ops */
 	atomic_t m_active_trans;	/* number trans frozen */
+#ifdef HAVE_PERCPU_SB
+	xfs_icsb_cnts_t *m_sb_cnts;	/* per-cpu superblock counters */
+	unsigned long m_icsb_counters;	/* disabled per-cpu counters */
+	struct notifier_block m_icsb_notifier;	/* hotplug cpu notifier */
+#endif
 } xfs_mount_t;
 
 /*
@@ -386,8 +419,6 @@ typedef struct xfs_mount {
 #define XFS_MOUNT_FS_SHUTDOWN	(1ULL << 4)	/* atomic stop of all filesystem
 						   operations, typically for
						   disk errors in metadata */
-#define XFS_MOUNT_NOATIME	(1ULL << 5)	/* don't modify inode access
-						   times on reads */
 #define XFS_MOUNT_RETERR	(1ULL << 6)	/* return alignment errors to
 						   user */
 #define XFS_MOUNT_NOALIGN	(1ULL << 7)	/* turn off stripe alignment
@@ -411,6 +442,8 @@ typedef struct xfs_mount {
 #define XFS_MOUNT_DIRSYNC	(1ULL << 21)	/* synchronous directory ops */
 #define XFS_MOUNT_COMPAT_IOSIZE	(1ULL << 22)	/* don't report large preferred
 						 * I/O size in stat() */
+#define XFS_MOUNT_NO_PERCPU_SB	(1ULL << 23)	/* don't use per-cpu superblock
+						   counters */
 
 
 /*
@@ -473,11 +506,6 @@ xfs_preferred_iosize(xfs_mount_t *mp)
473#define XFS_SHUTDOWN_REMOTE_REQ 0x10 /* Shutdown came from remote cell */ 506#define XFS_SHUTDOWN_REMOTE_REQ 0x10 /* Shutdown came from remote cell */
474 507
475/* 508/*
476 * xflags for xfs_syncsub
477 */
478#define XFS_XSYNC_RELOC 0x01
479
480/*
481 * Flags for xfs_mountfs 509 * Flags for xfs_mountfs
482 */ 510 */
483#define XFS_MFSI_SECOND 0x01 /* Secondary mount -- skip stuff */ 511#define XFS_MFSI_SECOND 0x01 /* Secondary mount -- skip stuff */
@@ -548,6 +576,8 @@ extern void xfs_unmountfs_close(xfs_mount_t *, struct cred *);
548extern int xfs_unmountfs_writesb(xfs_mount_t *); 576extern int xfs_unmountfs_writesb(xfs_mount_t *);
549extern int xfs_unmount_flush(xfs_mount_t *, int); 577extern int xfs_unmount_flush(xfs_mount_t *, int);
550extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int, int); 578extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int, int);
579extern int xfs_mod_incore_sb_unlocked(xfs_mount_t *, xfs_sb_field_t,
580 int, int);
551extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *, 581extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *,
552 uint, int); 582 uint, int);
553extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int); 583extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
diff --git a/fs/xfs/xfs_rw.h b/fs/xfs/xfs_rw.h
index de85eefb7966..e63795644478 100644
--- a/fs/xfs/xfs_rw.h
+++ b/fs/xfs/xfs_rw.h
@@ -89,6 +89,7 @@ extern void xfs_ioerror_alert(char *func, struct xfs_mount *mp,
89 */ 89 */
90extern int xfs_rwlock(bhv_desc_t *bdp, vrwlock_t write_lock); 90extern int xfs_rwlock(bhv_desc_t *bdp, vrwlock_t write_lock);
91extern void xfs_rwunlock(bhv_desc_t *bdp, vrwlock_t write_lock); 91extern void xfs_rwunlock(bhv_desc_t *bdp, vrwlock_t write_lock);
92extern int xfs_setattr(bhv_desc_t *bdp, vattr_t *vap, int flags, cred_t *credp);
92extern int xfs_change_file_space(bhv_desc_t *bdp, int cmd, xfs_flock64_t *bf, 93extern int xfs_change_file_space(bhv_desc_t *bdp, int cmd, xfs_flock64_t *bf,
93 xfs_off_t offset, cred_t *credp, int flags); 94 xfs_off_t offset, cred_t *credp, int flags);
94extern int xfs_set_dmattrs(bhv_desc_t *bdp, u_int evmask, u_int16_t state, 95extern int xfs_set_dmattrs(bhv_desc_t *bdp, u_int evmask, u_int16_t state,
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index d3d714e6b32a..2918956553a5 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -55,10 +55,141 @@ STATIC void xfs_trans_committed(xfs_trans_t *, int);
55STATIC void xfs_trans_chunk_committed(xfs_log_item_chunk_t *, xfs_lsn_t, int); 55STATIC void xfs_trans_chunk_committed(xfs_log_item_chunk_t *, xfs_lsn_t, int);
56STATIC void xfs_trans_free(xfs_trans_t *); 56STATIC void xfs_trans_free(xfs_trans_t *);
57 57
58kmem_zone_t *xfs_trans_zone; 58kmem_zone_t *xfs_trans_zone;
59 59
60 60
61/* 61/*
62 * Reservation functions here avoid a huge stack in xfs_trans_init
63 * due to temporaries in the calculations spilling out of registers.
64 */
65
66STATIC uint
67xfs_calc_write_reservation(xfs_mount_t *mp)
68{
69 return XFS_CALC_WRITE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
70}
71
72STATIC uint
73xfs_calc_itruncate_reservation(xfs_mount_t *mp)
74{
75 return XFS_CALC_ITRUNCATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
76}
77
78STATIC uint
79xfs_calc_rename_reservation(xfs_mount_t *mp)
80{
81 return XFS_CALC_RENAME_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
82}
83
84STATIC uint
85xfs_calc_link_reservation(xfs_mount_t *mp)
86{
87 return XFS_CALC_LINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
88}
89
90STATIC uint
91xfs_calc_remove_reservation(xfs_mount_t *mp)
92{
93 return XFS_CALC_REMOVE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
94}
95
96STATIC uint
97xfs_calc_symlink_reservation(xfs_mount_t *mp)
98{
99 return XFS_CALC_SYMLINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
100}
101
102STATIC uint
103xfs_calc_create_reservation(xfs_mount_t *mp)
104{
105 return XFS_CALC_CREATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
106}
107
108STATIC uint
109xfs_calc_mkdir_reservation(xfs_mount_t *mp)
110{
111 return XFS_CALC_MKDIR_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
112}
113
114STATIC uint
115xfs_calc_ifree_reservation(xfs_mount_t *mp)
116{
117 return XFS_CALC_IFREE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
118}
119
120STATIC uint
121xfs_calc_ichange_reservation(xfs_mount_t *mp)
122{
123 return XFS_CALC_ICHANGE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
124}
125
126STATIC uint
127xfs_calc_growdata_reservation(xfs_mount_t *mp)
128{
129 return XFS_CALC_GROWDATA_LOG_RES(mp);
130}
131
132STATIC uint
133xfs_calc_growrtalloc_reservation(xfs_mount_t *mp)
134{
135 return XFS_CALC_GROWRTALLOC_LOG_RES(mp);
136}
137
138STATIC uint
139xfs_calc_growrtzero_reservation(xfs_mount_t *mp)
140{
141 return XFS_CALC_GROWRTZERO_LOG_RES(mp);
142}
143
144STATIC uint
145xfs_calc_growrtfree_reservation(xfs_mount_t *mp)
146{
147 return XFS_CALC_GROWRTFREE_LOG_RES(mp);
148}
149
150STATIC uint
151xfs_calc_swrite_reservation(xfs_mount_t *mp)
152{
153 return XFS_CALC_SWRITE_LOG_RES(mp);
154}
155
156STATIC uint
157xfs_calc_writeid_reservation(xfs_mount_t *mp)
158{
159 return XFS_CALC_WRITEID_LOG_RES(mp);
160}
161
162STATIC uint
163xfs_calc_addafork_reservation(xfs_mount_t *mp)
164{
165 return XFS_CALC_ADDAFORK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
166}
167
168STATIC uint
169xfs_calc_attrinval_reservation(xfs_mount_t *mp)
170{
171 return XFS_CALC_ATTRINVAL_LOG_RES(mp);
172}
173
174STATIC uint
175xfs_calc_attrset_reservation(xfs_mount_t *mp)
176{
177 return XFS_CALC_ATTRSET_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
178}
179
180STATIC uint
181xfs_calc_attrrm_reservation(xfs_mount_t *mp)
182{
183 return XFS_CALC_ATTRRM_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
184}
185
186STATIC uint
187xfs_calc_clear_agi_bucket_reservation(xfs_mount_t *mp)
188{
189 return XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp);
190}
191
192/*
62 * Initialize the precomputed transaction reservation values 193 * Initialize the precomputed transaction reservation values
63 * in the mount structure. 194 * in the mount structure.
64 */ 195 */
@@ -69,39 +200,27 @@ xfs_trans_init(
69 xfs_trans_reservations_t *resp; 200 xfs_trans_reservations_t *resp;
70 201
71 resp = &(mp->m_reservations); 202 resp = &(mp->m_reservations);
72 resp->tr_write = 203 resp->tr_write = xfs_calc_write_reservation(mp);
73 (uint)(XFS_CALC_WRITE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); 204 resp->tr_itruncate = xfs_calc_itruncate_reservation(mp);
74 resp->tr_itruncate = 205 resp->tr_rename = xfs_calc_rename_reservation(mp);
75 (uint)(XFS_CALC_ITRUNCATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); 206 resp->tr_link = xfs_calc_link_reservation(mp);
76 resp->tr_rename = 207 resp->tr_remove = xfs_calc_remove_reservation(mp);
77 (uint)(XFS_CALC_RENAME_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); 208 resp->tr_symlink = xfs_calc_symlink_reservation(mp);
78 resp->tr_link = (uint)XFS_CALC_LINK_LOG_RES(mp); 209 resp->tr_create = xfs_calc_create_reservation(mp);
79 resp->tr_remove = 210 resp->tr_mkdir = xfs_calc_mkdir_reservation(mp);
80 (uint)(XFS_CALC_REMOVE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); 211 resp->tr_ifree = xfs_calc_ifree_reservation(mp);
81 resp->tr_symlink = 212 resp->tr_ichange = xfs_calc_ichange_reservation(mp);
82 (uint)(XFS_CALC_SYMLINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); 213 resp->tr_growdata = xfs_calc_growdata_reservation(mp);
83 resp->tr_create = 214 resp->tr_swrite = xfs_calc_swrite_reservation(mp);
84 (uint)(XFS_CALC_CREATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); 215 resp->tr_writeid = xfs_calc_writeid_reservation(mp);
85 resp->tr_mkdir = 216 resp->tr_addafork = xfs_calc_addafork_reservation(mp);
86 (uint)(XFS_CALC_MKDIR_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); 217 resp->tr_attrinval = xfs_calc_attrinval_reservation(mp);
87 resp->tr_ifree = 218 resp->tr_attrset = xfs_calc_attrset_reservation(mp);
88 (uint)(XFS_CALC_IFREE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); 219 resp->tr_attrrm = xfs_calc_attrrm_reservation(mp);
89 resp->tr_ichange = 220 resp->tr_clearagi = xfs_calc_clear_agi_bucket_reservation(mp);
90 (uint)(XFS_CALC_ICHANGE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); 221 resp->tr_growrtalloc = xfs_calc_growrtalloc_reservation(mp);
91 resp->tr_growdata = (uint)XFS_CALC_GROWDATA_LOG_RES(mp); 222 resp->tr_growrtzero = xfs_calc_growrtzero_reservation(mp);
92 resp->tr_swrite = (uint)XFS_CALC_SWRITE_LOG_RES(mp); 223 resp->tr_growrtfree = xfs_calc_growrtfree_reservation(mp);
93 resp->tr_writeid = (uint)XFS_CALC_WRITEID_LOG_RES(mp);
94 resp->tr_addafork =
95 (uint)(XFS_CALC_ADDAFORK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
96 resp->tr_attrinval = (uint)XFS_CALC_ATTRINVAL_LOG_RES(mp);
97 resp->tr_attrset =
98 (uint)(XFS_CALC_ATTRSET_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
99 resp->tr_attrrm =
100 (uint)(XFS_CALC_ATTRRM_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp));
101 resp->tr_clearagi = (uint)XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp);
102 resp->tr_growrtalloc = (uint)XFS_CALC_GROWRTALLOC_LOG_RES(mp);
103 resp->tr_growrtzero = (uint)XFS_CALC_GROWRTZERO_LOG_RES(mp);
104 resp->tr_growrtfree = (uint)XFS_CALC_GROWRTFREE_LOG_RES(mp);
105} 224}
106 225
107/* 226/*
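The helpers introduced above replace one function that evaluated every XFS_CALC_*_LOG_RES() macro inline; each macro expands to a large expression, so a single frame had to hold all of their temporaries at once. Splitting each computation into its own STATIC function bounds the caller's stack frame. A toy demonstration with stand-in formulas:

/*
 * Toy demonstration of the refactoring pattern, with made-up formulas.
 * When one function evaluates many large macro expressions, their
 * temporaries can all be live in a single stack frame; hoisting each
 * expression into its own small function keeps the caller's frame
 * bounded.
 */
#include <stdio.h>

#define CALC_WRITE_RES(bs)  ((bs) * 4 + 1024)  /* stand-in formula */
#define CALC_RENAME_RES(bs) ((bs) * 6 + 2048)  /* stand-in formula */

struct reservations { unsigned int tr_write, tr_rename; };

static unsigned int calc_write_reservation(unsigned int blocksize)
{
    return CALC_WRITE_RES(blocksize);
}

static unsigned int calc_rename_reservation(unsigned int blocksize)
{
    return CALC_RENAME_RES(blocksize);
}

static void init_reservations(struct reservations *resp, unsigned int bs)
{
    /* each helper's temporaries are dead before the next call */
    resp->tr_write = calc_write_reservation(bs);
    resp->tr_rename = calc_rename_reservation(bs);
}

int main(void)
{
    struct reservations r;

    init_reservations(&r, 4096);
    printf("write=%u rename=%u\n", r.tr_write, r.tr_rename);
    return 0;
}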
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index d77901c07f63..e48befa4e337 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -380,7 +380,7 @@ typedef struct xfs_trans {
380 xfs_trans_header_t t_header; /* header for in-log trans */ 380 xfs_trans_header_t t_header; /* header for in-log trans */
381 unsigned int t_busy_free; /* busy descs free */ 381 unsigned int t_busy_free; /* busy descs free */
382 xfs_log_busy_chunk_t t_busy; /* busy/async free blocks */ 382 xfs_log_busy_chunk_t t_busy; /* busy/async free blocks */
383 xfs_pflags_t t_pflags; /* saved pflags state */ 383 unsigned long t_pflags; /* saved process flags state */
384} xfs_trans_t; 384} xfs_trans_t;
385 385
386#endif /* __KERNEL__ */ 386#endif /* __KERNEL__ */
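Changing t_pflags to a plain unsigned long reflects the save/set/restore idiom for per-process flags around a transaction. A hedged, stand-alone sketch of that idiom (PF_TRANS and the structures are invented for illustration):

/*
 * Stand-alone sketch of the save/set/restore idiom; not the kernel's
 * definitions.
 */
#define PF_TRANS (1UL << 0)  /* "inside a filesystem transaction" */

struct task  { unsigned long flags; };
struct trans { unsigned long t_pflags; /* saved process flags state */ };

static void trans_start(struct trans *tp, struct task *tsk)
{
    tp->t_pflags = tsk->flags;  /* remember the caller's state */
    tsk->flags |= PF_TRANS;     /* mark: re-entering the fs is unsafe */
}

static void trans_finish(struct trans *tp, struct task *tsk)
{
    tsk->flags = tp->t_pflags;  /* restore exactly what was saved */
}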
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index b6ad370fab3d..d4ec4dfaf19c 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -55,7 +55,7 @@
55#include "xfs_clnt.h" 55#include "xfs_clnt.h"
56#include "xfs_fsops.h" 56#include "xfs_fsops.h"
57 57
58STATIC int xfs_sync(bhv_desc_t *, int, cred_t *); 58STATIC int xfs_sync(bhv_desc_t *, int, cred_t *);
59 59
60int 60int
61xfs_init(void) 61xfs_init(void)
@@ -77,11 +77,12 @@ xfs_init(void)
77 "xfs_bmap_free_item"); 77 "xfs_bmap_free_item");
78 xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t), 78 xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
79 "xfs_btree_cur"); 79 "xfs_btree_cur");
80 xfs_inode_zone = kmem_zone_init(sizeof(xfs_inode_t), "xfs_inode");
81 xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans"); 80 xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
82 xfs_da_state_zone = 81 xfs_da_state_zone =
83 kmem_zone_init(sizeof(xfs_da_state_t), "xfs_da_state"); 82 kmem_zone_init(sizeof(xfs_da_state_t), "xfs_da_state");
84 xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf"); 83 xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
84 xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
85 xfs_acl_zone_init(xfs_acl_zone, "xfs_acl");
85 86
86 /* 87 /*
87 * The size of the zone allocated buf log item is the maximum 88 * The size of the zone allocated buf log item is the maximum
@@ -93,17 +94,30 @@ xfs_init(void)
93 (((XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK) / 94 (((XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK) /
94 NBWORD) * sizeof(int))), 95 NBWORD) * sizeof(int))),
95 "xfs_buf_item"); 96 "xfs_buf_item");
96 xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) + 97 xfs_efd_zone =
97 ((XFS_EFD_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))), 98 kmem_zone_init((sizeof(xfs_efd_log_item_t) +
99 ((XFS_EFD_MAX_FAST_EXTENTS - 1) *
100 sizeof(xfs_extent_t))),
98 "xfs_efd_item"); 101 "xfs_efd_item");
99 xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) + 102 xfs_efi_zone =
100 ((XFS_EFI_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))), 103 kmem_zone_init((sizeof(xfs_efi_log_item_t) +
104 ((XFS_EFI_MAX_FAST_EXTENTS - 1) *
105 sizeof(xfs_extent_t))),
101 "xfs_efi_item"); 106 "xfs_efi_item");
102 xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork"); 107
103 xfs_ili_zone = kmem_zone_init(sizeof(xfs_inode_log_item_t), "xfs_ili"); 108 /*
104 xfs_chashlist_zone = kmem_zone_init(sizeof(xfs_chashlist_t), 109 * These zones warrant special memory allocator hints
105 "xfs_chashlist"); 110 */
106 xfs_acl_zone_init(xfs_acl_zone, "xfs_acl"); 111 xfs_inode_zone =
112 kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
113 KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
114 KM_ZONE_SPREAD, NULL);
115 xfs_ili_zone =
116 kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
117 KM_ZONE_SPREAD, NULL);
118 xfs_chashlist_zone =
119 kmem_zone_init_flags(sizeof(xfs_chashlist_t), "xfs_chashlist",
120 KM_ZONE_SPREAD, NULL);
107 121
108 /* 122 /*
109 * Allocate global trace buffers. 123 * Allocate global trace buffers.
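kmem_zone_init_flags(), used above for the inode, inode-log-item and chashlist zones, presumably funnels allocator hints (hardware cacheline alignment, reclaimable accounting, NUMA "spread") through one wrapper so call sites never name backend slab flags directly; kmem.h, also touched by this merge, carries the declarations. A self-contained sketch of that translation pattern (the flag values and mapping are assumptions, not the kernel's):

/*
 * Sketch of a hint-to-backend-flag translation; values invented for
 * illustration.
 */
#include <stdio.h>

#define KM_ZONE_HWALIGN (1u << 0) /* align objects to cache lines */
#define KM_ZONE_RECLAIM (1u << 1) /* cache is reclaimable under pressure */
#define KM_ZONE_SPREAD  (1u << 2) /* spread allocations across NUMA nodes */

/* stand-ins for the backend allocator's own flag bits */
#define BACKEND_HWCACHE_ALIGN 0x01u
#define BACKEND_RECLAIM_ACCT  0x02u
#define BACKEND_MEM_SPREAD    0x04u

/* Map allocator-neutral zone hints to backend flags in one place,
 * so call sites like the inode zone never name backend bits. */
static unsigned int zone_flags_to_backend(unsigned int hints)
{
    unsigned int f = 0;

    if (hints & KM_ZONE_HWALIGN)
        f |= BACKEND_HWCACHE_ALIGN;
    if (hints & KM_ZONE_RECLAIM)
        f |= BACKEND_RECLAIM_ACCT;
    if (hints & KM_ZONE_SPREAD)
        f |= BACKEND_MEM_SPREAD;
    return f;
}

int main(void)
{
    printf("inode zone flags: %#x\n",
           zone_flags_to_backend(KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
                                 KM_ZONE_SPREAD));
    return 0;
}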
@@ -176,18 +190,18 @@ xfs_cleanup(void)
176 ktrace_free(xfs_alloc_trace_buf); 190 ktrace_free(xfs_alloc_trace_buf);
177#endif 191#endif
178 192
179 kmem_cache_destroy(xfs_bmap_free_item_zone); 193 kmem_zone_destroy(xfs_bmap_free_item_zone);
180 kmem_cache_destroy(xfs_btree_cur_zone); 194 kmem_zone_destroy(xfs_btree_cur_zone);
181 kmem_cache_destroy(xfs_inode_zone); 195 kmem_zone_destroy(xfs_inode_zone);
182 kmem_cache_destroy(xfs_trans_zone); 196 kmem_zone_destroy(xfs_trans_zone);
183 kmem_cache_destroy(xfs_da_state_zone); 197 kmem_zone_destroy(xfs_da_state_zone);
184 kmem_cache_destroy(xfs_dabuf_zone); 198 kmem_zone_destroy(xfs_dabuf_zone);
185 kmem_cache_destroy(xfs_buf_item_zone); 199 kmem_zone_destroy(xfs_buf_item_zone);
186 kmem_cache_destroy(xfs_efd_zone); 200 kmem_zone_destroy(xfs_efd_zone);
187 kmem_cache_destroy(xfs_efi_zone); 201 kmem_zone_destroy(xfs_efi_zone);
188 kmem_cache_destroy(xfs_ifork_zone); 202 kmem_zone_destroy(xfs_ifork_zone);
189 kmem_cache_destroy(xfs_ili_zone); 203 kmem_zone_destroy(xfs_ili_zone);
190 kmem_cache_destroy(xfs_chashlist_zone); 204 kmem_zone_destroy(xfs_chashlist_zone);
191} 205}
192 206
193/* 207/*
@@ -258,8 +272,6 @@ xfs_start_flags(
258 mp->m_inoadd = XFS_INO64_OFFSET; 272 mp->m_inoadd = XFS_INO64_OFFSET;
259 } 273 }
260#endif 274#endif
261 if (ap->flags & XFSMNT_NOATIME)
262 mp->m_flags |= XFS_MOUNT_NOATIME;
263 if (ap->flags & XFSMNT_RETERR) 275 if (ap->flags & XFSMNT_RETERR)
264 mp->m_flags |= XFS_MOUNT_RETERR; 276 mp->m_flags |= XFS_MOUNT_RETERR;
265 if (ap->flags & XFSMNT_NOALIGN) 277 if (ap->flags & XFSMNT_NOALIGN)
@@ -620,7 +632,7 @@ xfs_quiesce_fs(
620 xfs_mount_t *mp) 632 xfs_mount_t *mp)
621{ 633{
622 int count = 0, pincount; 634 int count = 0, pincount;
623 635
624 xfs_refcache_purge_mp(mp); 636 xfs_refcache_purge_mp(mp);
625 xfs_flush_buftarg(mp->m_ddev_targp, 0); 637 xfs_flush_buftarg(mp->m_ddev_targp, 0);
626 xfs_finish_reclaim_all(mp, 0); 638 xfs_finish_reclaim_all(mp, 0);
@@ -631,7 +643,7 @@ xfs_quiesce_fs(
631 * meta data (typically directory updates). 643 * meta data (typically directory updates).
632 * Which then must be flushed and logged before 644 * Which then must be flushed and logged before
633 * we can write the unmount record. 645 * we can write the unmount record.
634 */ 646 */
635 do { 647 do {
636 xfs_syncsub(mp, SYNC_REMOUNT|SYNC_ATTR|SYNC_WAIT, 0, NULL); 648 xfs_syncsub(mp, SYNC_REMOUNT|SYNC_ATTR|SYNC_WAIT, 0, NULL);
637 pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1); 649 pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
@@ -654,11 +666,6 @@ xfs_mntupdate(
654 xfs_mount_t *mp = XFS_BHVTOM(bdp); 666 xfs_mount_t *mp = XFS_BHVTOM(bdp);
655 int error; 667 int error;
656 668
657 if (args->flags & XFSMNT_NOATIME)
658 mp->m_flags |= XFS_MOUNT_NOATIME;
659 else
660 mp->m_flags &= ~XFS_MOUNT_NOATIME;
661
662 if (args->flags & XFSMNT_BARRIER) 669 if (args->flags & XFSMNT_BARRIER)
663 mp->m_flags |= XFS_MOUNT_BARRIER; 670 mp->m_flags |= XFS_MOUNT_BARRIER;
664 else 671 else
@@ -814,6 +821,7 @@ xfs_statvfs(
814 821
815 statp->f_type = XFS_SB_MAGIC; 822 statp->f_type = XFS_SB_MAGIC;
816 823
824 xfs_icsb_sync_counters_lazy(mp);
817 s = XFS_SB_LOCK(mp); 825 s = XFS_SB_LOCK(mp);
818 statp->f_bsize = sbp->sb_blocksize; 826 statp->f_bsize = sbp->sb_blocksize;
819 lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0; 827 lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
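xfs_icsb_sync_counters_lazy() is called above before the superblock fields are copied out, folding the per-cpu counts back into the global copy; "lazy" because statfs tolerates slightly stale totals, so the per-cpu slots can be read without taking each slot's lock. A hypothetical userspace model (names invented, not XFS code):

/*
 * Hypothetical model of a "lazy" sync: take the superblock lock to
 * stabilise the global copy, then add in the per-cpu slots without
 * locking each one, accepting slightly stale per-cpu reads.
 */
#include <pthread.h>
#include <stdint.h>

#define NCPUS 4

static pthread_mutex_t sb_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t sb_fdblocks;             /* global superblock copy */
static uint64_t percpu_fdblocks[NCPUS];  /* per-cpu deltas */

static uint64_t sync_counters_lazy(void)
{
    uint64_t total;
    int cpu;

    pthread_mutex_lock(&sb_lock);
    total = sb_fdblocks;
    for (cpu = 0; cpu < NCPUS; cpu++)
        total += percpu_fdblocks[cpu];   /* unlocked, approximate */
    pthread_mutex_unlock(&sb_lock);
    return total;
}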
@@ -1221,7 +1229,7 @@ xfs_sync_inodes(
1221 xfs_iunlock(ip, XFS_ILOCK_SHARED); 1229 xfs_iunlock(ip, XFS_ILOCK_SHARED);
1222 1230
1223 error = xfs_itobp(mp, NULL, ip, 1231 error = xfs_itobp(mp, NULL, ip,
1224 &dip, &bp, 0); 1232 &dip, &bp, 0, 0);
1225 if (!error) { 1233 if (!error) {
1226 xfs_buf_relse(bp); 1234 xfs_buf_relse(bp);
1227 } else { 1235 } else {
@@ -1690,10 +1698,7 @@ xfs_parseargs(
1690 int iosize; 1698 int iosize;
1691 1699
1692 args->flags2 |= XFSMNT2_COMPAT_IOSIZE; 1700 args->flags2 |= XFSMNT2_COMPAT_IOSIZE;
1693 1701 args->flags |= XFSMNT_IDELETE;
1694#if 0 /* XXX: off by default, until some remaining issues ironed out */
1695 args->flags |= XFSMNT_IDELETE; /* default to on */
1696#endif
1697 1702
1698 if (!options) 1703 if (!options)
1699 goto done; 1704 goto done;
@@ -1903,7 +1908,6 @@ xfs_showargs(
1903 { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID }, 1908 { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID },
1904 { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY }, 1909 { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY },
1905 { XFS_MOUNT_OSYNCISOSYNC, "," MNTOPT_OSYNCISOSYNC }, 1910 { XFS_MOUNT_OSYNCISOSYNC, "," MNTOPT_OSYNCISOSYNC },
1906 { XFS_MOUNT_IDELETE, "," MNTOPT_NOIKEEP },
1907 { 0, NULL } 1911 { 0, NULL }
1908 }; 1912 };
1909 struct proc_xfs_info *xfs_infop; 1913 struct proc_xfs_info *xfs_infop;
@@ -1939,6 +1943,8 @@ xfs_showargs(
1939 seq_printf(m, "," MNTOPT_SWIDTH "=%d", 1943 seq_printf(m, "," MNTOPT_SWIDTH "=%d",
1940 (int)XFS_FSB_TO_BB(mp, mp->m_swidth)); 1944 (int)XFS_FSB_TO_BB(mp, mp->m_swidth));
1941 1945
1946 if (!(mp->m_flags & XFS_MOUNT_IDELETE))
1947 seq_printf(m, "," MNTOPT_IKEEP);
1942 if (!(mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE)) 1948 if (!(mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE))
1943 seq_printf(m, "," MNTOPT_LARGEIO); 1949 seq_printf(m, "," MNTOPT_LARGEIO);
1944 if (mp->m_flags & XFS_MOUNT_BARRIER) 1950 if (mp->m_flags & XFS_MOUNT_BARRIER)
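The showargs change works against a flag/string table (struct proc_xfs_info) plus explicit tests for options whose default sense is inverted, like ikeep above. A compact stand-alone illustration of the same table-driven pattern (flag values and option names invented, not the XFS definitions):

/*
 * Stand-alone illustration of table-driven mount-option printing.
 */
#include <stdio.h>

#define MNT_NOUUID  (1u << 0)
#define MNT_IDELETE (1u << 1)  /* set by default; "ikeep" clears it */

struct opt_info { unsigned int flag; const char *str; };

static void show_args(unsigned int flags)
{
    static const struct opt_info opts[] = {
        { MNT_NOUUID, ",nouuid" },
        { 0, NULL }
    };
    const struct opt_info *p;

    for (p = opts; p->flag; p++)
        if (flags & p->flag)
            fputs(p->str, stdout);
    /* inverted-default option: printed only when the flag is clear */
    if (!(flags & MNT_IDELETE))
        fputs(",ikeep", stdout);
    putchar('\n');
}

int main(void)
{
    show_args(MNT_NOUUID);
    return 0;
}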
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index eaab355f5a89..a478f42e63ff 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -615,6 +615,7 @@ xfs_setattr(
615 code = xfs_igrow_start(ip, vap->va_size, credp); 615 code = xfs_igrow_start(ip, vap->va_size, credp);
616 } 616 }
617 xfs_iunlock(ip, XFS_ILOCK_EXCL); 617 xfs_iunlock(ip, XFS_ILOCK_EXCL);
618 vn_iowait(vp); /* wait for the completion of any pending DIOs */
618 if (!code) 619 if (!code)
619 code = xfs_itruncate_data(ip, vap->va_size); 620 code = xfs_itruncate_data(ip, vap->va_size);
620 if (code) { 621 if (code) {
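vn_iowait() is inserted above between dropping the ilock and truncating, so any direct I/O still in flight completes before blocks are freed. A hypothetical model using a pending-I/O counter and a wait; pthread primitives stand in for the kernel's atomic counter and wait queue (not XFS code):

/*
 * Hypothetical model: direct I/Os bump a per-file counter; truncate
 * waits for it to drain before changing the size.
 */
#include <pthread.h>

struct vnode_io {
    pthread_mutex_t lock;
    pthread_cond_t  drained;
    int             iocount;  /* outstanding direct I/Os */
};

static struct vnode_io vio = {
    .lock = PTHREAD_MUTEX_INITIALIZER,
    .drained = PTHREAD_COND_INITIALIZER,
};

static void io_start(struct vnode_io *v)   /* called as a DIO is issued */
{
    pthread_mutex_lock(&v->lock);
    v->iocount++;
    pthread_mutex_unlock(&v->lock);
}

static void io_finish(struct vnode_io *v)  /* called at DIO completion */
{
    pthread_mutex_lock(&v->lock);
    if (--v->iocount == 0)
        pthread_cond_broadcast(&v->drained);
    pthread_mutex_unlock(&v->lock);
}

static void io_wait(struct vnode_io *v)    /* vn_iowait() analogue */
{
    pthread_mutex_lock(&v->lock);
    while (v->iocount > 0)
        pthread_cond_wait(&v->drained, &v->lock);
    pthread_mutex_unlock(&v->lock);
}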
@@ -1556,7 +1557,7 @@ xfs_release(
1556 if ((error = xfs_inactive_free_eofblocks(mp, ip))) 1557 if ((error = xfs_inactive_free_eofblocks(mp, ip)))
1557 return error; 1558 return error;
1558 /* Update linux inode block count after free above */ 1559 /* Update linux inode block count after free above */
1559 LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp, 1560 vn_to_inode(vp)->i_blocks = XFS_FSB_TO_BB(mp,
1560 ip->i_d.di_nblocks + ip->i_delayed_blks); 1561 ip->i_d.di_nblocks + ip->i_delayed_blks);
1561 } 1562 }
1562 } 1563 }
@@ -1637,7 +1638,7 @@ xfs_inactive(
1637 if ((error = xfs_inactive_free_eofblocks(mp, ip))) 1638 if ((error = xfs_inactive_free_eofblocks(mp, ip)))
1638 return VN_INACTIVE_CACHE; 1639 return VN_INACTIVE_CACHE;
1639 /* Update linux inode block count after free above */ 1640 /* Update linux inode block count after free above */
1640 LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp, 1641 vn_to_inode(vp)->i_blocks = XFS_FSB_TO_BB(mp,
1641 ip->i_d.di_nblocks + ip->i_delayed_blks); 1642 ip->i_d.di_nblocks + ip->i_delayed_blks);
1642 } 1643 }
1643 goto out; 1644 goto out;
@@ -3186,7 +3187,7 @@ xfs_rmdir(
3186 3187
3187 /* Fall through to std_return with error = 0 or the errno 3188 /* Fall through to std_return with error = 0 or the errno
3188 * from xfs_trans_commit. */ 3189 * from xfs_trans_commit. */
3189std_return: 3190 std_return:
3190 if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_POSTREMOVE)) { 3191 if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_POSTREMOVE)) {
3191 (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, 3192 (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE,
3192 dir_vp, DM_RIGHT_NULL, 3193 dir_vp, DM_RIGHT_NULL,
@@ -3196,12 +3197,12 @@ std_return:
3196 } 3197 }
3197 return error; 3198 return error;
3198 3199
3199error1: 3200 error1:
3200 xfs_bmap_cancel(&free_list); 3201 xfs_bmap_cancel(&free_list);
3201 cancel_flags |= XFS_TRANS_ABORT; 3202 cancel_flags |= XFS_TRANS_ABORT;
3202 /* FALLTHROUGH */ 3203 /* FALLTHROUGH */
3203 3204
3204error_return: 3205 error_return:
3205 xfs_trans_cancel(tp, cancel_flags); 3206 xfs_trans_cancel(tp, cancel_flags);
3206 goto std_return; 3207 goto std_return;
3207} 3208}
@@ -4310,8 +4311,10 @@ xfs_free_file_space(
4310 ASSERT(attr_flags & ATTR_NOLOCK ? attr_flags & ATTR_DMI : 1); 4311 ASSERT(attr_flags & ATTR_NOLOCK ? attr_flags & ATTR_DMI : 1);
4311 if (attr_flags & ATTR_NOLOCK) 4312 if (attr_flags & ATTR_NOLOCK)
4312 need_iolock = 0; 4313 need_iolock = 0;
4313 if (need_iolock) 4314 if (need_iolock) {
4314 xfs_ilock(ip, XFS_IOLOCK_EXCL); 4315 xfs_ilock(ip, XFS_IOLOCK_EXCL);
4316 vn_iowait(vp); /* wait for the completion of any pending DIOs */
4317 }
4315 4318
4316 rounding = MAX((__uint8_t)(1 << mp->m_sb.sb_blocklog), 4319 rounding = MAX((__uint8_t)(1 << mp->m_sb.sb_blocklog),
4317 (__uint8_t)NBPP); 4320 (__uint8_t)NBPP);