Diffstat
-rw-r--r--  fs/xfs/Makefile                  | 151
-rw-r--r--  fs/xfs/Makefile-linux-2.6        | 141
-rw-r--r--  fs/xfs/linux-2.6/kmem.c          |  23
-rw-r--r--  fs/xfs/linux-2.6/kmem.h          |  23
-rw-r--r--  fs/xfs/linux-2.6/spin.h          |   3
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c      | 259
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.h      |  50
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c       | 117
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.h       |  12
-rw-r--r--  fs/xfs/linux-2.6/xfs_file.c      |  90
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c     |  18
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl32.c   |  65
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c      |  15
-rw-r--r--  fs/xfs/linux-2.6/xfs_linux.h     |  13
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.c       |   3
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.h       |   7
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c     | 166
-rw-r--r--  fs/xfs/linux-2.6/xfs_vfs.c       |   1
-rw-r--r--  fs/xfs/linux-2.6/xfs_vfs.h       |   2
-rw-r--r--  fs/xfs/linux-2.6/xfs_vnode.c     | 251
-rw-r--r--  fs/xfs/linux-2.6/xfs_vnode.h     |  60
-rw-r--r--  fs/xfs/quota/Makefile            |   1
-rw-r--r--  fs/xfs/quota/Makefile-linux-2.6  |  53
-rw-r--r--  fs/xfs/quota/xfs_dquot.c         |  43
-rw-r--r--  fs/xfs/quota/xfs_dquot.h         |  16
-rw-r--r--  fs/xfs/quota/xfs_dquot_item.c    |   1
-rw-r--r--  fs/xfs/quota/xfs_qm.c            |  26
-rw-r--r--  fs/xfs/quota/xfs_qm.h            |   2
-rw-r--r--  fs/xfs/quota/xfs_qm_bhv.c        |  44
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c   |  16
-rw-r--r--  fs/xfs/support/debug.c           |   1
-rw-r--r--  fs/xfs/xfs_acl.c                 |   6
-rw-r--r--  fs/xfs/xfs_bmap.c                |  12
-rw-r--r--  fs/xfs/xfs_buf_item.c            |   4
-rw-r--r--  fs/xfs/xfs_dmapi.h               |   2
-rw-r--r--  fs/xfs/xfs_extfree_item.c        |   2
-rw-r--r--  fs/xfs/xfs_iget.c                |  35
-rw-r--r--  fs/xfs/xfs_inode.c               |   3
-rw-r--r--  fs/xfs/xfs_inode_item.c          |   9
-rw-r--r--  fs/xfs/xfs_iomap.c               |  22
-rw-r--r--  fs/xfs/xfs_log.c                 | 215
-rw-r--r--  fs/xfs/xfs_log.h                 |  38
-rw-r--r--  fs/xfs/xfs_log_priv.h            |  68
-rw-r--r--  fs/xfs/xfs_log_recover.c         |   2
-rw-r--r--  fs/xfs/xfs_qmops.c               |  78
-rw-r--r--  fs/xfs/xfs_quota.h               |  17
-rw-r--r--  fs/xfs/xfs_trans.c               |   3
-rw-r--r--  fs/xfs/xfs_trans.h               |   2
-rw-r--r--  fs/xfs/xfs_trans_ail.c           |   2
-rw-r--r--  fs/xfs/xfs_trans_buf.c           |  23
-rw-r--r--  fs/xfs/xfs_vfsops.c              |  62
-rw-r--r--  fs/xfs/xfs_vnodeops.c            |  92
52 files changed, 1320 insertions(+), 1050 deletions(-)
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index d3ff78354638..49e3e7e5e3dc 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -1,150 +1 @@
1# include $(TOPDIR)/fs/xfs/Makefile-linux-$(VERSION).$(PATCHLEVEL)
2# Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
3#
4# This program is free software; you can redistribute it and/or modify it
5# under the terms of version 2 of the GNU General Public License as
6# published by the Free Software Foundation.
7#
8# This program is distributed in the hope that it would be useful, but
9# WITHOUT ANY WARRANTY; without even the implied warranty of
10# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11#
12# Further, this software is distributed without any warranty that it is
13# free of the rightful claim of any third person regarding infringement
14# or the like. Any license provided herein, whether implied or
15# otherwise, applies only to this software file. Patent licenses, if
16# any, provided herein do not apply to combinations of this program with
17# other software, or any other product whatsoever.
18#
19# You should have received a copy of the GNU General Public License along
20# with this program; if not, write the Free Software Foundation, Inc., 59
21# Temple Place - Suite 330, Boston MA 02111-1307, USA.
22#
23# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24# Mountain View, CA 94043, or:
25#
26# http://www.sgi.com
27#
28# For further information regarding this notice, see:
29#
30# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
31#
32
33EXTRA_CFLAGS += -Ifs/xfs -Ifs/xfs/linux-2.6 -funsigned-char
34
35ifeq ($(CONFIG_XFS_DEBUG),y)
36 EXTRA_CFLAGS += -g -DSTATIC="" -DDEBUG
37 EXTRA_CFLAGS += -DPAGEBUF_LOCK_TRACKING
38endif
39ifeq ($(CONFIG_XFS_TRACE),y)
40 EXTRA_CFLAGS += -DXFS_ALLOC_TRACE
41 EXTRA_CFLAGS += -DXFS_ATTR_TRACE
42 EXTRA_CFLAGS += -DXFS_BLI_TRACE
43 EXTRA_CFLAGS += -DXFS_BMAP_TRACE
44 EXTRA_CFLAGS += -DXFS_BMBT_TRACE
45 EXTRA_CFLAGS += -DXFS_DIR_TRACE
46 EXTRA_CFLAGS += -DXFS_DIR2_TRACE
47 EXTRA_CFLAGS += -DXFS_DQUOT_TRACE
48 EXTRA_CFLAGS += -DXFS_ILOCK_TRACE
49 EXTRA_CFLAGS += -DXFS_LOG_TRACE
50 EXTRA_CFLAGS += -DXFS_RW_TRACE
51 EXTRA_CFLAGS += -DPAGEBUF_TRACE
52 EXTRA_CFLAGS += -DXFS_VNODE_TRACE
53endif
54
55obj-$(CONFIG_XFS_FS) += xfs.o
56
57xfs-$(CONFIG_XFS_QUOTA) += $(addprefix quota/, \
58 xfs_dquot.o \
59 xfs_dquot_item.o \
60 xfs_trans_dquot.o \
61 xfs_qm_syscalls.o \
62 xfs_qm_bhv.o \
63 xfs_qm.o)
64ifeq ($(CONFIG_XFS_QUOTA),y)
65xfs-$(CONFIG_PROC_FS) += quota/xfs_qm_stats.o
66endif
67
68xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o
69xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o
70xfs-$(CONFIG_PROC_FS) += linux-2.6/xfs_stats.o
71xfs-$(CONFIG_SYSCTL) += linux-2.6/xfs_sysctl.o
72xfs-$(CONFIG_COMPAT) += linux-2.6/xfs_ioctl32.o
73xfs-$(CONFIG_XFS_EXPORT) += linux-2.6/xfs_export.o
74
75
76xfs-y += xfs_alloc.o \
77 xfs_alloc_btree.o \
78 xfs_attr.o \
79 xfs_attr_leaf.o \
80 xfs_behavior.o \
81 xfs_bit.o \
82 xfs_bmap.o \
83 xfs_bmap_btree.o \
84 xfs_btree.o \
85 xfs_buf_item.o \
86 xfs_da_btree.o \
87 xfs_dir.o \
88 xfs_dir2.o \
89 xfs_dir2_block.o \
90 xfs_dir2_data.o \
91 xfs_dir2_leaf.o \
92 xfs_dir2_node.o \
93 xfs_dir2_sf.o \
94 xfs_dir_leaf.o \
95 xfs_error.o \
96 xfs_extfree_item.o \
97 xfs_fsops.o \
98 xfs_ialloc.o \
99 xfs_ialloc_btree.o \
100 xfs_iget.o \
101 xfs_inode.o \
102 xfs_inode_item.o \
103 xfs_iocore.o \
104 xfs_iomap.o \
105 xfs_itable.o \
106 xfs_dfrag.o \
107 xfs_log.o \
108 xfs_log_recover.o \
109 xfs_macros.o \
110 xfs_mount.o \
111 xfs_rename.o \
112 xfs_trans.o \
113 xfs_trans_ail.o \
114 xfs_trans_buf.o \
115 xfs_trans_extfree.o \
116 xfs_trans_inode.o \
117 xfs_trans_item.o \
118 xfs_utils.o \
119 xfs_vfsops.o \
120 xfs_vnodeops.o \
121 xfs_rw.o \
122 xfs_dmops.o \
123 xfs_qmops.o
124
125xfs-$(CONFIG_XFS_TRACE) += xfs_dir2_trace.o
126
127# Objects in linux-2.6/
128xfs-y += $(addprefix linux-2.6/, \
129 kmem.o \
130 xfs_aops.o \
131 xfs_buf.o \
132 xfs_file.o \
133 xfs_fs_subr.o \
134 xfs_globals.o \
135 xfs_ioctl.o \
136 xfs_iops.o \
137 xfs_lrw.o \
138 xfs_super.o \
139 xfs_vfs.o \
140 xfs_vnode.o)
141
142# Objects in support/
143xfs-y += $(addprefix support/, \
144 debug.o \
145 move.o \
146 qsort.o \
147 uuid.o)
148
149xfs-$(CONFIG_XFS_TRACE) += support/ktrace.o
150
diff --git a/fs/xfs/Makefile-linux-2.6 b/fs/xfs/Makefile-linux-2.6
new file mode 100644
index 000000000000..fbfcbe5a7cda
--- /dev/null
+++ b/fs/xfs/Makefile-linux-2.6
@@ -0,0 +1,141 @@
1#
2# Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
3#
4# This program is free software; you can redistribute it and/or modify it
5# under the terms of version 2 of the GNU General Public License as
6# published by the Free Software Foundation.
7#
8# This program is distributed in the hope that it would be useful, but
9# WITHOUT ANY WARRANTY; without even the implied warranty of
10# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11#
12# Further, this software is distributed without any warranty that it is
13# free of the rightful claim of any third person regarding infringement
14# or the like. Any license provided herein, whether implied or
15# otherwise, applies only to this software file. Patent licenses, if
16# any, provided herein do not apply to combinations of this program with
17# other software, or any other product whatsoever.
18#
19# You should have received a copy of the GNU General Public License along
20# with this program; if not, write the Free Software Foundation, Inc., 59
21# Temple Place - Suite 330, Boston MA 02111-1307, USA.
22#
23# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24# Mountain View, CA 94043, or:
25#
26# http://www.sgi.com
27#
28# For further information regarding this notice, see:
29#
30# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
31#
32
33EXTRA_CFLAGS += -Ifs/xfs -Ifs/xfs/linux-2.6 -funsigned-char
34
35XFS_LINUX := linux-2.6
36
37ifeq ($(CONFIG_XFS_DEBUG),y)
38 EXTRA_CFLAGS += -g -DSTATIC="" -DDEBUG
39 EXTRA_CFLAGS += -DPAGEBUF_LOCK_TRACKING
40endif
41ifeq ($(CONFIG_XFS_TRACE),y)
42 EXTRA_CFLAGS += -DXFS_ALLOC_TRACE
43 EXTRA_CFLAGS += -DXFS_ATTR_TRACE
44 EXTRA_CFLAGS += -DXFS_BLI_TRACE
45 EXTRA_CFLAGS += -DXFS_BMAP_TRACE
46 EXTRA_CFLAGS += -DXFS_BMBT_TRACE
47 EXTRA_CFLAGS += -DXFS_DIR_TRACE
48 EXTRA_CFLAGS += -DXFS_DIR2_TRACE
49 EXTRA_CFLAGS += -DXFS_DQUOT_TRACE
50 EXTRA_CFLAGS += -DXFS_ILOCK_TRACE
51 EXTRA_CFLAGS += -DXFS_LOG_TRACE
52 EXTRA_CFLAGS += -DXFS_RW_TRACE
53 EXTRA_CFLAGS += -DPAGEBUF_TRACE
54 EXTRA_CFLAGS += -DXFS_VNODE_TRACE
55endif
56
57obj-$(CONFIG_XFS_FS) += xfs.o
58obj-$(CONFIG_XFS_QUOTA) += quota/
59
60xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o
61xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o
62xfs-$(CONFIG_PROC_FS) += $(XFS_LINUX)/xfs_stats.o
63xfs-$(CONFIG_SYSCTL) += $(XFS_LINUX)/xfs_sysctl.o
64xfs-$(CONFIG_COMPAT) += $(XFS_LINUX)/xfs_ioctl32.o
65xfs-$(CONFIG_XFS_EXPORT) += $(XFS_LINUX)/xfs_export.o
66
67
68xfs-y += xfs_alloc.o \
69 xfs_alloc_btree.o \
70 xfs_attr.o \
71 xfs_attr_leaf.o \
72 xfs_behavior.o \
73 xfs_bit.o \
74 xfs_bmap.o \
75 xfs_bmap_btree.o \
76 xfs_btree.o \
77 xfs_buf_item.o \
78 xfs_da_btree.o \
79 xfs_dir.o \
80 xfs_dir2.o \
81 xfs_dir2_block.o \
82 xfs_dir2_data.o \
83 xfs_dir2_leaf.o \
84 xfs_dir2_node.o \
85 xfs_dir2_sf.o \
86 xfs_dir_leaf.o \
87 xfs_error.o \
88 xfs_extfree_item.o \
89 xfs_fsops.o \
90 xfs_ialloc.o \
91 xfs_ialloc_btree.o \
92 xfs_iget.o \
93 xfs_inode.o \
94 xfs_inode_item.o \
95 xfs_iocore.o \
96 xfs_iomap.o \
97 xfs_itable.o \
98 xfs_dfrag.o \
99 xfs_log.o \
100 xfs_log_recover.o \
101 xfs_macros.o \
102 xfs_mount.o \
103 xfs_rename.o \
104 xfs_trans.o \
105 xfs_trans_ail.o \
106 xfs_trans_buf.o \
107 xfs_trans_extfree.o \
108 xfs_trans_inode.o \
109 xfs_trans_item.o \
110 xfs_utils.o \
111 xfs_vfsops.o \
112 xfs_vnodeops.o \
113 xfs_rw.o \
114 xfs_dmops.o \
115 xfs_qmops.o
116
117xfs-$(CONFIG_XFS_TRACE) += xfs_dir2_trace.o
118
119# Objects in linux/
120xfs-y += $(addprefix $(XFS_LINUX)/, \
121 kmem.o \
122 xfs_aops.o \
123 xfs_buf.o \
124 xfs_file.o \
125 xfs_fs_subr.o \
126 xfs_globals.o \
127 xfs_ioctl.o \
128 xfs_iops.o \
129 xfs_lrw.o \
130 xfs_super.o \
131 xfs_vfs.o \
132 xfs_vnode.o)
133
134# Objects in support/
135xfs-y += $(addprefix support/, \
136 debug.o \
137 move.o \
138 uuid.o)
139
140xfs-$(CONFIG_XFS_TRACE) += support/ktrace.o
141
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c
index 364ea8c386b1..4b184559f231 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/linux-2.6/kmem.c
@@ -45,11 +45,11 @@
45 45
46 46
47void * 47void *
48kmem_alloc(size_t size, int flags) 48kmem_alloc(size_t size, unsigned int __nocast flags)
49{ 49{
50 int retries = 0; 50 int retries = 0;
51 int lflags = kmem_flags_convert(flags); 51 unsigned int lflags = kmem_flags_convert(flags);
52 void *ptr; 52 void *ptr;
53 53
54 do { 54 do {
55 if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS) 55 if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS)
@@ -67,7 +67,7 @@ kmem_alloc(size_t size, int flags)
67} 67}
68 68
69void * 69void *
70kmem_zalloc(size_t size, int flags) 70kmem_zalloc(size_t size, unsigned int __nocast flags)
71{ 71{
72 void *ptr; 72 void *ptr;
73 73
@@ -89,7 +89,8 @@ kmem_free(void *ptr, size_t size)
89} 89}
90 90
91void * 91void *
92kmem_realloc(void *ptr, size_t newsize, size_t oldsize, int flags) 92kmem_realloc(void *ptr, size_t newsize, size_t oldsize,
93 unsigned int __nocast flags)
93{ 94{
94 void *new; 95 void *new;
95 96
@@ -104,11 +105,11 @@ kmem_realloc(void *ptr, size_t newsize, size_t oldsize, int flags)
104} 105}
105 106
106void * 107void *
107kmem_zone_alloc(kmem_zone_t *zone, int flags) 108kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
108{ 109{
109 int retries = 0; 110 int retries = 0;
110 int lflags = kmem_flags_convert(flags); 111 unsigned int lflags = kmem_flags_convert(flags);
111 void *ptr; 112 void *ptr;
112 113
113 do { 114 do {
114 ptr = kmem_cache_alloc(zone, lflags); 115 ptr = kmem_cache_alloc(zone, lflags);
@@ -123,7 +124,7 @@ kmem_zone_alloc(kmem_zone_t *zone, int flags)
123} 124}
124 125
125void * 126void *
126kmem_zone_zalloc(kmem_zone_t *zone, int flags) 127kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags)
127{ 128{
128 void *ptr; 129 void *ptr;
129 130
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index 1397b669b059..109fcf27e256 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -39,10 +39,10 @@
39/* 39/*
40 * memory management routines 40 * memory management routines
41 */ 41 */
42#define KM_SLEEP 0x0001 42#define KM_SLEEP 0x0001u
43#define KM_NOSLEEP 0x0002 43#define KM_NOSLEEP 0x0002u
44#define KM_NOFS 0x0004 44#define KM_NOFS 0x0004u
45#define KM_MAYFAIL 0x0008 45#define KM_MAYFAIL 0x0008u
46 46
47#define kmem_zone kmem_cache_s 47#define kmem_zone kmem_cache_s
48#define kmem_zone_t kmem_cache_t 48#define kmem_zone_t kmem_cache_t
@@ -81,9 +81,9 @@ typedef unsigned long xfs_pflags_t;
81 *(NSTATEP) = *(OSTATEP); \ 81 *(NSTATEP) = *(OSTATEP); \
82} while (0) 82} while (0)
83 83
84static __inline unsigned int kmem_flags_convert(int flags) 84static __inline unsigned int kmem_flags_convert(unsigned int __nocast flags)
85{ 85{
86 int lflags = __GFP_NOWARN; /* we'll report problems, if need be */ 86 unsigned int lflags = __GFP_NOWARN; /* we'll report problems, if need be */
87 87
88#ifdef DEBUG 88#ifdef DEBUG
89 if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) { 89 if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) {
@@ -125,12 +125,13 @@ kmem_zone_destroy(kmem_zone_t *zone)
125 BUG(); 125 BUG();
126} 126}
127 127
128extern void *kmem_zone_zalloc(kmem_zone_t *, int); 128extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
129extern void *kmem_zone_alloc(kmem_zone_t *, int); 129extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
130 130
131extern void *kmem_alloc(size_t, int); 131extern void *kmem_alloc(size_t, unsigned int __nocast);
132extern void *kmem_realloc(void *, size_t, size_t, int); 132extern void *kmem_realloc(void *, size_t, size_t,
133extern void *kmem_zalloc(size_t, int); 133 unsigned int __nocast);
134extern void *kmem_zalloc(size_t, unsigned int __nocast);
134extern void kmem_free(void *, size_t); 135extern void kmem_free(void *, size_t);
135 136
136typedef struct shrinker *kmem_shaker_t; 137typedef struct shrinker *kmem_shaker_t;
diff --git a/fs/xfs/linux-2.6/spin.h b/fs/xfs/linux-2.6/spin.h
index bcf60a0b8df0..0039504069a5 100644
--- a/fs/xfs/linux-2.6/spin.h
+++ b/fs/xfs/linux-2.6/spin.h
@@ -45,6 +45,9 @@
45typedef spinlock_t lock_t; 45typedef spinlock_t lock_t;
46 46
47#define SPLDECL(s) unsigned long s 47#define SPLDECL(s) unsigned long s
48#ifndef DEFINE_SPINLOCK
49#define DEFINE_SPINLOCK(s) spinlock_t s = SPIN_LOCK_UNLOCKED
50#endif
48 51
49#define spinlock_init(lock, name) spin_lock_init(lock) 52#define spinlock_init(lock, name) spin_lock_init(lock)
50#define spinlock_destroy(lock) 53#define spinlock_destroy(lock)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index a3a4b5aaf5d9..c6c077978fe3 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -104,66 +104,114 @@ xfs_page_trace(
104#define xfs_page_trace(tag, inode, page, mask) 104#define xfs_page_trace(tag, inode, page, mask)
105#endif 105#endif
106 106
107void 107/*
108linvfs_unwritten_done( 108 * Schedule IO completion handling on a xfsdatad if this was
109 struct buffer_head *bh, 109 * the final hold on this ioend.
110 int uptodate) 110 */
111STATIC void
112xfs_finish_ioend(
113 xfs_ioend_t *ioend)
111{ 114{
112 xfs_buf_t *pb = (xfs_buf_t *)bh->b_private; 115 if (atomic_dec_and_test(&ioend->io_remaining))
116 queue_work(xfsdatad_workqueue, &ioend->io_work);
117}
113 118
114 ASSERT(buffer_unwritten(bh)); 119STATIC void
115 bh->b_end_io = NULL; 120xfs_destroy_ioend(
116 clear_buffer_unwritten(bh); 121 xfs_ioend_t *ioend)
117 if (!uptodate) 122{
118 pagebuf_ioerror(pb, EIO); 123 vn_iowake(ioend->io_vnode);
119 if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) { 124 mempool_free(ioend, xfs_ioend_pool);
120 pagebuf_iodone(pb, 1, 1);
121 }
122 end_buffer_async_write(bh, uptodate);
123} 125}
124 126
125/* 127/*
126 * Issue transactions to convert a buffer range from unwritten 128 * Issue transactions to convert a buffer range from unwritten
127 * to written extents (buffered IO). 129 * to written extents.
128 */ 130 */
129STATIC void 131STATIC void
130linvfs_unwritten_convert( 132xfs_end_bio_unwritten(
131 xfs_buf_t *bp) 133 void *data)
132{ 134{
133 vnode_t *vp = XFS_BUF_FSPRIVATE(bp, vnode_t *); 135 xfs_ioend_t *ioend = data;
134 int error; 136 vnode_t *vp = ioend->io_vnode;
137 xfs_off_t offset = ioend->io_offset;
138 size_t size = ioend->io_size;
139 struct buffer_head *bh, *next;
140 int error;
141
142 if (ioend->io_uptodate)
143 VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
144
145 /* ioend->io_buffer_head is only non-NULL for buffered I/O */
146 for (bh = ioend->io_buffer_head; bh; bh = next) {
147 next = bh->b_private;
148
149 bh->b_end_io = NULL;
150 clear_buffer_unwritten(bh);
151 end_buffer_async_write(bh, ioend->io_uptodate);
152 }
135 153
136 BUG_ON(atomic_read(&bp->pb_hold) < 1); 154 xfs_destroy_ioend(ioend);
137 VOP_BMAP(vp, XFS_BUF_OFFSET(bp), XFS_BUF_SIZE(bp),
138 BMAPI_UNWRITTEN, NULL, NULL, error);
139 XFS_BUF_SET_FSPRIVATE(bp, NULL);
140 XFS_BUF_CLR_IODONE_FUNC(bp);
141 XFS_BUF_UNDATAIO(bp);
142 iput(LINVFS_GET_IP(vp));
143 pagebuf_iodone(bp, 0, 0);
144} 155}
145 156
146/* 157/*
147 * Issue transactions to convert a buffer range from unwritten 158 * Allocate and initialise an IO completion structure.
148 * to written extents (direct IO). 159 * We need to track unwritten extent write completion here initially.
160 * We'll need to extend this for updating the ondisk inode size later
161 * (vs. incore size).
149 */ 162 */
150STATIC void 163STATIC xfs_ioend_t *
151linvfs_unwritten_convert_direct( 164xfs_alloc_ioend(
152 struct kiocb *iocb, 165 struct inode *inode)
153 loff_t offset,
154 ssize_t size,
155 void *private)
156{ 166{
157 struct inode *inode = iocb->ki_filp->f_dentry->d_inode; 167 xfs_ioend_t *ioend;
158 ASSERT(!private || inode == (struct inode *)private);
159 168
160 /* private indicates an unwritten extent lay beneath this IO */ 169 ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
161 if (private && size > 0) {
162 vnode_t *vp = LINVFS_GET_VP(inode);
163 int error;
164 170
165 VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error); 171 /*
166 } 172 * Set the count to 1 initially, which will prevent an I/O
173 * completion callback from happening before we have started
174 * all the I/O from calling the completion routine too early.
175 */
176 atomic_set(&ioend->io_remaining, 1);
177 ioend->io_uptodate = 1; /* cleared if any I/O fails */
178 ioend->io_vnode = LINVFS_GET_VP(inode);
179 ioend->io_buffer_head = NULL;
180 atomic_inc(&ioend->io_vnode->v_iocount);
181 ioend->io_offset = 0;
182 ioend->io_size = 0;
183
184 INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
185
186 return ioend;
187}
188
189void
190linvfs_unwritten_done(
191 struct buffer_head *bh,
192 int uptodate)
193{
194 xfs_ioend_t *ioend = bh->b_private;
195 static spinlock_t unwritten_done_lock = SPIN_LOCK_UNLOCKED;
196 unsigned long flags;
197
198 ASSERT(buffer_unwritten(bh));
199 bh->b_end_io = NULL;
200
201 if (!uptodate)
202 ioend->io_uptodate = 0;
203
204 /*
205 * Deep magic here. We reuse b_private in the buffer_heads to build
206 * a chain for completing the I/O from user context after we've issued
207 * a transaction to convert the unwritten extent.
208 */
209 spin_lock_irqsave(&unwritten_done_lock, flags);
210 bh->b_private = ioend->io_buffer_head;
211 ioend->io_buffer_head = bh;
212 spin_unlock_irqrestore(&unwritten_done_lock, flags);
213
214 xfs_finish_ioend(ioend);
167} 215}
168 216
169STATIC int 217STATIC int
@@ -255,7 +303,7 @@ xfs_probe_unwritten_page(
255 struct address_space *mapping, 303 struct address_space *mapping,
256 pgoff_t index, 304 pgoff_t index,
257 xfs_iomap_t *iomapp, 305 xfs_iomap_t *iomapp,
258 xfs_buf_t *pb, 306 xfs_ioend_t *ioend,
259 unsigned long max_offset, 307 unsigned long max_offset,
260 unsigned long *fsbs, 308 unsigned long *fsbs,
261 unsigned int bbits) 309 unsigned int bbits)
@@ -283,7 +331,7 @@ xfs_probe_unwritten_page(
283 break; 331 break;
284 xfs_map_at_offset(page, bh, p_offset, bbits, iomapp); 332 xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
285 set_buffer_unwritten_io(bh); 333 set_buffer_unwritten_io(bh);
286 bh->b_private = pb; 334 bh->b_private = ioend;
287 p_offset += bh->b_size; 335 p_offset += bh->b_size;
288 (*fsbs)++; 336 (*fsbs)++;
289 } while ((bh = bh->b_this_page) != head); 337 } while ((bh = bh->b_this_page) != head);
@@ -434,34 +482,15 @@ xfs_map_unwritten(
434{ 482{
435 struct buffer_head *bh = curr; 483 struct buffer_head *bh = curr;
436 xfs_iomap_t *tmp; 484 xfs_iomap_t *tmp;
437 xfs_buf_t *pb; 485 xfs_ioend_t *ioend;
438 loff_t offset, size; 486 loff_t offset;
439 unsigned long nblocks = 0; 487 unsigned long nblocks = 0;
440 488
441 offset = start_page->index; 489 offset = start_page->index;
442 offset <<= PAGE_CACHE_SHIFT; 490 offset <<= PAGE_CACHE_SHIFT;
443 offset += p_offset; 491 offset += p_offset;
444 492
445 /* get an "empty" pagebuf to manage IO completion 493 ioend = xfs_alloc_ioend(inode);
446 * Proper values will be set before returning */
447 pb = pagebuf_lookup(iomapp->iomap_target, 0, 0, 0);
448 if (!pb)
449 return -EAGAIN;
450
451 /* Take a reference to the inode to prevent it from
452 * being reclaimed while we have outstanding unwritten
453 * extent IO on it.
454 */
455 if ((igrab(inode)) != inode) {
456 pagebuf_free(pb);
457 return -EAGAIN;
458 }
459
460 /* Set the count to 1 initially, this will stop an I/O
461 * completion callout which happens before we have started
462 * all the I/O from calling pagebuf_iodone too early.
463 */
464 atomic_set(&pb->pb_io_remaining, 1);
465 494
466 /* First map forwards in the page consecutive buffers 495 /* First map forwards in the page consecutive buffers
467 * covering this unwritten extent 496 * covering this unwritten extent
@@ -474,12 +503,12 @@ xfs_map_unwritten(
474 break; 503 break;
475 xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp); 504 xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
476 set_buffer_unwritten_io(bh); 505 set_buffer_unwritten_io(bh);
477 bh->b_private = pb; 506 bh->b_private = ioend;
478 p_offset += bh->b_size; 507 p_offset += bh->b_size;
479 nblocks++; 508 nblocks++;
480 } while ((bh = bh->b_this_page) != head); 509 } while ((bh = bh->b_this_page) != head);
481 510
482 atomic_add(nblocks, &pb->pb_io_remaining); 511 atomic_add(nblocks, &ioend->io_remaining);
483 512
484 /* If we reached the end of the page, map forwards in any 513 /* If we reached the end of the page, map forwards in any
485 * following pages which are also covered by this extent. 514 * following pages which are also covered by this extent.
@@ -496,13 +525,13 @@ xfs_map_unwritten(
496 tloff = min(tlast, tloff); 525 tloff = min(tlast, tloff);
497 for (tindex = start_page->index + 1; tindex < tloff; tindex++) { 526 for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
498 page = xfs_probe_unwritten_page(mapping, 527 page = xfs_probe_unwritten_page(mapping,
499 tindex, iomapp, pb, 528 tindex, iomapp, ioend,
500 PAGE_CACHE_SIZE, &bs, bbits); 529 PAGE_CACHE_SIZE, &bs, bbits);
501 if (!page) 530 if (!page)
502 break; 531 break;
503 nblocks += bs; 532 nblocks += bs;
504 atomic_add(bs, &pb->pb_io_remaining); 533 atomic_add(bs, &ioend->io_remaining);
505 xfs_convert_page(inode, page, iomapp, wbc, pb, 534 xfs_convert_page(inode, page, iomapp, wbc, ioend,
506 startio, all_bh); 535 startio, all_bh);
507 /* stop if converting the next page might add 536 /* stop if converting the next page might add
508 * enough blocks that the corresponding byte 537 * enough blocks that the corresponding byte
@@ -514,12 +543,12 @@ xfs_map_unwritten(
514 if (tindex == tlast && 543 if (tindex == tlast &&
515 (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) { 544 (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
516 page = xfs_probe_unwritten_page(mapping, 545 page = xfs_probe_unwritten_page(mapping,
517 tindex, iomapp, pb, 546 tindex, iomapp, ioend,
518 pg_offset, &bs, bbits); 547 pg_offset, &bs, bbits);
519 if (page) { 548 if (page) {
520 nblocks += bs; 549 nblocks += bs;
521 atomic_add(bs, &pb->pb_io_remaining); 550 atomic_add(bs, &ioend->io_remaining);
522 xfs_convert_page(inode, page, iomapp, wbc, pb, 551 xfs_convert_page(inode, page, iomapp, wbc, ioend,
523 startio, all_bh); 552 startio, all_bh);
524 if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits)) 553 if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
525 goto enough; 554 goto enough;
@@ -528,21 +557,9 @@ xfs_map_unwritten(
528 } 557 }
529 558
530enough: 559enough:
531 size = nblocks; /* NB: using 64bit number here */ 560 ioend->io_size = (xfs_off_t)nblocks << block_bits;
532 size <<= block_bits; /* convert fsb's to byte range */ 561 ioend->io_offset = offset;
533 562 xfs_finish_ioend(ioend);
534 XFS_BUF_DATAIO(pb);
535 XFS_BUF_ASYNC(pb);
536 XFS_BUF_SET_SIZE(pb, size);
537 XFS_BUF_SET_COUNT(pb, size);
538 XFS_BUF_SET_OFFSET(pb, offset);
539 XFS_BUF_SET_FSPRIVATE(pb, LINVFS_GET_VP(inode));
540 XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_convert);
541
542 if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
543 pagebuf_iodone(pb, 1, 1);
544 }
545
546 return 0; 563 return 0;
547} 564}
548 565
@@ -787,7 +804,7 @@ xfs_page_state_convert(
787 continue; 804 continue;
788 if (!iomp) { 805 if (!iomp) {
789 err = xfs_map_blocks(inode, offset, len, &iomap, 806 err = xfs_map_blocks(inode, offset, len, &iomap,
790 BMAPI_READ|BMAPI_IGNSTATE); 807 BMAPI_WRITE|BMAPI_IGNSTATE);
791 if (err) { 808 if (err) {
792 goto error; 809 goto error;
793 } 810 }
@@ -1028,6 +1045,44 @@ linvfs_get_blocks_direct(
1028 create, 1, BMAPI_WRITE|BMAPI_DIRECT); 1045 create, 1, BMAPI_WRITE|BMAPI_DIRECT);
1029} 1046}
1030 1047
1048STATIC void
1049linvfs_end_io_direct(
1050 struct kiocb *iocb,
1051 loff_t offset,
1052 ssize_t size,
1053 void *private)
1054{
1055 xfs_ioend_t *ioend = iocb->private;
1056
1057 /*
1058 * Non-NULL private data means we need to issue a transaction to
1059 * convert a range from unwritten to written extents. This needs
 1060 * to happen from process context but aio+dio I/O completion
1061 * happens from irq context so we need to defer it to a workqueue.
 1062 * This is not necessary for synchronous direct I/O, but we do
1063 * it anyway to keep the code uniform and simpler.
1064 *
1065 * The core direct I/O code might be changed to always call the
1066 * completion handler in the future, in which case all this can
1067 * go away.
1068 */
1069 if (private && size > 0) {
1070 ioend->io_offset = offset;
1071 ioend->io_size = size;
1072 xfs_finish_ioend(ioend);
1073 } else {
1074 ASSERT(size >= 0);
1075 xfs_destroy_ioend(ioend);
1076 }
1077
1078 /*
 1079 * blockdev_direct_IO can return an error even after the I/O
1080 * completion handler was called. Thus we need to protect
1081 * against double-freeing.
1082 */
1083 iocb->private = NULL;
1084}
1085
1031STATIC ssize_t 1086STATIC ssize_t
1032linvfs_direct_IO( 1087linvfs_direct_IO(
1033 int rw, 1088 int rw,
@@ -1042,16 +1097,23 @@ linvfs_direct_IO(
1042 xfs_iomap_t iomap; 1097 xfs_iomap_t iomap;
1043 int maps = 1; 1098 int maps = 1;
1044 int error; 1099 int error;
1100 ssize_t ret;
1045 1101
1046 VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error); 1102 VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
1047 if (error) 1103 if (error)
1048 return -error; 1104 return -error;
1049 1105
1050 return blockdev_direct_IO_own_locking(rw, iocb, inode, 1106 iocb->private = xfs_alloc_ioend(inode);
1107
1108 ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
1051 iomap.iomap_target->pbr_bdev, 1109 iomap.iomap_target->pbr_bdev,
1052 iov, offset, nr_segs, 1110 iov, offset, nr_segs,
1053 linvfs_get_blocks_direct, 1111 linvfs_get_blocks_direct,
1054 linvfs_unwritten_convert_direct); 1112 linvfs_end_io_direct);
1113
1114 if (unlikely(ret <= 0 && iocb->private))
1115 xfs_destroy_ioend(iocb->private);
1116 return ret;
1055} 1117}
1056 1118
1057 1119
@@ -1202,6 +1264,16 @@ out_unlock:
1202 return error; 1264 return error;
1203} 1265}
1204 1266
1267STATIC int
1268linvfs_invalidate_page(
1269 struct page *page,
1270 unsigned long offset)
1271{
1272 xfs_page_trace(XFS_INVALIDPAGE_ENTER,
1273 page->mapping->host, page, offset);
1274 return block_invalidatepage(page, offset);
1275}
1276
1205/* 1277/*
1206 * Called to move a page into cleanable state - and from there 1278 * Called to move a page into cleanable state - and from there
1207 * to be released. Possibly the page is already clean. We always 1279 * to be released. Possibly the page is already clean. We always
@@ -1279,6 +1351,7 @@ struct address_space_operations linvfs_aops = {
1279 .writepage = linvfs_writepage, 1351 .writepage = linvfs_writepage,
1280 .sync_page = block_sync_page, 1352 .sync_page = block_sync_page,
1281 .releasepage = linvfs_release_page, 1353 .releasepage = linvfs_release_page,
1354 .invalidatepage = linvfs_invalidate_page,
1282 .prepare_write = linvfs_prepare_write, 1355 .prepare_write = linvfs_prepare_write,
1283 .commit_write = generic_commit_write, 1356 .commit_write = generic_commit_write,
1284 .bmap = linvfs_bmap, 1357 .bmap = linvfs_bmap,
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
new file mode 100644
index 000000000000..2fa62974a04d
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -0,0 +1,50 @@
1/*
2 * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it would be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11 *
12 * Further, this software is distributed without any warranty that it is
13 * free of the rightful claim of any third person regarding infringement
14 * or the like. Any license provided herein, whether implied or
15 * otherwise, applies only to this software file. Patent licenses, if
16 * any, provided herein do not apply to combinations of this program with
17 * other software, or any other product whatsoever.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
22 *
23 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24 * Mountain View, CA 94043, or:
25 *
26 * http://www.sgi.com
27 *
28 * For further information regarding this notice, see:
29 *
30 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
31 */
32#ifndef __XFS_AOPS_H__
33#define __XFS_AOPS_H__
34
35extern struct workqueue_struct *xfsdatad_workqueue;
36extern mempool_t *xfs_ioend_pool;
37
38typedef void (*xfs_ioend_func_t)(void *);
39
40typedef struct xfs_ioend {
41 unsigned int io_uptodate; /* I/O status register */
42 atomic_t io_remaining; /* hold count */
43 struct vnode *io_vnode; /* file being written to */
44 struct buffer_head *io_buffer_head;/* buffer linked list head */
45 size_t io_size; /* size of the extent */
46 xfs_off_t io_offset; /* offset in the file */
47 struct work_struct io_work; /* xfsdatad work queue */
48} xfs_ioend_t;
49
50#endif /* __XFS_AOPS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index df0cba239dd5..655bf4a78afe 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as 5 * under the terms of version 2 of the GNU General Public License as
@@ -54,6 +54,7 @@
54#include <linux/percpu.h> 54#include <linux/percpu.h>
55#include <linux/blkdev.h> 55#include <linux/blkdev.h>
56#include <linux/hash.h> 56#include <linux/hash.h>
57#include <linux/kthread.h>
57 58
58#include "xfs_linux.h" 59#include "xfs_linux.h"
59 60
@@ -67,7 +68,7 @@ STATIC int xfsbufd_wakeup(int, unsigned int);
67STATIC void pagebuf_delwri_queue(xfs_buf_t *, int); 68STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
68 69
69STATIC struct workqueue_struct *xfslogd_workqueue; 70STATIC struct workqueue_struct *xfslogd_workqueue;
70STATIC struct workqueue_struct *xfsdatad_workqueue; 71struct workqueue_struct *xfsdatad_workqueue;
71 72
72/* 73/*
73 * Pagebuf debugging 74 * Pagebuf debugging
@@ -590,8 +591,10 @@ found:
590 PB_SET_OWNER(pb); 591 PB_SET_OWNER(pb);
591 } 592 }
592 593
593 if (pb->pb_flags & PBF_STALE) 594 if (pb->pb_flags & PBF_STALE) {
595 ASSERT((pb->pb_flags & _PBF_DELWRI_Q) == 0);
594 pb->pb_flags &= PBF_MAPPED; 596 pb->pb_flags &= PBF_MAPPED;
597 }
595 PB_TRACE(pb, "got_lock", 0); 598 PB_TRACE(pb, "got_lock", 0);
596 XFS_STATS_INC(pb_get_locked); 599 XFS_STATS_INC(pb_get_locked);
597 return (pb); 600 return (pb);
@@ -700,25 +703,6 @@ xfs_buf_read_flags(
700} 703}
701 704
702/* 705/*
703 * Create a skeletal pagebuf (no pages associated with it).
704 */
705xfs_buf_t *
706pagebuf_lookup(
707 xfs_buftarg_t *target,
708 loff_t ioff,
709 size_t isize,
710 page_buf_flags_t flags)
711{
712 xfs_buf_t *pb;
713
714 pb = pagebuf_allocate(flags);
715 if (pb) {
716 _pagebuf_initialize(pb, target, ioff, isize, flags);
717 }
718 return pb;
719}
720
721/*
722 * If we are not low on memory then do the readahead in a deadlock 706 * If we are not low on memory then do the readahead in a deadlock
723 * safe manner. 707 * safe manner.
724 */ 708 */
@@ -913,22 +897,23 @@ pagebuf_rele(
913 do_free = 0; 897 do_free = 0;
914 } 898 }
915 899
916 if (pb->pb_flags & PBF_DELWRI) { 900 if (pb->pb_flags & PBF_FS_MANAGED) {
917 pb->pb_flags |= PBF_ASYNC;
918 atomic_inc(&pb->pb_hold);
919 pagebuf_delwri_queue(pb, 0);
920 do_free = 0;
921 } else if (pb->pb_flags & PBF_FS_MANAGED) {
922 do_free = 0; 901 do_free = 0;
923 } 902 }
924 903
925 if (do_free) { 904 if (do_free) {
905 ASSERT((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == 0);
926 list_del_init(&pb->pb_hash_list); 906 list_del_init(&pb->pb_hash_list);
927 spin_unlock(&hash->bh_lock); 907 spin_unlock(&hash->bh_lock);
928 pagebuf_free(pb); 908 pagebuf_free(pb);
929 } else { 909 } else {
930 spin_unlock(&hash->bh_lock); 910 spin_unlock(&hash->bh_lock);
931 } 911 }
912 } else {
913 /*
914 * Catch reference count leaks
915 */
916 ASSERT(atomic_read(&pb->pb_hold) >= 0);
932 } 917 }
933} 918}
934 919
@@ -1006,13 +991,24 @@ pagebuf_lock(
1006 * pagebuf_unlock 991 * pagebuf_unlock
1007 * 992 *
1008 * pagebuf_unlock releases the lock on the buffer object created by 993 * pagebuf_unlock releases the lock on the buffer object created by
1009 * pagebuf_lock or pagebuf_cond_lock (not any 994 * pagebuf_lock or pagebuf_cond_lock (not any pinning of underlying pages
1010 * pinning of underlying pages created by pagebuf_pin). 995 * created by pagebuf_pin).
996 *
997 * If the buffer is marked delwri but is not queued, do so before we
998 * unlock the buffer as we need to set flags correctly. We also need to
999 * take a reference for the delwri queue because the unlocker is going to
1000 * drop their's and they don't know we just queued it.
1011 */ 1001 */
1012void 1002void
1013pagebuf_unlock( /* unlock buffer */ 1003pagebuf_unlock( /* unlock buffer */
1014 xfs_buf_t *pb) /* buffer to unlock */ 1004 xfs_buf_t *pb) /* buffer to unlock */
1015{ 1005{
1006 if ((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == PBF_DELWRI) {
1007 atomic_inc(&pb->pb_hold);
1008 pb->pb_flags |= PBF_ASYNC;
1009 pagebuf_delwri_queue(pb, 0);
1010 }
1011
1016 PB_CLEAR_OWNER(pb); 1012 PB_CLEAR_OWNER(pb);
1017 up(&pb->pb_sema); 1013 up(&pb->pb_sema);
1018 PB_TRACE(pb, "unlock", 0); 1014 PB_TRACE(pb, "unlock", 0);
@@ -1249,8 +1245,8 @@ bio_end_io_pagebuf(
1249 int error) 1245 int error)
1250{ 1246{
1251 xfs_buf_t *pb = (xfs_buf_t *)bio->bi_private; 1247 xfs_buf_t *pb = (xfs_buf_t *)bio->bi_private;
1252 unsigned int i, blocksize = pb->pb_target->pbr_bsize; 1248 unsigned int blocksize = pb->pb_target->pbr_bsize;
1253 struct bio_vec *bvec = bio->bi_io_vec; 1249 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1254 1250
1255 if (bio->bi_size) 1251 if (bio->bi_size)
1256 return 1; 1252 return 1;
@@ -1258,10 +1254,12 @@ bio_end_io_pagebuf(
1258 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) 1254 if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1259 pb->pb_error = EIO; 1255 pb->pb_error = EIO;
1260 1256
1261 for (i = 0; i < bio->bi_vcnt; i++, bvec++) { 1257 do {
1262 struct page *page = bvec->bv_page; 1258 struct page *page = bvec->bv_page;
1263 1259
1264 if (pb->pb_error) { 1260 if (unlikely(pb->pb_error)) {
1261 if (pb->pb_flags & PBF_READ)
1262 ClearPageUptodate(page);
1265 SetPageError(page); 1263 SetPageError(page);
1266 } else if (blocksize == PAGE_CACHE_SIZE) { 1264 } else if (blocksize == PAGE_CACHE_SIZE) {
1267 SetPageUptodate(page); 1265 SetPageUptodate(page);
@@ -1270,10 +1268,13 @@ bio_end_io_pagebuf(
1270 set_page_region(page, bvec->bv_offset, bvec->bv_len); 1268 set_page_region(page, bvec->bv_offset, bvec->bv_len);
1271 } 1269 }
1272 1270
1271 if (--bvec >= bio->bi_io_vec)
1272 prefetchw(&bvec->bv_page->flags);
1273
1273 if (_pagebuf_iolocked(pb)) { 1274 if (_pagebuf_iolocked(pb)) {
1274 unlock_page(page); 1275 unlock_page(page);
1275 } 1276 }
1276 } 1277 } while (bvec >= bio->bi_io_vec);
1277 1278
1278 _pagebuf_iodone(pb, 1); 1279 _pagebuf_iodone(pb, 1);
1279 bio_put(bio); 1280 bio_put(bio);
@@ -1511,6 +1512,11 @@ again:
1511 ASSERT(btp == bp->pb_target); 1512 ASSERT(btp == bp->pb_target);
1512 if (!(bp->pb_flags & PBF_FS_MANAGED)) { 1513 if (!(bp->pb_flags & PBF_FS_MANAGED)) {
1513 spin_unlock(&hash->bh_lock); 1514 spin_unlock(&hash->bh_lock);
1515 /*
1516 * Catch superblock reference count leaks
1517 * immediately
1518 */
1519 BUG_ON(bp->pb_bn == 0);
1514 delay(100); 1520 delay(100);
1515 goto again; 1521 goto again;
1516 } 1522 }
@@ -1686,17 +1692,20 @@ pagebuf_delwri_queue(
1686 int unlock) 1692 int unlock)
1687{ 1693{
1688 PB_TRACE(pb, "delwri_q", (long)unlock); 1694 PB_TRACE(pb, "delwri_q", (long)unlock);
1689 ASSERT(pb->pb_flags & PBF_DELWRI); 1695 ASSERT((pb->pb_flags & (PBF_DELWRI|PBF_ASYNC)) ==
1696 (PBF_DELWRI|PBF_ASYNC));
1690 1697
1691 spin_lock(&pbd_delwrite_lock); 1698 spin_lock(&pbd_delwrite_lock);
1692 /* If already in the queue, dequeue and place at tail */ 1699 /* If already in the queue, dequeue and place at tail */
1693 if (!list_empty(&pb->pb_list)) { 1700 if (!list_empty(&pb->pb_list)) {
1701 ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
1694 if (unlock) { 1702 if (unlock) {
1695 atomic_dec(&pb->pb_hold); 1703 atomic_dec(&pb->pb_hold);
1696 } 1704 }
1697 list_del(&pb->pb_list); 1705 list_del(&pb->pb_list);
1698 } 1706 }
1699 1707
1708 pb->pb_flags |= _PBF_DELWRI_Q;
1700 list_add_tail(&pb->pb_list, &pbd_delwrite_queue); 1709 list_add_tail(&pb->pb_list, &pbd_delwrite_queue);
1701 pb->pb_queuetime = jiffies; 1710 pb->pb_queuetime = jiffies;
1702 spin_unlock(&pbd_delwrite_lock); 1711 spin_unlock(&pbd_delwrite_lock);
@@ -1713,10 +1722,11 @@ pagebuf_delwri_dequeue(
1713 1722
1714 spin_lock(&pbd_delwrite_lock); 1723 spin_lock(&pbd_delwrite_lock);
1715 if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) { 1724 if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) {
1725 ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
1716 list_del_init(&pb->pb_list); 1726 list_del_init(&pb->pb_list);
1717 dequeued = 1; 1727 dequeued = 1;
1718 } 1728 }
1719 pb->pb_flags &= ~PBF_DELWRI; 1729 pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
1720 spin_unlock(&pbd_delwrite_lock); 1730 spin_unlock(&pbd_delwrite_lock);
1721 1731
1722 if (dequeued) 1732 if (dequeued)
@@ -1733,9 +1743,7 @@ pagebuf_runall_queues(
1733} 1743}
1734 1744
1735/* Defines for pagebuf daemon */ 1745/* Defines for pagebuf daemon */
1736STATIC DECLARE_COMPLETION(xfsbufd_done);
1737STATIC struct task_struct *xfsbufd_task; 1746STATIC struct task_struct *xfsbufd_task;
1738STATIC int xfsbufd_active;
1739STATIC int xfsbufd_force_flush; 1747STATIC int xfsbufd_force_flush;
1740STATIC int xfsbufd_force_sleep; 1748STATIC int xfsbufd_force_sleep;
1741 1749
@@ -1761,14 +1769,8 @@ xfsbufd(
1761 xfs_buftarg_t *target; 1769 xfs_buftarg_t *target;
1762 xfs_buf_t *pb, *n; 1770 xfs_buf_t *pb, *n;
1763 1771
1764 /* Set up the thread */
1765 daemonize("xfsbufd");
1766 current->flags |= PF_MEMALLOC; 1772 current->flags |= PF_MEMALLOC;
1767 1773
1768 xfsbufd_task = current;
1769 xfsbufd_active = 1;
1770 barrier();
1771
1772 INIT_LIST_HEAD(&tmp); 1774 INIT_LIST_HEAD(&tmp);
1773 do { 1775 do {
1774 if (unlikely(freezing(current))) { 1776 if (unlikely(freezing(current))) {
@@ -1795,7 +1797,7 @@ xfsbufd(
1795 break; 1797 break;
1796 } 1798 }
1797 1799
1798 pb->pb_flags &= ~PBF_DELWRI; 1800 pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
1799 pb->pb_flags |= PBF_WRITE; 1801 pb->pb_flags |= PBF_WRITE;
1800 list_move(&pb->pb_list, &tmp); 1802 list_move(&pb->pb_list, &tmp);
1801 } 1803 }
@@ -1816,9 +1818,9 @@ xfsbufd(
1816 purge_addresses(); 1818 purge_addresses();
1817 1819
1818 xfsbufd_force_flush = 0; 1820 xfsbufd_force_flush = 0;
1819 } while (xfsbufd_active); 1821 } while (!kthread_should_stop());
1820 1822
1821 complete_and_exit(&xfsbufd_done, 0); 1823 return 0;
1822} 1824}
1823 1825
1824/* 1826/*
@@ -1845,15 +1847,13 @@ xfs_flush_buftarg(
1845 if (pb->pb_target != target) 1847 if (pb->pb_target != target)
1846 continue; 1848 continue;
1847 1849
1848 ASSERT(pb->pb_flags & PBF_DELWRI); 1850 ASSERT(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q));
1849 PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb)); 1851 PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb));
1850 if (pagebuf_ispin(pb)) { 1852 if (pagebuf_ispin(pb)) {
1851 pincount++; 1853 pincount++;
1852 continue; 1854 continue;
1853 } 1855 }
1854 1856
1855 pb->pb_flags &= ~PBF_DELWRI;
1856 pb->pb_flags |= PBF_WRITE;
1857 list_move(&pb->pb_list, &tmp); 1857 list_move(&pb->pb_list, &tmp);
1858 } 1858 }
1859 spin_unlock(&pbd_delwrite_lock); 1859 spin_unlock(&pbd_delwrite_lock);
@@ -1862,12 +1862,14 @@ xfs_flush_buftarg(
1862 * Dropped the delayed write list lock, now walk the temporary list 1862 * Dropped the delayed write list lock, now walk the temporary list
1863 */ 1863 */
1864 list_for_each_entry_safe(pb, n, &tmp, pb_list) { 1864 list_for_each_entry_safe(pb, n, &tmp, pb_list) {
1865 pagebuf_lock(pb);
1866 pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
1867 pb->pb_flags |= PBF_WRITE;
1865 if (wait) 1868 if (wait)
1866 pb->pb_flags &= ~PBF_ASYNC; 1869 pb->pb_flags &= ~PBF_ASYNC;
1867 else 1870 else
1868 list_del_init(&pb->pb_list); 1871 list_del_init(&pb->pb_list);
1869 1872
1870 pagebuf_lock(pb);
1871 pagebuf_iostrategy(pb); 1873 pagebuf_iostrategy(pb);
1872 } 1874 }
1873 1875
@@ -1901,9 +1903,11 @@ xfs_buf_daemons_start(void)
1901 if (!xfsdatad_workqueue) 1903 if (!xfsdatad_workqueue)
1902 goto out_destroy_xfslogd_workqueue; 1904 goto out_destroy_xfslogd_workqueue;
1903 1905
1904 error = kernel_thread(xfsbufd, NULL, CLONE_FS|CLONE_FILES); 1906 xfsbufd_task = kthread_run(xfsbufd, NULL, "xfsbufd");
1905 if (error < 0) 1907 if (IS_ERR(xfsbufd_task)) {
1908 error = PTR_ERR(xfsbufd_task);
1906 goto out_destroy_xfsdatad_workqueue; 1909 goto out_destroy_xfsdatad_workqueue;
1910 }
1907 return 0; 1911 return 0;
1908 1912
1909 out_destroy_xfsdatad_workqueue: 1913 out_destroy_xfsdatad_workqueue:
@@ -1920,10 +1924,7 @@ xfs_buf_daemons_start(void)
1920STATIC void 1924STATIC void
1921xfs_buf_daemons_stop(void) 1925xfs_buf_daemons_stop(void)
1922{ 1926{
1923 xfsbufd_active = 0; 1927 kthread_stop(xfsbufd_task);
1924 barrier();
1925 wait_for_completion(&xfsbufd_done);
1926
1927 destroy_workqueue(xfslogd_workqueue); 1928 destroy_workqueue(xfslogd_workqueue);
1928 destroy_workqueue(xfsdatad_workqueue); 1929 destroy_workqueue(xfsdatad_workqueue);
1929} 1930}
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 3f8f69a66aea..67c19f799232 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -89,6 +89,7 @@ typedef enum page_buf_flags_e { /* pb_flags values */
89 _PBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */ 89 _PBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */
90 _PBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc() */ 90 _PBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc() */
91 _PBF_RUN_QUEUES = (1 << 19),/* run block device task queue */ 91 _PBF_RUN_QUEUES = (1 << 19),/* run block device task queue */
92 _PBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */
92} page_buf_flags_t; 93} page_buf_flags_t;
93 94
94#define PBF_UPDATE (PBF_READ | PBF_WRITE) 95#define PBF_UPDATE (PBF_READ | PBF_WRITE)
@@ -206,13 +207,6 @@ extern xfs_buf_t *xfs_buf_read_flags( /* allocate and read a buffer */
206#define xfs_buf_read(target, blkno, len, flags) \ 207#define xfs_buf_read(target, blkno, len, flags) \
207 xfs_buf_read_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED) 208 xfs_buf_read_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED)
208 209
209extern xfs_buf_t *pagebuf_lookup(
210 xfs_buftarg_t *,
211 loff_t, /* starting offset of range */
212 size_t, /* length of range */
213 page_buf_flags_t); /* PBF_READ, PBF_WRITE, */
214 /* PBF_FORCEIO, */
215
216extern xfs_buf_t *pagebuf_get_empty( /* allocate pagebuf struct with */ 210extern xfs_buf_t *pagebuf_get_empty( /* allocate pagebuf struct with */
217 /* no memory or disk address */ 211 /* no memory or disk address */
218 size_t len, 212 size_t len,
@@ -344,8 +338,6 @@ extern void pagebuf_trace(
344 338
345 339
346 340
347
348
349/* These are just for xfs_syncsub... it sets an internal variable 341/* These are just for xfs_syncsub... it sets an internal variable
350 * then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t 342 * then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t
351 */ 343 */
@@ -452,7 +444,7 @@ extern void pagebuf_trace(
452 444
453#define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->pb_addr) 445#define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->pb_addr)
454 446
455extern inline xfs_caddr_t xfs_buf_offset(xfs_buf_t *bp, size_t offset) 447static inline xfs_caddr_t xfs_buf_offset(xfs_buf_t *bp, size_t offset)
456{ 448{
457 if (bp->pb_flags & PBF_MAPPED) 449 if (bp->pb_flags & PBF_MAPPED)
458 return XFS_BUF_PTR(bp) + offset; 450 return XFS_BUF_PTR(bp) + offset;
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index f1ce4323f56e..3881622bcf08 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -311,6 +311,31 @@ linvfs_fsync(
311 311
312#define nextdp(dp) ((struct xfs_dirent *)((char *)(dp) + (dp)->d_reclen)) 312#define nextdp(dp) ((struct xfs_dirent *)((char *)(dp) + (dp)->d_reclen))
313 313
314#ifdef CONFIG_XFS_DMAPI
315
316STATIC struct page *
317linvfs_filemap_nopage(
318 struct vm_area_struct *area,
319 unsigned long address,
320 int *type)
321{
322 struct inode *inode = area->vm_file->f_dentry->d_inode;
323 vnode_t *vp = LINVFS_GET_VP(inode);
324 xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
325 int error;
326
327 ASSERT_ALWAYS(vp->v_vfsp->vfs_flag & VFS_DMI);
328
329 error = XFS_SEND_MMAP(mp, area, 0);
330 if (error)
331 return NULL;
332
333 return filemap_nopage(area, address, type);
334}
335
336#endif /* CONFIG_XFS_DMAPI */
337
338
314STATIC int 339STATIC int
315linvfs_readdir( 340linvfs_readdir(
316 struct file *filp, 341 struct file *filp,
@@ -390,14 +415,6 @@ done:
390 return -error; 415 return -error;
391} 416}
392 417
393#ifdef CONFIG_XFS_DMAPI
394STATIC void
395linvfs_mmap_close(
396 struct vm_area_struct *vma)
397{
398 xfs_dm_mm_put(vma);
399}
400#endif /* CONFIG_XFS_DMAPI */
401 418
402STATIC int 419STATIC int
403linvfs_file_mmap( 420linvfs_file_mmap(
@@ -411,16 +428,11 @@ linvfs_file_mmap(
411 428
412 vma->vm_ops = &linvfs_file_vm_ops; 429 vma->vm_ops = &linvfs_file_vm_ops;
413 430
414 if (vp->v_vfsp->vfs_flag & VFS_DMI) {
415 xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
416
417 error = -XFS_SEND_MMAP(mp, vma, 0);
418 if (error)
419 return error;
420#ifdef CONFIG_XFS_DMAPI 431#ifdef CONFIG_XFS_DMAPI
432 if (vp->v_vfsp->vfs_flag & VFS_DMI) {
421 vma->vm_ops = &linvfs_dmapi_file_vm_ops; 433 vma->vm_ops = &linvfs_dmapi_file_vm_ops;
422#endif
423 } 434 }
435#endif /* CONFIG_XFS_DMAPI */
424 436
425 VOP_SETATTR(vp, &va, XFS_AT_UPDATIME, NULL, error); 437 VOP_SETATTR(vp, &va, XFS_AT_UPDATIME, NULL, error);
426 if (!error) 438 if (!error)
@@ -474,6 +486,7 @@ linvfs_ioctl_invis(
474 return error; 486 return error;
475} 487}
476 488
489#ifdef CONFIG_XFS_DMAPI
477#ifdef HAVE_VMOP_MPROTECT 490#ifdef HAVE_VMOP_MPROTECT
478STATIC int 491STATIC int
479linvfs_mprotect( 492linvfs_mprotect(
@@ -494,6 +507,7 @@ linvfs_mprotect(
494 return error; 507 return error;
495} 508}
496#endif /* HAVE_VMOP_MPROTECT */ 509#endif /* HAVE_VMOP_MPROTECT */
510#endif /* CONFIG_XFS_DMAPI */
497 511
498#ifdef HAVE_FOP_OPEN_EXEC 512#ifdef HAVE_FOP_OPEN_EXEC
499/* If the user is attempting to execute a file that is offline then 513/* If the user is attempting to execute a file that is offline then
@@ -528,49 +542,10 @@ open_exec_out:
528} 542}
529#endif /* HAVE_FOP_OPEN_EXEC */ 543#endif /* HAVE_FOP_OPEN_EXEC */
530 544
531/*
532 * Temporary workaround to the AIO direct IO write problem.
533 * This code can go and we can revert to do_sync_write once
534 * the writepage(s) rework is merged.
535 */
536STATIC ssize_t
537linvfs_write(
538 struct file *filp,
539 const char __user *buf,
540 size_t len,
541 loff_t *ppos)
542{
543 struct kiocb kiocb;
544 ssize_t ret;
545
546 init_sync_kiocb(&kiocb, filp);
547 kiocb.ki_pos = *ppos;
548 ret = __linvfs_write(&kiocb, buf, 0, len, kiocb.ki_pos);
549 *ppos = kiocb.ki_pos;
550 return ret;
551}
552STATIC ssize_t
553linvfs_write_invis(
554 struct file *filp,
555 const char __user *buf,
556 size_t len,
557 loff_t *ppos)
558{
559 struct kiocb kiocb;
560 ssize_t ret;
561
562 init_sync_kiocb(&kiocb, filp);
563 kiocb.ki_pos = *ppos;
564 ret = __linvfs_write(&kiocb, buf, IO_INVIS, len, kiocb.ki_pos);
565 *ppos = kiocb.ki_pos;
566 return ret;
567}
568
569
570struct file_operations linvfs_file_operations = { 545struct file_operations linvfs_file_operations = {
571 .llseek = generic_file_llseek, 546 .llseek = generic_file_llseek,
572 .read = do_sync_read, 547 .read = do_sync_read,
573 .write = linvfs_write, 548 .write = do_sync_write,
574 .readv = linvfs_readv, 549 .readv = linvfs_readv,
575 .writev = linvfs_writev, 550 .writev = linvfs_writev,
576 .aio_read = linvfs_aio_read, 551 .aio_read = linvfs_aio_read,
@@ -592,7 +567,7 @@ struct file_operations linvfs_file_operations = {
592struct file_operations linvfs_invis_file_operations = { 567struct file_operations linvfs_invis_file_operations = {
593 .llseek = generic_file_llseek, 568 .llseek = generic_file_llseek,
594 .read = do_sync_read, 569 .read = do_sync_read,
595 .write = linvfs_write_invis, 570 .write = do_sync_write,
596 .readv = linvfs_readv_invis, 571 .readv = linvfs_readv_invis,
597 .writev = linvfs_writev_invis, 572 .writev = linvfs_writev_invis,
598 .aio_read = linvfs_aio_read_invis, 573 .aio_read = linvfs_aio_read_invis,
@@ -626,8 +601,7 @@ static struct vm_operations_struct linvfs_file_vm_ops = {
626 601
627#ifdef CONFIG_XFS_DMAPI 602#ifdef CONFIG_XFS_DMAPI
628static struct vm_operations_struct linvfs_dmapi_file_vm_ops = { 603static struct vm_operations_struct linvfs_dmapi_file_vm_ops = {
629 .close = linvfs_mmap_close, 604 .nopage = linvfs_filemap_nopage,
630 .nopage = filemap_nopage,
631 .populate = filemap_populate, 605 .populate = filemap_populate,
632#ifdef HAVE_VMOP_MPROTECT 606#ifdef HAVE_VMOP_MPROTECT
633 .mprotect = linvfs_mprotect, 607 .mprotect = linvfs_mprotect,
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 05a447e51cc0..6a3326bcd8d0 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -141,13 +141,19 @@ xfs_find_handle(
141 return -XFS_ERROR(EINVAL); 141 return -XFS_ERROR(EINVAL);
142 } 142 }
143 143
144 /* we need the vnode */ 144 switch (inode->i_mode & S_IFMT) {
145 vp = LINVFS_GET_VP(inode); 145 case S_IFREG:
146 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) { 146 case S_IFDIR:
147 case S_IFLNK:
148 break;
149 default:
147 iput(inode); 150 iput(inode);
148 return -XFS_ERROR(EBADF); 151 return -XFS_ERROR(EBADF);
149 } 152 }
150 153
154 /* we need the vnode */
155 vp = LINVFS_GET_VP(inode);
156
151 /* now we can grab the fsid */ 157 /* now we can grab the fsid */
152 memcpy(&handle.ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t)); 158 memcpy(&handle.ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t));
153 hsize = sizeof(xfs_fsid_t); 159 hsize = sizeof(xfs_fsid_t);
@@ -386,7 +392,7 @@ xfs_readlink_by_handle(
386 return -error; 392 return -error;
387 393
388 /* Restrict this handle operation to symlinks only. */ 394 /* Restrict this handle operation to symlinks only. */
389 if (vp->v_type != VLNK) { 395 if (!S_ISLNK(inode->i_mode)) {
390 VN_RELE(vp); 396 VN_RELE(vp);
391 return -XFS_ERROR(EINVAL); 397 return -XFS_ERROR(EINVAL);
392 } 398 }
@@ -982,10 +988,10 @@ xfs_ioc_space(
982 if (vp->v_inode.i_flags & (S_IMMUTABLE|S_APPEND)) 988 if (vp->v_inode.i_flags & (S_IMMUTABLE|S_APPEND))
983 return -XFS_ERROR(EPERM); 989 return -XFS_ERROR(EPERM);
984 990
985 if (!(filp->f_flags & FMODE_WRITE)) 991 if (!(filp->f_mode & FMODE_WRITE))
986 return -XFS_ERROR(EBADF); 992 return -XFS_ERROR(EBADF);
987 993
988 if (vp->v_type != VREG) 994 if (!VN_ISREG(vp))
989 return -XFS_ERROR(EINVAL); 995 return -XFS_ERROR(EINVAL);
990 996
991 if (copy_from_user(&bf, arg, sizeof(bf))) 997 if (copy_from_user(&bf, arg, sizeof(bf)))
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index 0f8f1384eb36..4636b7f86f1f 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -47,8 +47,52 @@
47#include "xfs_vnode.h" 47#include "xfs_vnode.h"
48#include "xfs_dfrag.h" 48#include "xfs_dfrag.h"
49 49
50#define _NATIVE_IOC(cmd, type) \
51 _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))
52
50#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) 53#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
51#define BROKEN_X86_ALIGNMENT 54#define BROKEN_X86_ALIGNMENT
55/* on ia32 l_start is on a 32-bit boundary */
56typedef struct xfs_flock64_32 {
57 __s16 l_type;
58 __s16 l_whence;
59 __s64 l_start __attribute__((packed));
60 /* len == 0 means until end of file */
61 __s64 l_len __attribute__((packed));
62 __s32 l_sysid;
63 __u32 l_pid;
64 __s32 l_pad[4]; /* reserve area */
65} xfs_flock64_32_t;
66
67#define XFS_IOC_ALLOCSP_32 _IOW ('X', 10, struct xfs_flock64_32)
68#define XFS_IOC_FREESP_32 _IOW ('X', 11, struct xfs_flock64_32)
69#define XFS_IOC_ALLOCSP64_32 _IOW ('X', 36, struct xfs_flock64_32)
70#define XFS_IOC_FREESP64_32 _IOW ('X', 37, struct xfs_flock64_32)
71#define XFS_IOC_RESVSP_32 _IOW ('X', 40, struct xfs_flock64_32)
72#define XFS_IOC_UNRESVSP_32 _IOW ('X', 41, struct xfs_flock64_32)
73#define XFS_IOC_RESVSP64_32 _IOW ('X', 42, struct xfs_flock64_32)
74#define XFS_IOC_UNRESVSP64_32 _IOW ('X', 43, struct xfs_flock64_32)
75
76/* just account for different alignment */
77STATIC unsigned long
78xfs_ioctl32_flock(
79 unsigned long arg)
80{
81 xfs_flock64_32_t __user *p32 = (void __user *)arg;
82 xfs_flock64_t __user *p = compat_alloc_user_space(sizeof(*p));
83
84 if (copy_in_user(&p->l_type, &p32->l_type, sizeof(s16)) ||
85 copy_in_user(&p->l_whence, &p32->l_whence, sizeof(s16)) ||
86 copy_in_user(&p->l_start, &p32->l_start, sizeof(s64)) ||
87 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
88 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
89 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
90 copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
91 return -EFAULT;
92
93 return (unsigned long)p;
94}
95
52#else 96#else
53 97
54typedef struct xfs_fsop_bulkreq32 { 98typedef struct xfs_fsop_bulkreq32 {
@@ -103,7 +147,6 @@ __linvfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg)
103/* not handled 147/* not handled
104 case XFS_IOC_FD_TO_HANDLE: 148 case XFS_IOC_FD_TO_HANDLE:
105 case XFS_IOC_PATH_TO_HANDLE: 149 case XFS_IOC_PATH_TO_HANDLE:
106 case XFS_IOC_PATH_TO_HANDLE:
107 case XFS_IOC_PATH_TO_FSHANDLE: 150 case XFS_IOC_PATH_TO_FSHANDLE:
108 case XFS_IOC_OPEN_BY_HANDLE: 151 case XFS_IOC_OPEN_BY_HANDLE:
109 case XFS_IOC_FSSETDM_BY_HANDLE: 152 case XFS_IOC_FSSETDM_BY_HANDLE:
@@ -124,8 +167,21 @@ __linvfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg)
124 case XFS_IOC_ERROR_CLEARALL: 167 case XFS_IOC_ERROR_CLEARALL:
125 break; 168 break;
126 169
127#ifndef BROKEN_X86_ALIGNMENT 170#ifdef BROKEN_X86_ALIGNMENT
128 /* xfs_flock_t and xfs_bstat_t have wrong u32 vs u64 alignment */ 171 /* xfs_flock_t has wrong u32 vs u64 alignment */
172 case XFS_IOC_ALLOCSP_32:
173 case XFS_IOC_FREESP_32:
174 case XFS_IOC_ALLOCSP64_32:
175 case XFS_IOC_FREESP64_32:
176 case XFS_IOC_RESVSP_32:
177 case XFS_IOC_UNRESVSP_32:
178 case XFS_IOC_RESVSP64_32:
179 case XFS_IOC_UNRESVSP64_32:
180 arg = xfs_ioctl32_flock(arg);
181 cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
182 break;
183
184#else /* These are handled fine if no alignment issues */
129 case XFS_IOC_ALLOCSP: 185 case XFS_IOC_ALLOCSP:
130 case XFS_IOC_FREESP: 186 case XFS_IOC_FREESP:
131 case XFS_IOC_RESVSP: 187 case XFS_IOC_RESVSP:
@@ -134,6 +190,9 @@ __linvfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg)
134 case XFS_IOC_FREESP64: 190 case XFS_IOC_FREESP64:
135 case XFS_IOC_RESVSP64: 191 case XFS_IOC_RESVSP64:
136 case XFS_IOC_UNRESVSP64: 192 case XFS_IOC_UNRESVSP64:
193 break;
194
195 /* xfs_bstat_t still has wrong u32 vs u64 alignment */
137 case XFS_IOC_SWAPEXT: 196 case XFS_IOC_SWAPEXT:
138 break; 197 break;
139 198
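
The xfs_ioctl32.c hunks above repack the ia32 xfs_flock64 layout (where the 64-bit l_start/l_len are only 4-byte aligned) into the native layout before re-dispatching the ioctl under a renumbered command. A minimal userspace sketch of why the repack is needed; the struct fields mirror the patch, but the names, plain assignments (instead of copy_in_user), and the main() harness are illustrative only:

	/* Sketch only: shows how the ia32 and native xfs_flock64 layouts differ. */
	#include <stdint.h>
	#include <stdio.h>

	struct flock64_ia32 {                /* ia32: l_start lands on a 4-byte boundary */
		int16_t  l_type;
		int16_t  l_whence;
		int64_t  l_start __attribute__((packed));
		int64_t  l_len   __attribute__((packed));
		int32_t  l_sysid;
		uint32_t l_pid;
		int32_t  l_pad[4];
	};

	struct flock64_native {              /* 64-bit: l_start is 8-byte aligned */
		int16_t  l_type;
		int16_t  l_whence;
		int64_t  l_start;
		int64_t  l_len;
		int32_t  l_sysid;
		uint32_t l_pid;
		int32_t  l_pad[4];
	};

	/* Field-by-field copy, in the same order the compat helper uses. */
	static void repack(const struct flock64_ia32 *in, struct flock64_native *out)
	{
		int i;

		out->l_type   = in->l_type;
		out->l_whence = in->l_whence;
		out->l_start  = in->l_start;
		out->l_len    = in->l_len;
		out->l_sysid  = in->l_sysid;
		out->l_pid    = in->l_pid;
		for (i = 0; i < 4; i++)
			out->l_pad[i] = in->l_pad[i];
	}

	int main(void)
	{
		struct flock64_ia32 in = { 0 };
		struct flock64_native out;

		repack(&in, &out);
		printf("ia32 size %zu, native size %zu\n",
		       sizeof(struct flock64_ia32), sizeof(struct flock64_native));
		return 0;
	}

The differing sizeof results are the whole point: a 32-bit caller hands the kernel a buffer laid out like flock64_ia32, so the compat path must rebuild it before the native ioctl handler can read it.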
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index f252605514eb..77708a8c9f87 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -140,7 +140,6 @@ linvfs_mknod(
140 140
141 memset(&va, 0, sizeof(va)); 141 memset(&va, 0, sizeof(va));
142 va.va_mask = XFS_AT_TYPE|XFS_AT_MODE; 142 va.va_mask = XFS_AT_TYPE|XFS_AT_MODE;
143 va.va_type = IFTOVT(mode);
144 va.va_mode = mode; 143 va.va_mode = mode;
145 144
146 switch (mode & S_IFMT) { 145 switch (mode & S_IFMT) {
@@ -308,14 +307,13 @@ linvfs_symlink(
308 cvp = NULL; 307 cvp = NULL;
309 308
310 memset(&va, 0, sizeof(va)); 309 memset(&va, 0, sizeof(va));
311 va.va_type = VLNK; 310 va.va_mode = S_IFLNK |
312 va.va_mode = irix_symlink_mode ? 0777 & ~current->fs->umask : S_IRWXUGO; 311 (irix_symlink_mode ? 0777 & ~current->fs->umask : S_IRWXUGO);
313 va.va_mask = XFS_AT_TYPE|XFS_AT_MODE; 312 va.va_mask = XFS_AT_TYPE|XFS_AT_MODE;
314 313
315 error = 0; 314 error = 0;
316 VOP_SYMLINK(dvp, dentry, &va, (char *)symname, &cvp, NULL, error); 315 VOP_SYMLINK(dvp, dentry, &va, (char *)symname, &cvp, NULL, error);
317 if (!error && cvp) { 316 if (!error && cvp) {
318 ASSERT(cvp->v_type == VLNK);
319 ip = LINVFS_GET_IP(cvp); 317 ip = LINVFS_GET_IP(cvp);
320 d_instantiate(dentry, ip); 318 d_instantiate(dentry, ip);
321 validate_fields(dir); 319 validate_fields(dir);
@@ -425,9 +423,14 @@ linvfs_follow_link(
425 return NULL; 423 return NULL;
426} 424}
427 425
428static void linvfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p) 426STATIC void
427linvfs_put_link(
428 struct dentry *dentry,
429 struct nameidata *nd,
430 void *p)
429{ 431{
430 char *s = nd_get_link(nd); 432 char *s = nd_get_link(nd);
433
431 if (!IS_ERR(s)) 434 if (!IS_ERR(s))
432 kfree(s); 435 kfree(s);
433} 436}
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 42dc5e4662ed..68c5d885ed9c 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -64,7 +64,6 @@
64#include <sema.h> 64#include <sema.h>
65#include <time.h> 65#include <time.h>
66 66
67#include <support/qsort.h>
68#include <support/ktrace.h> 67#include <support/ktrace.h>
69#include <support/debug.h> 68#include <support/debug.h>
70#include <support/move.h> 69#include <support/move.h>
@@ -104,6 +103,7 @@
104#include <xfs_stats.h> 103#include <xfs_stats.h>
105#include <xfs_sysctl.h> 104#include <xfs_sysctl.h>
106#include <xfs_iops.h> 105#include <xfs_iops.h>
106#include <xfs_aops.h>
107#include <xfs_super.h> 107#include <xfs_super.h>
108#include <xfs_globals.h> 108#include <xfs_globals.h>
109#include <xfs_fs_subr.h> 109#include <xfs_fs_subr.h>
@@ -254,11 +254,18 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
254#define MAX(a,b) (max(a,b)) 254#define MAX(a,b) (max(a,b))
255#define howmany(x, y) (((x)+((y)-1))/(y)) 255#define howmany(x, y) (((x)+((y)-1))/(y))
256#define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) 256#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
257#define qsort(a,n,s,fn) sort(a,n,s,fn,NULL)
257 258
259/*
260 * Various platform dependent calls that don't fit anywhere else
261 */
258#define xfs_stack_trace() dump_stack() 262#define xfs_stack_trace() dump_stack()
259
260#define xfs_itruncate_data(ip, off) \ 263#define xfs_itruncate_data(ip, off) \
261 (-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off))) 264 (-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off)))
265#define xfs_statvfs_fsid(statp, mp) \
266 ({ u64 id = huge_encode_dev((mp)->m_dev); \
267 __kernel_fsid_t *fsid = &(statp)->f_fsid; \
268 (fsid->val[0] = (u32)id, fsid->val[1] = (u32)(id >> 32)); })
262 269
263 270
264/* Move the kernel do_div definition off to one side */ 271/* Move the kernel do_div definition off to one side */
@@ -371,6 +378,4 @@ static inline __uint64_t roundup_64(__uint64_t x, __uint32_t y)
371 return(x * y); 378 return(x * y);
372} 379}
373 380
374#define qsort(a, n, s, cmp) sort(a, n, s, cmp, NULL)
375
376#endif /* __XFS_LINUX__ */ 381#endif /* __XFS_LINUX__ */
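
The new xfs_statvfs_fsid() macro above packs the 64-bit encoded device number into the two 32-bit words of f_fsid. A small plain-C sketch of that split; "encoded_dev" stands in for whatever huge_encode_dev() would return, and the struct/function names are hypothetical:

	/* Sketch: split a 64-bit encoded device id into two 32-bit fsid words. */
	#include <stdint.h>
	#include <stdio.h>

	struct fsid_example { uint32_t val[2]; };

	static void set_fsid(struct fsid_example *fsid, uint64_t encoded_dev)
	{
		fsid->val[0] = (uint32_t)encoded_dev;         /* low 32 bits  */
		fsid->val[1] = (uint32_t)(encoded_dev >> 32); /* high 32 bits */
	}

	int main(void)
	{
		struct fsid_example f;

		set_fsid(&f, 0x0000000800000021ULL);
		printf("fsid = { 0x%x, 0x%x }\n", f.val[0], f.val[1]);
		return 0;
	}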
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index acab58c48043..3b5fabe8dae9 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -660,9 +660,6 @@ xfs_write(
660 (xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ? 660 (xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
661 mp->m_rtdev_targp : mp->m_ddev_targp; 661 mp->m_rtdev_targp : mp->m_ddev_targp;
662 662
663 if (ioflags & IO_ISAIO)
664 return XFS_ERROR(-ENOSYS);
665
666 if ((pos & target->pbr_smask) || (count & target->pbr_smask)) 663 if ((pos & target->pbr_smask) || (count & target->pbr_smask))
667 return XFS_ERROR(-EINVAL); 664 return XFS_ERROR(-EINVAL);
668 665
diff --git a/fs/xfs/linux-2.6/xfs_lrw.h b/fs/xfs/linux-2.6/xfs_lrw.h
index f197a720e394..6294dcdb797c 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.h
+++ b/fs/xfs/linux-2.6/xfs_lrw.h
@@ -70,9 +70,10 @@ struct xfs_iomap;
70#define XFS_SENDFILE_ENTER 21 70#define XFS_SENDFILE_ENTER 21
71#define XFS_WRITEPAGE_ENTER 22 71#define XFS_WRITEPAGE_ENTER 22
72#define XFS_RELEASEPAGE_ENTER 23 72#define XFS_RELEASEPAGE_ENTER 23
73#define XFS_IOMAP_ALLOC_ENTER 24 73#define XFS_INVALIDPAGE_ENTER 24
74#define XFS_IOMAP_ALLOC_MAP 25 74#define XFS_IOMAP_ALLOC_ENTER 25
75#define XFS_IOMAP_UNWRITTEN 26 75#define XFS_IOMAP_ALLOC_MAP 26
76#define XFS_IOMAP_UNWRITTEN 27
76extern void xfs_rw_enter_trace(int, struct xfs_iocore *, 77extern void xfs_rw_enter_trace(int, struct xfs_iocore *,
77 void *, size_t, loff_t, int); 78 void *, size_t, loff_t, int);
78extern void xfs_inval_cached_trace(struct xfs_iocore *, 79extern void xfs_inval_cached_trace(struct xfs_iocore *,
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index f6dd7de25927..0da87bfc9999 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -70,11 +70,15 @@
70#include <linux/namei.h> 70#include <linux/namei.h>
71#include <linux/init.h> 71#include <linux/init.h>
72#include <linux/mount.h> 72#include <linux/mount.h>
73#include <linux/mempool.h>
73#include <linux/writeback.h> 74#include <linux/writeback.h>
75#include <linux/kthread.h>
74 76
75STATIC struct quotactl_ops linvfs_qops; 77STATIC struct quotactl_ops linvfs_qops;
76STATIC struct super_operations linvfs_sops; 78STATIC struct super_operations linvfs_sops;
77STATIC kmem_zone_t *linvfs_inode_zone; 79STATIC kmem_zone_t *xfs_vnode_zone;
80STATIC kmem_zone_t *xfs_ioend_zone;
81mempool_t *xfs_ioend_pool;
78 82
79STATIC struct xfs_mount_args * 83STATIC struct xfs_mount_args *
80xfs_args_allocate( 84xfs_args_allocate(
@@ -138,24 +142,25 @@ STATIC __inline__ void
138xfs_set_inodeops( 142xfs_set_inodeops(
139 struct inode *inode) 143 struct inode *inode)
140{ 144{
141 vnode_t *vp = LINVFS_GET_VP(inode); 145 switch (inode->i_mode & S_IFMT) {
142 146 case S_IFREG:
143 if (vp->v_type == VNON) {
144 vn_mark_bad(vp);
145 } else if (S_ISREG(inode->i_mode)) {
146 inode->i_op = &linvfs_file_inode_operations; 147 inode->i_op = &linvfs_file_inode_operations;
147 inode->i_fop = &linvfs_file_operations; 148 inode->i_fop = &linvfs_file_operations;
148 inode->i_mapping->a_ops = &linvfs_aops; 149 inode->i_mapping->a_ops = &linvfs_aops;
149 } else if (S_ISDIR(inode->i_mode)) { 150 break;
151 case S_IFDIR:
150 inode->i_op = &linvfs_dir_inode_operations; 152 inode->i_op = &linvfs_dir_inode_operations;
151 inode->i_fop = &linvfs_dir_operations; 153 inode->i_fop = &linvfs_dir_operations;
152 } else if (S_ISLNK(inode->i_mode)) { 154 break;
155 case S_IFLNK:
153 inode->i_op = &linvfs_symlink_inode_operations; 156 inode->i_op = &linvfs_symlink_inode_operations;
154 if (inode->i_blocks) 157 if (inode->i_blocks)
155 inode->i_mapping->a_ops = &linvfs_aops; 158 inode->i_mapping->a_ops = &linvfs_aops;
156 } else { 159 break;
160 default:
157 inode->i_op = &linvfs_file_inode_operations; 161 inode->i_op = &linvfs_file_inode_operations;
158 init_special_inode(inode, inode->i_mode, inode->i_rdev); 162 init_special_inode(inode, inode->i_mode, inode->i_rdev);
163 break;
159 } 164 }
160} 165}
161 166
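
With v_type gone, xfs_set_inodeops() above selects the operation tables with a single switch on the S_IFMT bits of i_mode. A trivial userspace sketch of that dispatch shape, using a bare mode_t and string labels in place of the linvfs_* tables:

	/* Sketch: dispatch on the file type bits as xfs_set_inodeops() now does. */
	#include <sys/stat.h>
	#include <stdio.h>

	static const char *ops_for_mode(mode_t mode)
	{
		switch (mode & S_IFMT) {
		case S_IFREG:
			return "regular file ops + address space ops";
		case S_IFDIR:
			return "directory ops";
		case S_IFLNK:
			return "symlink ops";
		default:
			return "special inode (chr/blk/fifo/sock)";
		}
	}

	int main(void)
	{
		printf("%s\n", ops_for_mode(S_IFDIR | 0755));
		return 0;
	}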
@@ -167,16 +172,23 @@ xfs_revalidate_inode(
167{ 172{
168 struct inode *inode = LINVFS_GET_IP(vp); 173 struct inode *inode = LINVFS_GET_IP(vp);
169 174
170 inode->i_mode = (ip->i_d.di_mode & MODEMASK) | VTTOIF(vp->v_type); 175 inode->i_mode = ip->i_d.di_mode;
171 inode->i_nlink = ip->i_d.di_nlink; 176 inode->i_nlink = ip->i_d.di_nlink;
172 inode->i_uid = ip->i_d.di_uid; 177 inode->i_uid = ip->i_d.di_uid;
173 inode->i_gid = ip->i_d.di_gid; 178 inode->i_gid = ip->i_d.di_gid;
174 if (((1 << vp->v_type) & ((1<<VBLK) | (1<<VCHR))) == 0) { 179
180 switch (inode->i_mode & S_IFMT) {
181 case S_IFBLK:
182 case S_IFCHR:
183 inode->i_rdev =
184 MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
185 sysv_minor(ip->i_df.if_u2.if_rdev));
186 break;
187 default:
175 inode->i_rdev = 0; 188 inode->i_rdev = 0;
176 } else { 189 break;
177 xfs_dev_t dev = ip->i_df.if_u2.if_rdev;
178 inode->i_rdev = MKDEV(sysv_major(dev) & 0x1ff, sysv_minor(dev));
179 } 190 }
191
180 inode->i_blksize = PAGE_CACHE_SIZE; 192 inode->i_blksize = PAGE_CACHE_SIZE;
181 inode->i_generation = ip->i_d.di_gen; 193 inode->i_generation = ip->i_d.di_gen;
182 i_size_write(inode, ip->i_d.di_size); 194 i_size_write(inode, ip->i_d.di_size);
@@ -231,7 +243,6 @@ xfs_initialize_vnode(
231 * finish our work. 243 * finish our work.
232 */ 244 */
233 if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) { 245 if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) {
234 vp->v_type = IFTOVT(ip->i_d.di_mode);
235 xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip); 246 xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
236 xfs_set_inodeops(inode); 247 xfs_set_inodeops(inode);
237 248
@@ -274,8 +285,7 @@ linvfs_alloc_inode(
274{ 285{
275 vnode_t *vp; 286 vnode_t *vp;
276 287
277 vp = (vnode_t *)kmem_cache_alloc(linvfs_inode_zone, 288 vp = kmem_cache_alloc(xfs_vnode_zone, kmem_flags_convert(KM_SLEEP));
278 kmem_flags_convert(KM_SLEEP));
279 if (!vp) 289 if (!vp)
280 return NULL; 290 return NULL;
281 return LINVFS_GET_IP(vp); 291 return LINVFS_GET_IP(vp);
@@ -285,11 +295,11 @@ STATIC void
285linvfs_destroy_inode( 295linvfs_destroy_inode(
286 struct inode *inode) 296 struct inode *inode)
287{ 297{
288 kmem_cache_free(linvfs_inode_zone, LINVFS_GET_VP(inode)); 298 kmem_zone_free(xfs_vnode_zone, LINVFS_GET_VP(inode));
289} 299}
290 300
291STATIC void 301STATIC void
292init_once( 302linvfs_inode_init_once(
293 void *data, 303 void *data,
294 kmem_cache_t *cachep, 304 kmem_cache_t *cachep,
295 unsigned long flags) 305 unsigned long flags)
@@ -302,21 +312,41 @@ init_once(
302} 312}
303 313
304STATIC int 314STATIC int
305init_inodecache( void ) 315linvfs_init_zones(void)
306{ 316{
307 linvfs_inode_zone = kmem_cache_create("linvfs_icache", 317 xfs_vnode_zone = kmem_cache_create("xfs_vnode",
308 sizeof(vnode_t), 0, SLAB_RECLAIM_ACCOUNT, 318 sizeof(vnode_t), 0, SLAB_RECLAIM_ACCOUNT,
309 init_once, NULL); 319 linvfs_inode_init_once, NULL);
310 if (linvfs_inode_zone == NULL) 320 if (!xfs_vnode_zone)
311 return -ENOMEM; 321 goto out;
322
323 xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
324 if (!xfs_ioend_zone)
325 goto out_destroy_vnode_zone;
326
327 xfs_ioend_pool = mempool_create(4 * MAX_BUF_PER_PAGE,
328 mempool_alloc_slab, mempool_free_slab,
329 xfs_ioend_zone);
330 if (!xfs_ioend_pool)
331 goto out_free_ioend_zone;
332
312 return 0; 333 return 0;
334
335
336 out_free_ioend_zone:
337 kmem_zone_destroy(xfs_ioend_zone);
338 out_destroy_vnode_zone:
339 kmem_zone_destroy(xfs_vnode_zone);
340 out:
341 return -ENOMEM;
313} 342}
314 343
315STATIC void 344STATIC void
316destroy_inodecache( void ) 345linvfs_destroy_zones(void)
317{ 346{
318 if (kmem_cache_destroy(linvfs_inode_zone)) 347 mempool_destroy(xfs_ioend_pool);
319 printk(KERN_WARNING "%s: cache still in use!\n", __FUNCTION__); 348 kmem_zone_destroy(xfs_vnode_zone);
349 kmem_zone_destroy(xfs_ioend_zone);
320} 350}
321 351
322/* 352/*
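
linvfs_init_zones() above layers a mempool for ioend structures on top of a slab cache and unwinds in reverse order on failure. A kernel-style sketch of the same init/teardown pattern; the example_* names are hypothetical and the calls assume the 2.6-era kmem_cache_create()/mempool_create() signatures used in the patch:

	/* Sketch of the zone + mempool setup pattern in linvfs_init_zones(). */
	#include <linux/slab.h>
	#include <linux/mempool.h>
	#include <linux/errno.h>

	static kmem_cache_t *example_ioend_cache;
	static mempool_t *example_ioend_pool;

	static int example_init_zones(void)
	{
		example_ioend_cache = kmem_cache_create("example_ioend",
					64, 0, 0, NULL, NULL);
		if (!example_ioend_cache)
			goto out;

		/* pre-reserve a few objects so I/O completion can still
		 * make progress under memory pressure */
		example_ioend_pool = mempool_create(4, mempool_alloc_slab,
					mempool_free_slab, example_ioend_cache);
		if (!example_ioend_pool)
			goto out_destroy_cache;

		return 0;

	 out_destroy_cache:
		kmem_cache_destroy(example_ioend_cache);
	 out:
		return -ENOMEM;
	}

	static void example_destroy_zones(void)
	{
		/* teardown runs in the reverse order of setup */
		mempool_destroy(example_ioend_pool);
		kmem_cache_destroy(example_ioend_cache);
	}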
@@ -354,17 +384,38 @@ linvfs_clear_inode(
354 struct inode *inode) 384 struct inode *inode)
355{ 385{
356 vnode_t *vp = LINVFS_GET_VP(inode); 386 vnode_t *vp = LINVFS_GET_VP(inode);
387 int error, cache;
357 388
358 if (vp) { 389 vn_trace_entry(vp, "clear_inode", (inst_t *)__return_address);
359 vn_rele(vp); 390
360 vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); 391 XFS_STATS_INC(vn_rele);
361 /* 392 XFS_STATS_INC(vn_remove);
362 * Do all our cleanup, and remove this vnode. 393 XFS_STATS_INC(vn_reclaim);
363 */ 394 XFS_STATS_DEC(vn_active);
364 vn_remove(vp); 395
396 /*
397 * This can happen because xfs_iget_core calls xfs_idestroy if we
398 * find an inode with di_mode == 0 but without IGET_CREATE set.
399 */
400 if (vp->v_fbhv)
401 VOP_INACTIVE(vp, NULL, cache);
402
403 VN_LOCK(vp);
404 vp->v_flag &= ~VMODIFIED;
405 VN_UNLOCK(vp, 0);
406
407 if (vp->v_fbhv) {
408 VOP_RECLAIM(vp, error);
409 if (error)
410 panic("vn_purge: cannot reclaim");
365 } 411 }
366}
367 412
413 ASSERT(vp->v_fbhv == NULL);
414
415#ifdef XFS_VNODE_TRACE
416 ktrace_free(vp->v_trace);
417#endif
418}
368 419
369/* 420/*
370 * Enqueue a work item to be picked up by the vfs xfssyncd thread. 421 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
@@ -466,25 +517,16 @@ xfssyncd(
466{ 517{
467 long timeleft; 518 long timeleft;
468 vfs_t *vfsp = (vfs_t *) arg; 519 vfs_t *vfsp = (vfs_t *) arg;
469 struct list_head tmp;
470 struct vfs_sync_work *work, *n; 520 struct vfs_sync_work *work, *n;
521 LIST_HEAD (tmp);
471 522
472 daemonize("xfssyncd");
473
474 vfsp->vfs_sync_work.w_vfs = vfsp;
475 vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
476 vfsp->vfs_sync_task = current;
477 wmb();
478 wake_up(&vfsp->vfs_wait_sync_task);
479
480 INIT_LIST_HEAD(&tmp);
481 timeleft = (xfs_syncd_centisecs * HZ) / 100; 523 timeleft = (xfs_syncd_centisecs * HZ) / 100;
482 for (;;) { 524 for (;;) {
483 set_current_state(TASK_INTERRUPTIBLE); 525 set_current_state(TASK_INTERRUPTIBLE);
484 timeleft = schedule_timeout(timeleft); 526 timeleft = schedule_timeout(timeleft);
485 /* swsusp */ 527 /* swsusp */
486 try_to_freeze(); 528 try_to_freeze();
487 if (vfsp->vfs_flag & VFS_UMOUNT) 529 if (kthread_should_stop())
488 break; 530 break;
489 531
490 spin_lock(&vfsp->vfs_sync_lock); 532 spin_lock(&vfsp->vfs_sync_lock);
@@ -513,10 +555,6 @@ xfssyncd(
513 } 555 }
514 } 556 }
515 557
516 vfsp->vfs_sync_task = NULL;
517 wmb();
518 wake_up(&vfsp->vfs_wait_sync_task);
519
520 return 0; 558 return 0;
521} 559}
522 560
@@ -524,13 +562,11 @@ STATIC int
524linvfs_start_syncd( 562linvfs_start_syncd(
525 vfs_t *vfsp) 563 vfs_t *vfsp)
526{ 564{
527 int pid; 565 vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
528 566 vfsp->vfs_sync_work.w_vfs = vfsp;
529 pid = kernel_thread(xfssyncd, (void *) vfsp, 567 vfsp->vfs_sync_task = kthread_run(xfssyncd, vfsp, "xfssyncd");
530 CLONE_VM | CLONE_FS | CLONE_FILES); 568 if (IS_ERR(vfsp->vfs_sync_task))
531 if (pid < 0) 569 return -PTR_ERR(vfsp->vfs_sync_task);
532 return -pid;
533 wait_event(vfsp->vfs_wait_sync_task, vfsp->vfs_sync_task);
534 return 0; 570 return 0;
535} 571}
536 572
@@ -538,11 +574,7 @@ STATIC void
538linvfs_stop_syncd( 574linvfs_stop_syncd(
539 vfs_t *vfsp) 575 vfs_t *vfsp)
540{ 576{
541 vfsp->vfs_flag |= VFS_UMOUNT; 577 kthread_stop(vfsp->vfs_sync_task);
542 wmb();
543
544 wake_up_process(vfsp->vfs_sync_task);
545 wait_event(vfsp->vfs_wait_sync_task, !vfsp->vfs_sync_task);
546} 578}
547 579
548STATIC void 580STATIC void
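
The hunks above drop the hand-rolled daemonize()/wait-queue handshake in favour of the kthread API: the worker loops until kthread_should_stop() is true, and kthread_stop() both wakes the thread and waits for it to exit. A minimal sketch of that lifecycle with hypothetical example_* names:

	/* Sketch of the kthread_run()/kthread_stop() lifecycle used above. */
	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/delay.h>
	#include <linux/err.h>

	static struct task_struct *example_task;

	static int example_worker(void *arg)
	{
		while (!kthread_should_stop()) {
			/* periodic work goes here, then sleep until the
			 * next interval or until kthread_stop() wakes us */
			msleep_interruptible(1000);
		}
		return 0;
	}

	static int example_start(void)
	{
		example_task = kthread_run(example_worker, NULL, "example_worker");
		if (IS_ERR(example_task))
			return PTR_ERR(example_task);
		return 0;
	}

	static void example_stop(void)
	{
		/* signals the thread and blocks until example_worker() returns */
		kthread_stop(example_task);
	}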
@@ -866,9 +898,9 @@ init_xfs_fs( void )
866 898
867 ktrace_init(64); 899 ktrace_init(64);
868 900
869 error = init_inodecache(); 901 error = linvfs_init_zones();
870 if (error < 0) 902 if (error < 0)
871 goto undo_inodecache; 903 goto undo_zones;
872 904
873 error = pagebuf_init(); 905 error = pagebuf_init();
874 if (error < 0) 906 if (error < 0)
@@ -889,9 +921,9 @@ undo_register:
889 pagebuf_terminate(); 921 pagebuf_terminate();
890 922
891undo_pagebuf: 923undo_pagebuf:
892 destroy_inodecache(); 924 linvfs_destroy_zones();
893 925
894undo_inodecache: 926undo_zones:
895 return error; 927 return error;
896} 928}
897 929
@@ -903,7 +935,7 @@ exit_xfs_fs( void )
903 unregister_filesystem(&xfs_fs_type); 935 unregister_filesystem(&xfs_fs_type);
904 xfs_cleanup(); 936 xfs_cleanup();
905 pagebuf_terminate(); 937 pagebuf_terminate();
906 destroy_inodecache(); 938 linvfs_destroy_zones();
907 ktrace_uninit(); 939 ktrace_uninit();
908} 940}
909 941
diff --git a/fs/xfs/linux-2.6/xfs_vfs.c b/fs/xfs/linux-2.6/xfs_vfs.c
index 669c61644959..34cc902ec119 100644
--- a/fs/xfs/linux-2.6/xfs_vfs.c
+++ b/fs/xfs/linux-2.6/xfs_vfs.c
@@ -251,7 +251,6 @@ vfs_allocate( void )
251 bhv_head_init(VFS_BHVHEAD(vfsp), "vfs"); 251 bhv_head_init(VFS_BHVHEAD(vfsp), "vfs");
252 INIT_LIST_HEAD(&vfsp->vfs_sync_list); 252 INIT_LIST_HEAD(&vfsp->vfs_sync_list);
253 spin_lock_init(&vfsp->vfs_sync_lock); 253 spin_lock_init(&vfsp->vfs_sync_lock);
254 init_waitqueue_head(&vfsp->vfs_wait_sync_task);
255 init_waitqueue_head(&vfsp->vfs_wait_single_sync_task); 254 init_waitqueue_head(&vfsp->vfs_wait_single_sync_task);
256 return vfsp; 255 return vfsp;
257} 256}
diff --git a/fs/xfs/linux-2.6/xfs_vfs.h b/fs/xfs/linux-2.6/xfs_vfs.h
index 7ee1f714e9ba..f0ab574fb47a 100644
--- a/fs/xfs/linux-2.6/xfs_vfs.h
+++ b/fs/xfs/linux-2.6/xfs_vfs.h
@@ -65,7 +65,6 @@ typedef struct vfs {
65 spinlock_t vfs_sync_lock; /* work item list lock */ 65 spinlock_t vfs_sync_lock; /* work item list lock */
66 int vfs_sync_seq; /* sync thread generation no. */ 66 int vfs_sync_seq; /* sync thread generation no. */
67 wait_queue_head_t vfs_wait_single_sync_task; 67 wait_queue_head_t vfs_wait_single_sync_task;
68 wait_queue_head_t vfs_wait_sync_task;
69} vfs_t; 68} vfs_t;
70 69
71#define vfs_fbhv vfs_bh.bh_first /* 1st on vfs behavior chain */ 70#define vfs_fbhv vfs_bh.bh_first /* 1st on vfs behavior chain */
@@ -96,7 +95,6 @@ typedef enum {
96#define VFS_RDONLY 0x0001 /* read-only vfs */ 95#define VFS_RDONLY 0x0001 /* read-only vfs */
97#define VFS_GRPID 0x0002 /* group-ID assigned from directory */ 96#define VFS_GRPID 0x0002 /* group-ID assigned from directory */
98#define VFS_DMI 0x0004 /* filesystem has the DMI enabled */ 97#define VFS_DMI 0x0004 /* filesystem has the DMI enabled */
99#define VFS_UMOUNT 0x0008 /* unmount in progress */
100#define VFS_END 0x0008 /* max flag */ 98#define VFS_END 0x0008 /* max flag */
101 99
102#define SYNC_ATTR 0x0001 /* sync attributes */ 100#define SYNC_ATTR 0x0001 /* sync attributes */
diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c
index 250cad54e892..268f45bf6a9a 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.c
+++ b/fs/xfs/linux-2.6/xfs_vnode.c
@@ -42,93 +42,33 @@ DEFINE_SPINLOCK(vnumber_lock);
42 */ 42 */
43#define NVSYNC 37 43#define NVSYNC 37
44#define vptosync(v) (&vsync[((unsigned long)v) % NVSYNC]) 44#define vptosync(v) (&vsync[((unsigned long)v) % NVSYNC])
45sv_t vsync[NVSYNC]; 45STATIC wait_queue_head_t vsync[NVSYNC];
46
47/*
48 * Translate stat(2) file types to vnode types and vice versa.
49 * Aware of numeric order of S_IFMT and vnode type values.
50 */
51enum vtype iftovt_tab[] = {
52 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
53 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
54};
55
56u_short vttoif_tab[] = {
57 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, S_IFIFO, 0, S_IFSOCK
58};
59 46
60 47
61void 48void
62vn_init(void) 49vn_init(void)
63{ 50{
64 register sv_t *svp; 51 int i;
65 register int i;
66 52
67 for (svp = vsync, i = 0; i < NVSYNC; i++, svp++) 53 for (i = 0; i < NVSYNC; i++)
68 init_sv(svp, SV_DEFAULT, "vsy", i); 54 init_waitqueue_head(&vsync[i]);
69} 55}
70 56
71/* 57void
72 * Clean a vnode of filesystem-specific data and prepare it for reuse. 58vn_iowait(
73 */
74STATIC int
75vn_reclaim(
76 struct vnode *vp) 59 struct vnode *vp)
77{ 60{
78 int error; 61 wait_queue_head_t *wq = vptosync(vp);
79 62
80 XFS_STATS_INC(vn_reclaim); 63 wait_event(*wq, (atomic_read(&vp->v_iocount) == 0));
81 vn_trace_entry(vp, "vn_reclaim", (inst_t *)__return_address);
82
83 /*
84 * Only make the VOP_RECLAIM call if there are behaviors
85 * to call.
86 */
87 if (vp->v_fbhv) {
88 VOP_RECLAIM(vp, error);
89 if (error)
90 return -error;
91 }
92 ASSERT(vp->v_fbhv == NULL);
93
94 VN_LOCK(vp);
95 vp->v_flag &= (VRECLM|VWAIT);
96 VN_UNLOCK(vp, 0);
97
98 vp->v_type = VNON;
99 vp->v_fbhv = NULL;
100
101#ifdef XFS_VNODE_TRACE
102 ktrace_free(vp->v_trace);
103 vp->v_trace = NULL;
104#endif
105
106 return 0;
107}
108
109STATIC void
110vn_wakeup(
111 struct vnode *vp)
112{
113 VN_LOCK(vp);
114 if (vp->v_flag & VWAIT)
115 sv_broadcast(vptosync(vp));
116 vp->v_flag &= ~(VRECLM|VWAIT|VMODIFIED);
117 VN_UNLOCK(vp, 0);
118} 64}
119 65
120int 66void
121vn_wait( 67vn_iowake(
122 struct vnode *vp) 68 struct vnode *vp)
123{ 69{
124 VN_LOCK(vp); 70 if (atomic_dec_and_test(&vp->v_iocount))
125 if (vp->v_flag & (VINACT | VRECLM)) { 71 wake_up(vptosync(vp));
126 vp->v_flag |= VWAIT;
127 sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0);
128 return 1;
129 }
130 VN_UNLOCK(vp, 0);
131 return 0;
132} 72}
133 73
134struct vnode * 74struct vnode *
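
The new vn_iowait()/vn_iowake() above hash each vnode onto one of NVSYNC shared wait queues and drain a per-vnode atomic I/O counter, replacing the old sv_t-based reclaim synchronisation. A sketch of that pattern with hypothetical example_* names standing in for the vnode and the vsync table:

	/* Sketch of the hashed-waitqueue I/O drain used by vn_iowait()/vn_iowake(). */
	#include <linux/wait.h>
	#include <linux/sched.h>
	#include <asm/atomic.h>

	#define EXAMPLE_NSYNC 37
	static wait_queue_head_t example_sync[EXAMPLE_NSYNC];
	#define example_wq(obj) (&example_sync[(unsigned long)(obj) % EXAMPLE_NSYNC])

	struct example_obj {
		atomic_t io_count;	/* outstanding I/O on this object */
	};

	static void example_sync_init(void)
	{
		int i;

		for (i = 0; i < EXAMPLE_NSYNC; i++)
			init_waitqueue_head(&example_sync[i]);
	}

	/* Block until every I/O previously started on obj has completed. */
	static void example_iowait(struct example_obj *obj)
	{
		wait_event(*example_wq(obj), atomic_read(&obj->io_count) == 0);
	}

	/* Called at I/O completion; wakes waiters once the count reaches zero. */
	static void example_iowake(struct example_obj *obj)
	{
		if (atomic_dec_and_test(&obj->io_count))
			wake_up(example_wq(obj));
	}

Sharing a small fixed table of wait queues keeps the vnode itself lean: waiters hash to a queue by pointer value, and a spurious wakeup of an unrelated vnode on the same queue is harmless because wait_event() rechecks its condition.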
@@ -154,6 +94,8 @@ vn_initialize(
154 /* Initialize the first behavior and the behavior chain head. */ 94 /* Initialize the first behavior and the behavior chain head. */
155 vn_bhv_head_init(VN_BHV_HEAD(vp), "vnode"); 95 vn_bhv_head_init(VN_BHV_HEAD(vp), "vnode");
156 96
97 atomic_set(&vp->v_iocount, 0);
98
157#ifdef XFS_VNODE_TRACE 99#ifdef XFS_VNODE_TRACE
158 vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP); 100 vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP);
159#endif /* XFS_VNODE_TRACE */ 101#endif /* XFS_VNODE_TRACE */
@@ -163,30 +105,6 @@ vn_initialize(
163} 105}
164 106
165/* 107/*
166 * Get a reference on a vnode.
167 */
168vnode_t *
169vn_get(
170 struct vnode *vp,
171 vmap_t *vmap)
172{
173 struct inode *inode;
174
175 XFS_STATS_INC(vn_get);
176 inode = LINVFS_GET_IP(vp);
177 if (inode->i_state & I_FREEING)
178 return NULL;
179
180 inode = ilookup(vmap->v_vfsp->vfs_super, vmap->v_ino);
181 if (!inode) /* Inode not present */
182 return NULL;
183
184 vn_trace_exit(vp, "vn_get", (inst_t *)__return_address);
185
186 return vp;
187}
188
189/*
190 * Revalidate the Linux inode from the vattr. 108 * Revalidate the Linux inode from the vattr.
191 * Note: i_size _not_ updated; we must hold the inode 109 * Note: i_size _not_ updated; we must hold the inode
192 * semaphore when doing that - callers responsibility. 110 * semaphore when doing that - callers responsibility.
@@ -198,7 +116,7 @@ vn_revalidate_core(
198{ 116{
199 struct inode *inode = LINVFS_GET_IP(vp); 117 struct inode *inode = LINVFS_GET_IP(vp);
200 118
201 inode->i_mode = VTTOIF(vap->va_type) | vap->va_mode; 119 inode->i_mode = vap->va_mode;
202 inode->i_nlink = vap->va_nlink; 120 inode->i_nlink = vap->va_nlink;
203 inode->i_uid = vap->va_uid; 121 inode->i_uid = vap->va_uid;
204 inode->i_gid = vap->va_gid; 122 inode->i_gid = vap->va_gid;
@@ -247,71 +165,6 @@ vn_revalidate(
247} 165}
248 166
249/* 167/*
250 * purge a vnode from the cache
251 * At this point the vnode is guaranteed to have no references (vn_count == 0)
252 * The caller has to make sure that there are no ways someone could
253 * get a handle (via vn_get) on the vnode (usually done via a mount/vfs lock).
254 */
255void
256vn_purge(
257 struct vnode *vp,
258 vmap_t *vmap)
259{
260 vn_trace_entry(vp, "vn_purge", (inst_t *)__return_address);
261
262again:
263 /*
264 * Check whether vp has already been reclaimed since our caller
265 * sampled its version while holding a filesystem cache lock that
266 * its VOP_RECLAIM function acquires.
267 */
268 VN_LOCK(vp);
269 if (vp->v_number != vmap->v_number) {
270 VN_UNLOCK(vp, 0);
271 return;
272 }
273
274 /*
275 * If vp is being reclaimed or inactivated, wait until it is inert,
276 * then proceed. Can't assume that vnode is actually reclaimed
277 * just because the reclaimed flag is asserted -- a vn_alloc
278 * reclaim can fail.
279 */
280 if (vp->v_flag & (VINACT | VRECLM)) {
281 ASSERT(vn_count(vp) == 0);
282 vp->v_flag |= VWAIT;
283 sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0);
284 goto again;
285 }
286
287 /*
288 * Another process could have raced in and gotten this vnode...
289 */
290 if (vn_count(vp) > 0) {
291 VN_UNLOCK(vp, 0);
292 return;
293 }
294
295 XFS_STATS_DEC(vn_active);
296 vp->v_flag |= VRECLM;
297 VN_UNLOCK(vp, 0);
298
299 /*
300 * Call VOP_RECLAIM and clean vp. The FSYNC_INVAL flag tells
301 * vp's filesystem to flush and invalidate all cached resources.
302 * When vn_reclaim returns, vp should have no private data,
303 * either in a system cache or attached to v_data.
304 */
305 if (vn_reclaim(vp) != 0)
306 panic("vn_purge: cannot reclaim");
307
308 /*
309 * Wakeup anyone waiting for vp to be reclaimed.
310 */
311 vn_wakeup(vp);
312}
313
314/*
315 * Add a reference to a referenced vnode. 168 * Add a reference to a referenced vnode.
316 */ 169 */
317struct vnode * 170struct vnode *
@@ -330,80 +183,6 @@ vn_hold(
330 return vp; 183 return vp;
331} 184}
332 185
333/*
334 * Call VOP_INACTIVE on last reference.
335 */
336void
337vn_rele(
338 struct vnode *vp)
339{
340 int vcnt;
341 int cache;
342
343 XFS_STATS_INC(vn_rele);
344
345 VN_LOCK(vp);
346
347 vn_trace_entry(vp, "vn_rele", (inst_t *)__return_address);
348 vcnt = vn_count(vp);
349
350 /*
351 * Since we always get called from put_inode we know
352 * that i_count won't be decremented after we
353 * return.
354 */
355 if (!vcnt) {
356 /*
357 * As soon as we turn this on, noone can find us in vn_get
358 * until we turn off VINACT or VRECLM
359 */
360 vp->v_flag |= VINACT;
361 VN_UNLOCK(vp, 0);
362
363 /*
364 * Do not make the VOP_INACTIVE call if there
365 * are no behaviors attached to the vnode to call.
366 */
367 if (vp->v_fbhv)
368 VOP_INACTIVE(vp, NULL, cache);
369
370 VN_LOCK(vp);
371 if (vp->v_flag & VWAIT)
372 sv_broadcast(vptosync(vp));
373
374 vp->v_flag &= ~(VINACT|VWAIT|VRECLM|VMODIFIED);
375 }
376
377 VN_UNLOCK(vp, 0);
378
379 vn_trace_exit(vp, "vn_rele", (inst_t *)__return_address);
380}
381
382/*
383 * Finish the removal of a vnode.
384 */
385void
386vn_remove(
387 struct vnode *vp)
388{
389 vmap_t vmap;
390
391 /* Make sure we don't do this to the same vnode twice */
392 if (!(vp->v_fbhv))
393 return;
394
395 XFS_STATS_INC(vn_remove);
396 vn_trace_exit(vp, "vn_remove", (inst_t *)__return_address);
397
398 /*
399 * After the following purge the vnode
400 * will no longer exist.
401 */
402 VMAP(vp, vmap);
403 vn_purge(vp, &vmap);
404}
405
406
407#ifdef XFS_VNODE_TRACE 186#ifdef XFS_VNODE_TRACE
408 187
409#define KTRACE_ENTER(vp, vk, s, line, ra) \ 188#define KTRACE_ENTER(vp, vk, s, line, ra) \
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index a6e57c647be4..35f306cebb87 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as 5 * under the terms of version 2 of the GNU General Public License as
@@ -65,10 +65,6 @@ struct vattr;
65struct xfs_iomap; 65struct xfs_iomap;
66struct attrlist_cursor_kern; 66struct attrlist_cursor_kern;
67 67
68/*
69 * Vnode types. VNON means no type.
70 */
71enum vtype { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VFIFO, VBAD, VSOCK };
72 68
73typedef xfs_ino_t vnumber_t; 69typedef xfs_ino_t vnumber_t;
74typedef struct dentry vname_t; 70typedef struct dentry vname_t;
@@ -77,15 +73,14 @@ typedef bhv_head_t vn_bhv_head_t;
77/* 73/*
78 * MP locking protocols: 74 * MP locking protocols:
79 * v_flag, v_vfsp VN_LOCK/VN_UNLOCK 75 * v_flag, v_vfsp VN_LOCK/VN_UNLOCK
80 * v_type read-only or fs-dependent
81 */ 76 */
82typedef struct vnode { 77typedef struct vnode {
83 __u32 v_flag; /* vnode flags (see below) */ 78 __u32 v_flag; /* vnode flags (see below) */
84 enum vtype v_type; /* vnode type */
85 struct vfs *v_vfsp; /* ptr to containing VFS */ 79 struct vfs *v_vfsp; /* ptr to containing VFS */
86 vnumber_t v_number; /* in-core vnode number */ 80 vnumber_t v_number; /* in-core vnode number */
87 vn_bhv_head_t v_bh; /* behavior head */ 81 vn_bhv_head_t v_bh; /* behavior head */
88 spinlock_t v_lock; /* VN_LOCK/VN_UNLOCK */ 82 spinlock_t v_lock; /* VN_LOCK/VN_UNLOCK */
83 atomic_t v_iocount; /* outstanding I/O count */
89#ifdef XFS_VNODE_TRACE 84#ifdef XFS_VNODE_TRACE
90 struct ktrace *v_trace; /* trace header structure */ 85 struct ktrace *v_trace; /* trace header structure */
91#endif 86#endif
@@ -93,6 +88,12 @@ typedef struct vnode {
93 /* inode MUST be last */ 88 /* inode MUST be last */
94} vnode_t; 89} vnode_t;
95 90
91#define VN_ISLNK(vp) S_ISLNK((vp)->v_inode.i_mode)
92#define VN_ISREG(vp) S_ISREG((vp)->v_inode.i_mode)
93#define VN_ISDIR(vp) S_ISDIR((vp)->v_inode.i_mode)
94#define VN_ISCHR(vp) S_ISCHR((vp)->v_inode.i_mode)
95#define VN_ISBLK(vp) S_ISBLK((vp)->v_inode.i_mode)
96
96#define v_fbhv v_bh.bh_first /* first behavior */ 97#define v_fbhv v_bh.bh_first /* first behavior */
97#define v_fops v_bh.bh_first->bd_ops /* first behavior ops */ 98#define v_fops v_bh.bh_first->bd_ops /* first behavior ops */
98 99
@@ -133,22 +134,8 @@ typedef enum {
133#define LINVFS_GET_IP(vp) (&(vp)->v_inode) 134#define LINVFS_GET_IP(vp) (&(vp)->v_inode)
134 135
135/* 136/*
136 * Convert between vnode types and inode formats (since POSIX.1
137 * defines mode word of stat structure in terms of inode formats).
138 */
139extern enum vtype iftovt_tab[];
140extern u_short vttoif_tab[];
141#define IFTOVT(mode) (iftovt_tab[((mode) & S_IFMT) >> 12])
142#define VTTOIF(indx) (vttoif_tab[(int)(indx)])
143#define MAKEIMODE(indx, mode) (int)(VTTOIF(indx) | (mode))
144
145
146/*
147 * Vnode flags. 137 * Vnode flags.
148 */ 138 */
149#define VINACT 0x1 /* vnode is being inactivated */
150#define VRECLM 0x2 /* vnode is being reclaimed */
151#define VWAIT 0x4 /* waiting for VINACT/VRECLM to end */
152#define VMODIFIED 0x8 /* XFS inode state possibly differs */ 139#define VMODIFIED 0x8 /* XFS inode state possibly differs */
153 /* to the Linux inode state. */ 140 /* to the Linux inode state. */
154 141
@@ -408,7 +395,6 @@ typedef struct vnodeops {
408 */ 395 */
409typedef struct vattr { 396typedef struct vattr {
410 int va_mask; /* bit-mask of attributes present */ 397 int va_mask; /* bit-mask of attributes present */
411 enum vtype va_type; /* vnode type (for create) */
412 mode_t va_mode; /* file access mode and type */ 398 mode_t va_mode; /* file access mode and type */
413 xfs_nlink_t va_nlink; /* number of references to file */ 399 xfs_nlink_t va_nlink; /* number of references to file */
414 uid_t va_uid; /* owner user id */ 400 uid_t va_uid; /* owner user id */
@@ -498,27 +484,12 @@ typedef struct vattr {
498 * Check whether mandatory file locking is enabled. 484 * Check whether mandatory file locking is enabled.
499 */ 485 */
500#define MANDLOCK(vp, mode) \ 486#define MANDLOCK(vp, mode) \
501 ((vp)->v_type == VREG && ((mode) & (VSGID|(VEXEC>>3))) == VSGID) 487 (VN_ISREG(vp) && ((mode) & (VSGID|(VEXEC>>3))) == VSGID)
502 488
503extern void vn_init(void); 489extern void vn_init(void);
504extern int vn_wait(struct vnode *);
505extern vnode_t *vn_initialize(struct inode *); 490extern vnode_t *vn_initialize(struct inode *);
506 491
507/* 492/*
508 * Acquiring and invalidating vnodes:
509 *
510 * if (vn_get(vp, version, 0))
511 * ...;
512 * vn_purge(vp, version);
513 *
514 * vn_get and vn_purge must be called with vmap_t arguments, sampled
515 * while a lock that the vnode's VOP_RECLAIM function acquires is
516 * held, to ensure that the vnode sampled with the lock held isn't
517 * recycled (VOP_RECLAIMed) or deallocated between the release of the lock
518 * and the subsequent vn_get or vn_purge.
519 */
520
521/*
522 * vnode_map structures _must_ match vn_epoch and vnode structure sizes. 493 * vnode_map structures _must_ match vn_epoch and vnode structure sizes.
523 */ 494 */
524typedef struct vnode_map { 495typedef struct vnode_map {
@@ -531,11 +502,11 @@ typedef struct vnode_map {
531 (vmap).v_number = (vp)->v_number, \ 502 (vmap).v_number = (vp)->v_number, \
532 (vmap).v_ino = (vp)->v_inode.i_ino; } 503 (vmap).v_ino = (vp)->v_inode.i_ino; }
533 504
534extern void vn_purge(struct vnode *, vmap_t *);
535extern vnode_t *vn_get(struct vnode *, vmap_t *);
536extern int vn_revalidate(struct vnode *); 505extern int vn_revalidate(struct vnode *);
537extern void vn_revalidate_core(struct vnode *, vattr_t *); 506extern void vn_revalidate_core(struct vnode *, vattr_t *);
538extern void vn_remove(struct vnode *); 507
508extern void vn_iowait(struct vnode *vp);
509extern void vn_iowake(struct vnode *vp);
539 510
540static inline int vn_count(struct vnode *vp) 511static inline int vn_count(struct vnode *vp)
541{ 512{
@@ -546,7 +517,6 @@ static inline int vn_count(struct vnode *vp)
546 * Vnode reference counting functions (and macros for compatibility). 517 * Vnode reference counting functions (and macros for compatibility).
547 */ 518 */
548extern vnode_t *vn_hold(struct vnode *); 519extern vnode_t *vn_hold(struct vnode *);
549extern void vn_rele(struct vnode *);
550 520
551#if defined(XFS_VNODE_TRACE) 521#if defined(XFS_VNODE_TRACE)
552#define VN_HOLD(vp) \ 522#define VN_HOLD(vp) \
@@ -560,6 +530,12 @@ extern void vn_rele(struct vnode *);
560#define VN_RELE(vp) (iput(LINVFS_GET_IP(vp))) 530#define VN_RELE(vp) (iput(LINVFS_GET_IP(vp)))
561#endif 531#endif
562 532
533static inline struct vnode *vn_grab(struct vnode *vp)
534{
535 struct inode *inode = igrab(LINVFS_GET_IP(vp));
536 return inode ? LINVFS_GET_VP(inode) : NULL;
537}
538
563/* 539/*
564 * Vname handling macros. 540 * Vname handling macros.
565 */ 541 */
diff --git a/fs/xfs/quota/Makefile b/fs/xfs/quota/Makefile
new file mode 100644
index 000000000000..7a4f725b2824
--- /dev/null
+++ b/fs/xfs/quota/Makefile
@@ -0,0 +1 @@
include $(TOPDIR)/fs/xfs/quota/Makefile-linux-$(VERSION).$(PATCHLEVEL)
diff --git a/fs/xfs/quota/Makefile-linux-2.6 b/fs/xfs/quota/Makefile-linux-2.6
new file mode 100644
index 000000000000..8b7b676718b9
--- /dev/null
+++ b/fs/xfs/quota/Makefile-linux-2.6
@@ -0,0 +1,53 @@
1#
2# Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
3#
4# This program is free software; you can redistribute it and/or modify it
5# under the terms of version 2 of the GNU General Public License as
6# published by the Free Software Foundation.
7#
8# This program is distributed in the hope that it would be useful, but
9# WITHOUT ANY WARRANTY; without even the implied warranty of
10# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11#
12# Further, this software is distributed without any warranty that it is
13# free of the rightful claim of any third person regarding infringement
14# or the like. Any license provided herein, whether implied or
15# otherwise, applies only to this software file. Patent licenses, if
16# any, provided herein do not apply to combinations of this program with
17# other software, or any other product whatsoever.
18#
19# You should have received a copy of the GNU General Public License along
20# with this program; if not, write the Free Software Foundation, Inc., 59
21# Temple Place - Suite 330, Boston MA 02111-1307, USA.
22#
23# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24# Mountain View, CA 94043, or:
25#
26# http://www.sgi.com
27#
28# For further information regarding this notice, see:
29#
30# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
31#
32
33EXTRA_CFLAGS += -I $(TOPDIR)/fs/xfs -I $(TOPDIR)/fs/xfs/linux-2.6
34
35ifeq ($(CONFIG_XFS_DEBUG),y)
36 EXTRA_CFLAGS += -g -DDEBUG
37 #EXTRA_CFLAGS += -DQUOTADEBUG
38endif
39ifeq ($(CONFIG_XFS_TRACE),y)
40 EXTRA_CFLAGS += -DXFS_DQUOT_TRACE
41 EXTRA_CFLAGS += -DXFS_VNODE_TRACE
42endif
43
44obj-$(CONFIG_XFS_QUOTA) += xfs_quota.o
45
46xfs_quota-y += xfs_dquot.o \
47 xfs_dquot_item.o \
48 xfs_trans_dquot.o \
49 xfs_qm_syscalls.o \
50 xfs_qm_bhv.o \
51 xfs_qm.o
52
53xfs_quota-$(CONFIG_PROC_FS) += xfs_qm_stats.o
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 46ce1e3ce1d6..e2e8d35fa4d0 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -421,7 +421,7 @@ xfs_qm_init_dquot_blk(
421 */ 421 */
422STATIC int 422STATIC int
423xfs_qm_dqalloc( 423xfs_qm_dqalloc(
424 xfs_trans_t *tp, 424 xfs_trans_t **tpp,
425 xfs_mount_t *mp, 425 xfs_mount_t *mp,
426 xfs_dquot_t *dqp, 426 xfs_dquot_t *dqp,
427 xfs_inode_t *quotip, 427 xfs_inode_t *quotip,
@@ -433,6 +433,7 @@ xfs_qm_dqalloc(
433 xfs_bmbt_irec_t map; 433 xfs_bmbt_irec_t map;
434 int nmaps, error, committed; 434 int nmaps, error, committed;
435 xfs_buf_t *bp; 435 xfs_buf_t *bp;
436 xfs_trans_t *tp = *tpp;
436 437
437 ASSERT(tp != NULL); 438 ASSERT(tp != NULL);
438 xfs_dqtrace_entry(dqp, "DQALLOC"); 439 xfs_dqtrace_entry(dqp, "DQALLOC");
@@ -492,10 +493,32 @@ xfs_qm_dqalloc(
492 xfs_qm_init_dquot_blk(tp, mp, INT_GET(dqp->q_core.d_id, ARCH_CONVERT), 493 xfs_qm_init_dquot_blk(tp, mp, INT_GET(dqp->q_core.d_id, ARCH_CONVERT),
493 dqp->dq_flags & XFS_DQ_ALLTYPES, bp); 494 dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
494 495
495 if ((error = xfs_bmap_finish(&tp, &flist, firstblock, &committed))) { 496 /*
497 * xfs_bmap_finish() may commit the current transaction and
498 * start a second transaction if the freelist is not empty.
499 *
500 * Since we still want to modify this buffer, we need to
501 * ensure that the buffer is not released on commit of
502 * the first transaction and ensure the buffer is added to the
503 * second transaction.
504 *
505 * If there is only one transaction then don't stop the buffer
506 * from being released when it commits later on.
507 */
508
509 xfs_trans_bhold(tp, bp);
510
511 if ((error = xfs_bmap_finish(tpp, &flist, firstblock, &committed))) {
496 goto error1; 512 goto error1;
497 } 513 }
498 514
515 if (committed) {
516 tp = *tpp;
517 xfs_trans_bjoin(tp, bp);
518 } else {
519 xfs_trans_bhold_release(tp, bp);
520 }
521
499 *O_bpp = bp; 522 *O_bpp = bp;
500 return 0; 523 return 0;
501 524
@@ -514,7 +537,7 @@ xfs_qm_dqalloc(
514 */ 537 */
515STATIC int 538STATIC int
516xfs_qm_dqtobp( 539xfs_qm_dqtobp(
517 xfs_trans_t *tp, 540 xfs_trans_t **tpp,
518 xfs_dquot_t *dqp, 541 xfs_dquot_t *dqp,
519 xfs_disk_dquot_t **O_ddpp, 542 xfs_disk_dquot_t **O_ddpp,
520 xfs_buf_t **O_bpp, 543 xfs_buf_t **O_bpp,
@@ -528,6 +551,7 @@ xfs_qm_dqtobp(
528 xfs_disk_dquot_t *ddq; 551 xfs_disk_dquot_t *ddq;
529 xfs_dqid_t id; 552 xfs_dqid_t id;
530 boolean_t newdquot; 553 boolean_t newdquot;
554 xfs_trans_t *tp = (tpp ? *tpp : NULL);
531 555
532 mp = dqp->q_mount; 556 mp = dqp->q_mount;
533 id = INT_GET(dqp->q_core.d_id, ARCH_CONVERT); 557 id = INT_GET(dqp->q_core.d_id, ARCH_CONVERT);
@@ -579,9 +603,10 @@ xfs_qm_dqtobp(
579 return (ENOENT); 603 return (ENOENT);
580 604
581 ASSERT(tp); 605 ASSERT(tp);
582 if ((error = xfs_qm_dqalloc(tp, mp, dqp, quotip, 606 if ((error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
583 dqp->q_fileoffset, &bp))) 607 dqp->q_fileoffset, &bp)))
584 return (error); 608 return (error);
609 tp = *tpp;
585 newdquot = B_TRUE; 610 newdquot = B_TRUE;
586 } else { 611 } else {
587 /* 612 /*
@@ -645,7 +670,7 @@ xfs_qm_dqtobp(
645/* ARGSUSED */ 670/* ARGSUSED */
646STATIC int 671STATIC int
647xfs_qm_dqread( 672xfs_qm_dqread(
648 xfs_trans_t *tp, 673 xfs_trans_t **tpp,
649 xfs_dqid_t id, 674 xfs_dqid_t id,
650 xfs_dquot_t *dqp, /* dquot to get filled in */ 675 xfs_dquot_t *dqp, /* dquot to get filled in */
651 uint flags) 676 uint flags)
@@ -653,15 +678,19 @@ xfs_qm_dqread(
653 xfs_disk_dquot_t *ddqp; 678 xfs_disk_dquot_t *ddqp;
654 xfs_buf_t *bp; 679 xfs_buf_t *bp;
655 int error; 680 int error;
681 xfs_trans_t *tp;
682
683 ASSERT(tpp);
656 684
657 /* 685 /*
658 * get a pointer to the on-disk dquot and the buffer containing it 686 * get a pointer to the on-disk dquot and the buffer containing it
659 * dqp already knows its own type (GROUP/USER). 687 * dqp already knows its own type (GROUP/USER).
660 */ 688 */
661 xfs_dqtrace_entry(dqp, "DQREAD"); 689 xfs_dqtrace_entry(dqp, "DQREAD");
662 if ((error = xfs_qm_dqtobp(tp, dqp, &ddqp, &bp, flags))) { 690 if ((error = xfs_qm_dqtobp(tpp, dqp, &ddqp, &bp, flags))) {
663 return (error); 691 return (error);
664 } 692 }
693 tp = *tpp;
665 694
666 /* copy everything from disk dquot to the incore dquot */ 695 /* copy everything from disk dquot to the incore dquot */
667 memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t)); 696 memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
@@ -740,7 +769,7 @@ xfs_qm_idtodq(
740 * Read it from disk; xfs_dqread() takes care of 769 * Read it from disk; xfs_dqread() takes care of
741 * all the necessary initialization of dquot's fields (locks, etc) 770 * all the necessary initialization of dquot's fields (locks, etc)
742 */ 771 */
743 if ((error = xfs_qm_dqread(tp, id, dqp, flags))) { 772 if ((error = xfs_qm_dqread(&tp, id, dqp, flags))) {
744 /* 773 /*
745 * This can happen if quotas got turned off (ESRCH), 774 * This can happen if quotas got turned off (ESRCH),
746 * or if the dquot didn't exist on disk and we ask to 775 * or if the dquot didn't exist on disk and we ask to
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
index 39175103c8e0..8ebc87176c78 100644
--- a/fs/xfs/quota/xfs_dquot.h
+++ b/fs/xfs/quota/xfs_dquot.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as 5 * under the terms of version 2 of the GNU General Public License as
@@ -113,20 +113,6 @@ typedef struct xfs_dquot {
113 113
114#define XFS_DQHOLD(dqp) ((dqp)->q_nrefs++) 114#define XFS_DQHOLD(dqp) ((dqp)->q_nrefs++)
115 115
116/*
117 * Quota Accounting/Enforcement flags
118 */
119#define XFS_ALL_QUOTA_ACCT \
120 (XFS_UQUOTA_ACCT | XFS_GQUOTA_ACCT | XFS_PQUOTA_ACCT)
121#define XFS_ALL_QUOTA_ENFD (XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD)
122#define XFS_ALL_QUOTA_CHKD (XFS_UQUOTA_CHKD | XFS_OQUOTA_CHKD)
123
124#define XFS_IS_QUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ACCT)
125#define XFS_IS_QUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ENFD)
126#define XFS_IS_UQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_UQUOTA_ACCT)
127#define XFS_IS_PQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_PQUOTA_ACCT)
128#define XFS_IS_GQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_GQUOTA_ACCT)
129
130#ifdef DEBUG 116#ifdef DEBUG
131static inline int 117static inline int
132XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp) 118XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp)
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index f5271b7b1e84..e74eaa7dd1bc 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -509,6 +509,7 @@ xfs_qm_qoff_logitem_format(xfs_qoff_logitem_t *qf,
509 509
510 log_vector->i_addr = (xfs_caddr_t)&(qf->qql_format); 510 log_vector->i_addr = (xfs_caddr_t)&(qf->qql_format);
511 log_vector->i_len = sizeof(xfs_qoff_logitem_t); 511 log_vector->i_len = sizeof(xfs_qoff_logitem_t);
512 XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_QUOTAOFF);
512 qf->qql_format.qf_size = 1; 513 qf->qql_format.qf_size = 1;
513} 514}
514 515
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index f665ca8f9e96..efde16e0a913 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as 5 * under the terms of version 2 of the GNU General Public License as
@@ -365,16 +365,6 @@ xfs_qm_mount_quotas(
365 int error = 0; 365 int error = 0;
366 uint sbf; 366 uint sbf;
367 367
368 /*
369 * If a file system had quotas running earlier, but decided to
370 * mount without -o uquota/pquota/gquota options, revoke the
371 * quotachecked license, and bail out.
372 */
373 if (! XFS_IS_QUOTA_ON(mp) &&
374 (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT)) {
375 mp->m_qflags = 0;
376 goto write_changes;
377 }
378 368
379 /* 369 /*
380 * If quotas on realtime volumes is not supported, we disable 370 * If quotas on realtime volumes is not supported, we disable
@@ -388,11 +378,8 @@ xfs_qm_mount_quotas(
388 goto write_changes; 378 goto write_changes;
389 } 379 }
390 380
391#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
392 cmn_err(CE_NOTE, "Attempting to turn on disk quotas.");
393#endif
394
395 ASSERT(XFS_IS_QUOTA_RUNNING(mp)); 381 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
382
396 /* 383 /*
397 * Allocate the quotainfo structure inside the mount struct, and 384 * Allocate the quotainfo structure inside the mount struct, and
398 * create quotainode(s), and change/rev superblock if necessary. 385 * create quotainode(s), and change/rev superblock if necessary.
@@ -410,19 +397,14 @@ xfs_qm_mount_quotas(
410 */ 397 */
411 if (XFS_QM_NEED_QUOTACHECK(mp) && 398 if (XFS_QM_NEED_QUOTACHECK(mp) &&
412 !(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) { 399 !(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) {
413#ifdef DEBUG
414 cmn_err(CE_NOTE, "Doing a quotacheck. Please wait.");
415#endif
416 if ((error = xfs_qm_quotacheck(mp))) { 400 if ((error = xfs_qm_quotacheck(mp))) {
417 /* Quotacheck has failed and quotas have 401 /* Quotacheck has failed and quotas have
418 * been disabled. 402 * been disabled.
419 */ 403 */
420 return XFS_ERROR(error); 404 return XFS_ERROR(error);
421 } 405 }
422#ifdef DEBUG
423 cmn_err(CE_NOTE, "Done quotacheck.");
424#endif
425 } 406 }
407
426 write_changes: 408 write_changes:
427 /* 409 /*
428 * We actually don't have to acquire the SB_LOCK at all. 410 * We actually don't have to acquire the SB_LOCK at all.
@@ -2010,7 +1992,7 @@ xfs_qm_quotacheck(
2010 ASSERT(mp->m_quotainfo != NULL); 1992 ASSERT(mp->m_quotainfo != NULL);
2011 ASSERT(xfs_Gqm != NULL); 1993 ASSERT(xfs_Gqm != NULL);
2012 xfs_qm_destroy_quotainfo(mp); 1994 xfs_qm_destroy_quotainfo(mp);
2013 xfs_mount_reset_sbqflags(mp); 1995 (void)xfs_mount_reset_sbqflags(mp);
2014 } else { 1996 } else {
2015 cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname); 1997 cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname);
2016 } 1998 }
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
index b03eecf3b6cb..0b00b3c67015 100644
--- a/fs/xfs/quota/xfs_qm.h
+++ b/fs/xfs/quota/xfs_qm.h
@@ -184,8 +184,6 @@ typedef struct xfs_dquot_acct {
184#define XFS_QM_HOLD(xqm) ((xqm)->qm_nrefs++) 184#define XFS_QM_HOLD(xqm) ((xqm)->qm_nrefs++)
185#define XFS_QM_RELE(xqm) ((xqm)->qm_nrefs--) 185#define XFS_QM_RELE(xqm) ((xqm)->qm_nrefs--)
186 186
187extern void xfs_mount_reset_sbqflags(xfs_mount_t *);
188
189extern void xfs_qm_destroy_quotainfo(xfs_mount_t *); 187extern void xfs_qm_destroy_quotainfo(xfs_mount_t *);
190extern int xfs_qm_mount_quotas(xfs_mount_t *, int); 188extern int xfs_qm_mount_quotas(xfs_mount_t *, int);
191extern void xfs_qm_mount_quotainit(xfs_mount_t *, uint); 189extern void xfs_qm_mount_quotainit(xfs_mount_t *, uint);
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index dc3c37a1e158..8890a18a99d8 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as 5 * under the terms of version 2 of the GNU General Public License as
@@ -229,48 +229,6 @@ xfs_qm_syncall(
229 return error; 229 return error;
230} 230}
231 231
232/*
233 * Clear the quotaflags in memory and in the superblock.
234 */
235void
236xfs_mount_reset_sbqflags(
237 xfs_mount_t *mp)
238{
239 xfs_trans_t *tp;
240 unsigned long s;
241
242 mp->m_qflags = 0;
243 /*
244 * It is OK to look at sb_qflags here in mount path,
245 * without SB_LOCK.
246 */
247 if (mp->m_sb.sb_qflags == 0)
248 return;
249 s = XFS_SB_LOCK(mp);
250 mp->m_sb.sb_qflags = 0;
251 XFS_SB_UNLOCK(mp, s);
252
253 /*
254 * if the fs is readonly, let the incore superblock run
255 * with quotas off but don't flush the update out to disk
256 */
257 if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY)
258 return;
259#ifdef QUOTADEBUG
260 xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes");
261#endif
262 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
263 if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
264 XFS_DEFAULT_LOG_COUNT)) {
265 xfs_trans_cancel(tp, 0);
266 xfs_fs_cmn_err(CE_ALERT, mp,
267 "xfs_mount_reset_sbqflags: Superblock update failed!");
268 return;
269 }
270 xfs_mod_sb(tp, XFS_SB_QFLAGS);
271 xfs_trans_commit(tp, 0, NULL);
272}
273
274STATIC int 232STATIC int
275xfs_qm_newmount( 233xfs_qm_newmount(
276 xfs_mount_t *mp, 234 xfs_mount_t *mp,
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 68e98962dbef..15e02e8a9d4f 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -1053,7 +1053,6 @@ xfs_qm_dqrele_all_inodes(
1053 struct xfs_mount *mp, 1053 struct xfs_mount *mp,
1054 uint flags) 1054 uint flags)
1055{ 1055{
1056 vmap_t vmap;
1057 xfs_inode_t *ip, *topino; 1056 xfs_inode_t *ip, *topino;
1058 uint ireclaims; 1057 uint ireclaims;
1059 vnode_t *vp; 1058 vnode_t *vp;
@@ -1061,8 +1060,8 @@ xfs_qm_dqrele_all_inodes(
1061 1060
1062 ASSERT(mp->m_quotainfo); 1061 ASSERT(mp->m_quotainfo);
1063 1062
1064again:
1065 XFS_MOUNT_ILOCK(mp); 1063 XFS_MOUNT_ILOCK(mp);
1064again:
1066 ip = mp->m_inodes; 1065 ip = mp->m_inodes;
1067 if (ip == NULL) { 1066 if (ip == NULL) {
1068 XFS_MOUNT_IUNLOCK(mp); 1067 XFS_MOUNT_IUNLOCK(mp);
@@ -1090,18 +1089,14 @@ again:
1090 } 1089 }
1091 vnode_refd = B_FALSE; 1090 vnode_refd = B_FALSE;
1092 if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) { 1091 if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) {
1093 /*
1094 * Sample vp mapping while holding the mplock, lest
1095 * we come across a non-existent vnode.
1096 */
1097 VMAP(vp, vmap);
1098 ireclaims = mp->m_ireclaims; 1092 ireclaims = mp->m_ireclaims;
1099 topino = mp->m_inodes; 1093 topino = mp->m_inodes;
1100 XFS_MOUNT_IUNLOCK(mp); 1094 vp = vn_grab(vp);
1095 if (!vp)
1096 goto again;
1101 1097
1098 XFS_MOUNT_IUNLOCK(mp);
1102 /* XXX restart limit ? */ 1099 /* XXX restart limit ? */
1103 if ( ! (vp = vn_get(vp, &vmap)))
1104 goto again;
1105 xfs_ilock(ip, XFS_ILOCK_EXCL); 1100 xfs_ilock(ip, XFS_ILOCK_EXCL);
1106 vnode_refd = B_TRUE; 1101 vnode_refd = B_TRUE;
1107 } else { 1102 } else {
@@ -1137,7 +1132,6 @@ again:
1137 */ 1132 */
1138 if (topino != mp->m_inodes || mp->m_ireclaims != ireclaims) { 1133 if (topino != mp->m_inodes || mp->m_ireclaims != ireclaims) {
1139 /* XXX use a sentinel */ 1134 /* XXX use a sentinel */
1140 XFS_MOUNT_IUNLOCK(mp);
1141 goto again; 1135 goto again;
1142 } 1136 }
1143 ip = ip->i_mnext; 1137 ip = ip->i_mnext;
diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c
index 4ed7b6928cd7..4e1a5ec22fa3 100644
--- a/fs/xfs/support/debug.c
+++ b/fs/xfs/support/debug.c
@@ -31,6 +31,7 @@
31 */ 31 */
32 32
33#include "debug.h" 33#include "debug.h"
34#include "spin.h"
34 35
35#include <asm/page.h> 36#include <asm/page.h>
36#include <linux/sched.h> 37#include <linux/sched.h>
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 8d01dce8c532..92fd1d67f878 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -85,7 +85,7 @@ xfs_acl_vhasacl_default(
85{ 85{
86 int error; 86 int error;
87 87
88 if (vp->v_type != VDIR) 88 if (!VN_ISDIR(vp))
89 return 0; 89 return 0;
90 xfs_acl_get_attr(vp, NULL, _ACL_TYPE_DEFAULT, ATTR_KERNOVAL, &error); 90 xfs_acl_get_attr(vp, NULL, _ACL_TYPE_DEFAULT, ATTR_KERNOVAL, &error);
91 return (error == 0); 91 return (error == 0);
@@ -389,7 +389,7 @@ xfs_acl_allow_set(
389 389
390 if (vp->v_inode.i_flags & (S_IMMUTABLE|S_APPEND)) 390 if (vp->v_inode.i_flags & (S_IMMUTABLE|S_APPEND))
391 return EPERM; 391 return EPERM;
392 if (kind == _ACL_TYPE_DEFAULT && vp->v_type != VDIR) 392 if (kind == _ACL_TYPE_DEFAULT && !VN_ISDIR(vp))
393 return ENOTDIR; 393 return ENOTDIR;
394 if (vp->v_vfsp->vfs_flag & VFS_RDONLY) 394 if (vp->v_vfsp->vfs_flag & VFS_RDONLY)
395 return EROFS; 395 return EROFS;
@@ -750,7 +750,7 @@ xfs_acl_inherit(
750 * If the new file is a directory, its default ACL is a copy of 750 * If the new file is a directory, its default ACL is a copy of
751 * the containing directory's default ACL. 751 * the containing directory's default ACL.
752 */ 752 */
753 if (vp->v_type == VDIR) 753 if (VN_ISDIR(vp))
754 xfs_acl_set_attr(vp, pdaclp, _ACL_TYPE_DEFAULT, &error); 754 xfs_acl_set_attr(vp, pdaclp, _ACL_TYPE_DEFAULT, &error);
755 if (!error && !basicperms) 755 if (!error && !basicperms)
756 xfs_acl_set_attr(vp, cacl, _ACL_TYPE_ACCESS, &error); 756 xfs_acl_set_attr(vp, cacl, _ACL_TYPE_ACCESS, &error);
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 6f5d283888aa..3e76def1283d 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -4754,10 +4754,20 @@ xfs_bmapi(
4754 error = xfs_mod_incore_sb(mp, 4754 error = xfs_mod_incore_sb(mp,
4755 XFS_SBS_FDBLOCKS, 4755 XFS_SBS_FDBLOCKS,
4756 -(alen), rsvd); 4756 -(alen), rsvd);
4757 if (!error) 4757 if (!error) {
4758 error = xfs_mod_incore_sb(mp, 4758 error = xfs_mod_incore_sb(mp,
4759 XFS_SBS_FDBLOCKS, 4759 XFS_SBS_FDBLOCKS,
4760 -(indlen), rsvd); 4760 -(indlen), rsvd);
4761 if (error && rt) {
4762 xfs_mod_incore_sb(ip->i_mount,
4763 XFS_SBS_FREXTENTS,
4764 extsz, rsvd);
4765 } else if (error) {
4766 xfs_mod_incore_sb(ip->i_mount,
4767 XFS_SBS_FDBLOCKS,
4768 alen, rsvd);
4769 }
4770 }
4761 4771
4762 if (error) { 4772 if (error) {
4763 if (XFS_IS_QUOTA_ON(ip->i_mount)) 4773 if (XFS_IS_QUOTA_ON(ip->i_mount))
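
The xfs_bmapi() hunk above makes the delayed-allocation path give back the first in-core reservation (data blocks, or realtime extents) when the follow-up reservation of indirect blocks fails, instead of leaking it. A tiny plain-C sketch of that reserve/rollback shape; the single counter is a hypothetical stand-in for the superblock fields:

	/* Sketch: two-step reservation that rolls back step one on failure. */
	#include <stdio.h>

	static long free_blocks = 12;	/* stands in for the in-core sb_fdblocks */

	static int mod_counter(long *ctr, long delta)
	{
		if (*ctr + delta < 0)
			return -1;	/* would go negative: ENOSPC */
		*ctr += delta;
		return 0;
	}

	static int reserve(long alen, long indlen)
	{
		if (mod_counter(&free_blocks, -alen))
			return -1;
		if (mod_counter(&free_blocks, -indlen)) {
			/* second step failed: return the first reservation */
			mod_counter(&free_blocks, alen);
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		printf("reserve(10, 1) -> %d, free = %ld\n", reserve(10, 1), free_blocks);
		printf("reserve(10, 5) -> %d, free = %ld (rolled back)\n",
		       reserve(10, 5), free_blocks);
		return 0;
	}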
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 30b8285ad476..a264657acfd9 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -274,6 +274,7 @@ xfs_buf_item_format(
274 ((bip->bli_format.blf_map_size - 1) * sizeof(uint))); 274 ((bip->bli_format.blf_map_size - 1) * sizeof(uint)));
275 vecp->i_addr = (xfs_caddr_t)&bip->bli_format; 275 vecp->i_addr = (xfs_caddr_t)&bip->bli_format;
276 vecp->i_len = base_size; 276 vecp->i_len = base_size;
277 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BFORMAT);
277 vecp++; 278 vecp++;
278 nvecs = 1; 279 nvecs = 1;
279 280
@@ -320,12 +321,14 @@ xfs_buf_item_format(
320 buffer_offset = first_bit * XFS_BLI_CHUNK; 321 buffer_offset = first_bit * XFS_BLI_CHUNK;
321 vecp->i_addr = xfs_buf_offset(bp, buffer_offset); 322 vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
322 vecp->i_len = nbits * XFS_BLI_CHUNK; 323 vecp->i_len = nbits * XFS_BLI_CHUNK;
324 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK);
323 nvecs++; 325 nvecs++;
324 break; 326 break;
325 } else if (next_bit != last_bit + 1) { 327 } else if (next_bit != last_bit + 1) {
326 buffer_offset = first_bit * XFS_BLI_CHUNK; 328 buffer_offset = first_bit * XFS_BLI_CHUNK;
327 vecp->i_addr = xfs_buf_offset(bp, buffer_offset); 329 vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
328 vecp->i_len = nbits * XFS_BLI_CHUNK; 330 vecp->i_len = nbits * XFS_BLI_CHUNK;
331 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK);
329 nvecs++; 332 nvecs++;
330 vecp++; 333 vecp++;
331 first_bit = next_bit; 334 first_bit = next_bit;
@@ -337,6 +340,7 @@ xfs_buf_item_format(
337 buffer_offset = first_bit * XFS_BLI_CHUNK; 340 buffer_offset = first_bit * XFS_BLI_CHUNK;
338 vecp->i_addr = xfs_buf_offset(bp, buffer_offset); 341 vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
339 vecp->i_len = nbits * XFS_BLI_CHUNK; 342 vecp->i_len = nbits * XFS_BLI_CHUNK;
343 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK);
340/* You would think we need to bump the nvecs here too, but we do not 344/* You would think we need to bump the nvecs here too, but we do not
341 * this number is used by recovery, and it gets confused by the boundary 345 * this number is used by recovery, and it gets confused by the boundary
342 * split here 346 * split here
diff --git a/fs/xfs/xfs_dmapi.h b/fs/xfs/xfs_dmapi.h
index 55c17adaaa37..19e872856f6b 100644
--- a/fs/xfs/xfs_dmapi.h
+++ b/fs/xfs/xfs_dmapi.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as 5 * under the terms of version 2 of the GNU General Public License as
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index db7cbd1bc857..cc7d1494a45d 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -107,6 +107,7 @@ xfs_efi_item_format(xfs_efi_log_item_t *efip,
107 107
108 log_vector->i_addr = (xfs_caddr_t)&(efip->efi_format); 108 log_vector->i_addr = (xfs_caddr_t)&(efip->efi_format);
109 log_vector->i_len = size; 109 log_vector->i_len = size;
110 XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_EFI_FORMAT);
110 ASSERT(size >= sizeof(xfs_efi_log_format_t)); 111 ASSERT(size >= sizeof(xfs_efi_log_format_t));
111} 112}
112 113
@@ -426,6 +427,7 @@ xfs_efd_item_format(xfs_efd_log_item_t *efdp,
426 427
427 log_vector->i_addr = (xfs_caddr_t)&(efdp->efd_format); 428 log_vector->i_addr = (xfs_caddr_t)&(efdp->efd_format);
428 log_vector->i_len = size; 429 log_vector->i_len = size;
430 XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_EFD_FORMAT);
429 ASSERT(size >= sizeof(xfs_efd_log_format_t)); 431 ASSERT(size >= sizeof(xfs_efd_log_format_t));
430} 432}
431 433
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index d3da00045f26..0d9ae8fb4138 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -30,6 +30,8 @@
30 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ 30 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
31 */ 31 */
32 32
33#include <linux/delay.h>
34
33#include "xfs.h" 35#include "xfs.h"
34 36
35#include "xfs_macros.h" 37#include "xfs_macros.h"
@@ -505,17 +507,15 @@ xfs_iget(
505 vnode_t *vp = NULL; 507 vnode_t *vp = NULL;
506 int error; 508 int error;
507 509
508retry:
509 XFS_STATS_INC(xs_ig_attempts); 510 XFS_STATS_INC(xs_ig_attempts);
510 511
512retry:
511 if ((inode = iget_locked(XFS_MTOVFS(mp)->vfs_super, ino))) { 513 if ((inode = iget_locked(XFS_MTOVFS(mp)->vfs_super, ino))) {
512 bhv_desc_t *bdp; 514 bhv_desc_t *bdp;
513 xfs_inode_t *ip; 515 xfs_inode_t *ip;
514 int newnode;
515 516
516 vp = LINVFS_GET_VP(inode); 517 vp = LINVFS_GET_VP(inode);
517 if (inode->i_state & I_NEW) { 518 if (inode->i_state & I_NEW) {
518inode_allocate:
519 vn_initialize(inode); 519 vn_initialize(inode);
520 error = xfs_iget_core(vp, mp, tp, ino, flags, 520 error = xfs_iget_core(vp, mp, tp, ino, flags,
521 lock_flags, ipp, bno); 521 lock_flags, ipp, bno);
@@ -526,32 +526,25 @@ inode_allocate:
526 iput(inode); 526 iput(inode);
527 } 527 }
528 } else { 528 } else {
529 /* These are true if the inode is in inactive or 529 /*
530 * reclaim. The linux inode is about to go away, 530 * If the inode is not fully constructed due to
 531 * wait for that path to finish, and try again. 531 * filehandle mismatches, wait for the inode to go
532 * away and try again.
533 *
534 * iget_locked will call __wait_on_freeing_inode
535 * to wait for the inode to go away.
532 */ 536 */
533 if (vp->v_flag & (VINACT | VRECLM)) { 537 if (is_bad_inode(inode) ||
534 vn_wait(vp); 538 ((bdp = vn_bhv_lookup(VN_BHV_HEAD(vp),
539 &xfs_vnodeops)) == NULL)) {
535 iput(inode); 540 iput(inode);
541 delay(1);
536 goto retry; 542 goto retry;
537 } 543 }
538 544
539 if (is_bad_inode(inode)) {
540 iput(inode);
541 return EIO;
542 }
543
544 bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
545 if (bdp == NULL) {
546 XFS_STATS_INC(xs_ig_dup);
547 goto inode_allocate;
548 }
549 ip = XFS_BHVTOI(bdp); 545 ip = XFS_BHVTOI(bdp);
550 if (lock_flags != 0) 546 if (lock_flags != 0)
551 xfs_ilock(ip, lock_flags); 547 xfs_ilock(ip, lock_flags);
552 newnode = (ip->i_d.di_mode == 0);
553 if (newnode)
554 xfs_iocore_inode_reinit(ip);
555 XFS_STATS_INC(xs_ig_found); 548 XFS_STATS_INC(xs_ig_found);
556 *ipp = ip; 549 *ipp = ip;
557 error = 0; 550 error = 0;
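
[Editor's note] The reworked xfs_iget path above drops the VINACT/VRECLM special case: when the lookup races with an inode that is bad or not yet attached to xfs_vnodeops, it simply releases the reference, sleeps briefly and retries. A minimal user-space sketch of that back-off-and-retry shape; the lookup and the "still going away" condition are simulated:

#include <stdio.h>
#include <unistd.h>

/* pretend the object is unusable for the first few lookups */
static int lookups;

static int lookup_ready(void)
{
        return ++lookups > 3;
}

static int get_object(void)
{
        for (;;) {
                if (lookup_ready())
                        return 0;       /* fully constructed, done */
                /* drop the reference, wait a tick, try again */
                usleep(1000);
        }
}

int main(void)
{
        int error = get_object();

        printf("error=%d after %d lookups\n", error, lookups);
        return 0;
}
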
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 34bdf5909687..db43308aae93 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1128,7 +1128,6 @@ xfs_ialloc(
1128 ASSERT(ip != NULL); 1128 ASSERT(ip != NULL);
1129 1129
1130 vp = XFS_ITOV(ip); 1130 vp = XFS_ITOV(ip);
1131 vp->v_type = IFTOVT(mode);
1132 ip->i_d.di_mode = (__uint16_t)mode; 1131 ip->i_d.di_mode = (__uint16_t)mode;
1133 ip->i_d.di_onlink = 0; 1132 ip->i_d.di_onlink = 0;
1134 ip->i_d.di_nlink = nlink; 1133 ip->i_d.di_nlink = nlink;
@@ -1250,7 +1249,7 @@ xfs_ialloc(
1250 */ 1249 */
1251 xfs_trans_log_inode(tp, ip, flags); 1250 xfs_trans_log_inode(tp, ip, flags);
1252 1251
1253 /* now that we have a v_type we can set Linux inode ops (& unlock) */ 1252 /* now that we have an i_mode we can set Linux inode ops (& unlock) */
1254 VFS_INIT_VNODE(XFS_MTOVFS(tp->t_mountp), vp, XFS_ITOBHV(ip), 1); 1253 VFS_INIT_VNODE(XFS_MTOVFS(tp->t_mountp), vp, XFS_ITOBHV(ip), 1);
1255 1254
1256 *ipp = ip; 1255 *ipp = ip;
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 0eed30f5cb19..276ec70eb7f9 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -248,6 +248,7 @@ xfs_inode_item_format(
248 248
249 vecp->i_addr = (xfs_caddr_t)&iip->ili_format; 249 vecp->i_addr = (xfs_caddr_t)&iip->ili_format;
250 vecp->i_len = sizeof(xfs_inode_log_format_t); 250 vecp->i_len = sizeof(xfs_inode_log_format_t);
251 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IFORMAT);
251 vecp++; 252 vecp++;
252 nvecs = 1; 253 nvecs = 1;
253 254
@@ -292,6 +293,7 @@ xfs_inode_item_format(
292 293
293 vecp->i_addr = (xfs_caddr_t)&ip->i_d; 294 vecp->i_addr = (xfs_caddr_t)&ip->i_d;
294 vecp->i_len = sizeof(xfs_dinode_core_t); 295 vecp->i_len = sizeof(xfs_dinode_core_t);
296 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ICORE);
295 vecp++; 297 vecp++;
296 nvecs++; 298 nvecs++;
297 iip->ili_format.ilf_fields |= XFS_ILOG_CORE; 299 iip->ili_format.ilf_fields |= XFS_ILOG_CORE;
@@ -349,6 +351,7 @@ xfs_inode_item_format(
349 vecp->i_addr = 351 vecp->i_addr =
350 (char *)(ip->i_df.if_u1.if_extents); 352 (char *)(ip->i_df.if_u1.if_extents);
351 vecp->i_len = ip->i_df.if_bytes; 353 vecp->i_len = ip->i_df.if_bytes;
354 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IEXT);
352 } else 355 } else
353#endif 356#endif
354 { 357 {
@@ -367,6 +370,7 @@ xfs_inode_item_format(
367 vecp->i_addr = (xfs_caddr_t)ext_buffer; 370 vecp->i_addr = (xfs_caddr_t)ext_buffer;
368 vecp->i_len = xfs_iextents_copy(ip, ext_buffer, 371 vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
369 XFS_DATA_FORK); 372 XFS_DATA_FORK);
373 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IEXT);
370 } 374 }
371 ASSERT(vecp->i_len <= ip->i_df.if_bytes); 375 ASSERT(vecp->i_len <= ip->i_df.if_bytes);
372 iip->ili_format.ilf_dsize = vecp->i_len; 376 iip->ili_format.ilf_dsize = vecp->i_len;
@@ -384,6 +388,7 @@ xfs_inode_item_format(
384 ASSERT(ip->i_df.if_broot != NULL); 388 ASSERT(ip->i_df.if_broot != NULL);
385 vecp->i_addr = (xfs_caddr_t)ip->i_df.if_broot; 389 vecp->i_addr = (xfs_caddr_t)ip->i_df.if_broot;
386 vecp->i_len = ip->i_df.if_broot_bytes; 390 vecp->i_len = ip->i_df.if_broot_bytes;
391 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IBROOT);
387 vecp++; 392 vecp++;
388 nvecs++; 393 nvecs++;
389 iip->ili_format.ilf_dsize = ip->i_df.if_broot_bytes; 394 iip->ili_format.ilf_dsize = ip->i_df.if_broot_bytes;
@@ -409,6 +414,7 @@ xfs_inode_item_format(
409 ASSERT((ip->i_df.if_real_bytes == 0) || 414 ASSERT((ip->i_df.if_real_bytes == 0) ||
410 (ip->i_df.if_real_bytes == data_bytes)); 415 (ip->i_df.if_real_bytes == data_bytes));
411 vecp->i_len = (int)data_bytes; 416 vecp->i_len = (int)data_bytes;
417 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ILOCAL);
412 vecp++; 418 vecp++;
413 nvecs++; 419 nvecs++;
414 iip->ili_format.ilf_dsize = (unsigned)data_bytes; 420 iip->ili_format.ilf_dsize = (unsigned)data_bytes;
@@ -486,6 +492,7 @@ xfs_inode_item_format(
486 vecp->i_len = xfs_iextents_copy(ip, ext_buffer, 492 vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
487 XFS_ATTR_FORK); 493 XFS_ATTR_FORK);
488#endif 494#endif
495 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IATTR_EXT);
489 iip->ili_format.ilf_asize = vecp->i_len; 496 iip->ili_format.ilf_asize = vecp->i_len;
490 vecp++; 497 vecp++;
491 nvecs++; 498 nvecs++;
@@ -500,6 +507,7 @@ xfs_inode_item_format(
500 ASSERT(ip->i_afp->if_broot != NULL); 507 ASSERT(ip->i_afp->if_broot != NULL);
501 vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_broot; 508 vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_broot;
502 vecp->i_len = ip->i_afp->if_broot_bytes; 509 vecp->i_len = ip->i_afp->if_broot_bytes;
510 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IATTR_BROOT);
503 vecp++; 511 vecp++;
504 nvecs++; 512 nvecs++;
505 iip->ili_format.ilf_asize = ip->i_afp->if_broot_bytes; 513 iip->ili_format.ilf_asize = ip->i_afp->if_broot_bytes;
@@ -523,6 +531,7 @@ xfs_inode_item_format(
523 ASSERT((ip->i_afp->if_real_bytes == 0) || 531 ASSERT((ip->i_afp->if_real_bytes == 0) ||
524 (ip->i_afp->if_real_bytes == data_bytes)); 532 (ip->i_afp->if_real_bytes == data_bytes));
525 vecp->i_len = (int)data_bytes; 533 vecp->i_len = (int)data_bytes;
534 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IATTR_LOCAL);
526 vecp++; 535 vecp++;
527 nvecs++; 536 nvecs++;
528 iip->ili_format.ilf_asize = (unsigned)data_bytes; 537 iip->ili_format.ilf_asize = (unsigned)data_bytes;
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 2edd6769e5d3..d0f5be63cddb 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -226,13 +226,12 @@ xfs_iomap(
226 xfs_iomap_enter_trace(XFS_IOMAP_READ_ENTER, io, offset, count); 226 xfs_iomap_enter_trace(XFS_IOMAP_READ_ENTER, io, offset, count);
227 lockmode = XFS_LCK_MAP_SHARED(mp, io); 227 lockmode = XFS_LCK_MAP_SHARED(mp, io);
228 bmapi_flags = XFS_BMAPI_ENTIRE; 228 bmapi_flags = XFS_BMAPI_ENTIRE;
229 if (flags & BMAPI_IGNSTATE)
230 bmapi_flags |= XFS_BMAPI_IGSTATE;
231 break; 229 break;
232 case BMAPI_WRITE: 230 case BMAPI_WRITE:
233 xfs_iomap_enter_trace(XFS_IOMAP_WRITE_ENTER, io, offset, count); 231 xfs_iomap_enter_trace(XFS_IOMAP_WRITE_ENTER, io, offset, count);
234 lockmode = XFS_ILOCK_EXCL|XFS_EXTSIZE_WR; 232 lockmode = XFS_ILOCK_EXCL|XFS_EXTSIZE_WR;
235 bmapi_flags = 0; 233 if (flags & BMAPI_IGNSTATE)
234 bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE;
236 XFS_ILOCK(mp, io, lockmode); 235 XFS_ILOCK(mp, io, lockmode);
237 break; 236 break;
238 case BMAPI_ALLOCATE: 237 case BMAPI_ALLOCATE:
@@ -391,9 +390,9 @@ xfs_iomap_write_direct(
391 xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS], *imapp; 390 xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS], *imapp;
392 xfs_bmap_free_t free_list; 391 xfs_bmap_free_t free_list;
393 int aeof; 392 int aeof;
394 xfs_filblks_t datablocks, qblocks, resblks; 393 xfs_filblks_t qblocks, resblks;
395 int committed; 394 int committed;
396 int numrtextents; 395 int resrtextents;
397 396
398 /* 397 /*
399 * Make sure that the dquots are there. This doesn't hold 398 * Make sure that the dquots are there. This doesn't hold
@@ -434,14 +433,14 @@ xfs_iomap_write_direct(
434 433
435 if (!(extsz = ip->i_d.di_extsize)) 434 if (!(extsz = ip->i_d.di_extsize))
436 extsz = mp->m_sb.sb_rextsize; 435 extsz = mp->m_sb.sb_rextsize;
437 numrtextents = qblocks = (count_fsb + extsz - 1); 436 resrtextents = qblocks = (count_fsb + extsz - 1);
438 do_div(numrtextents, mp->m_sb.sb_rextsize); 437 do_div(resrtextents, mp->m_sb.sb_rextsize);
438 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
439 quota_flag = XFS_QMOPT_RES_RTBLKS; 439 quota_flag = XFS_QMOPT_RES_RTBLKS;
440 datablocks = 0;
441 } else { 440 } else {
442 datablocks = qblocks = count_fsb; 441 resrtextents = 0;
442 resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, count_fsb);
443 quota_flag = XFS_QMOPT_RES_REGBLKS; 443 quota_flag = XFS_QMOPT_RES_REGBLKS;
444 numrtextents = 0;
445 } 444 }
446 445
447 /* 446 /*
@@ -449,9 +448,8 @@ xfs_iomap_write_direct(
449 */ 448 */
450 xfs_iunlock(ip, XFS_ILOCK_EXCL); 449 xfs_iunlock(ip, XFS_ILOCK_EXCL);
451 tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT); 450 tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
452 resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks);
453 error = xfs_trans_reserve(tp, resblks, 451 error = xfs_trans_reserve(tp, resblks,
454 XFS_WRITE_LOG_RES(mp), numrtextents, 452 XFS_WRITE_LOG_RES(mp), resrtextents,
455 XFS_TRANS_PERM_LOG_RES, 453 XFS_TRANS_PERM_LOG_RES,
456 XFS_WRITE_LOG_COUNT); 454 XFS_WRITE_LOG_COUNT);
457 455
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 1cd2ac163877..54a6f1142403 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -159,11 +159,15 @@ xfs_buftarg_t *xlog_target;
159void 159void
160xlog_trace_loggrant(xlog_t *log, xlog_ticket_t *tic, xfs_caddr_t string) 160xlog_trace_loggrant(xlog_t *log, xlog_ticket_t *tic, xfs_caddr_t string)
161{ 161{
162 if (! log->l_grant_trace) { 162 unsigned long cnts;
163 log->l_grant_trace = ktrace_alloc(1024, KM_NOSLEEP); 163
164 if (! log->l_grant_trace) 164 if (!log->l_grant_trace) {
165 log->l_grant_trace = ktrace_alloc(2048, KM_NOSLEEP);
166 if (!log->l_grant_trace)
165 return; 167 return;
166 } 168 }
169 /* ticket counts are 1 byte each */
170 cnts = ((unsigned long)tic->t_ocnt) | ((unsigned long)tic->t_cnt) << 8;
167 171
168 ktrace_enter(log->l_grant_trace, 172 ktrace_enter(log->l_grant_trace,
169 (void *)tic, 173 (void *)tic,
@@ -178,10 +182,10 @@ xlog_trace_loggrant(xlog_t *log, xlog_ticket_t *tic, xfs_caddr_t string)
178 (void *)((unsigned long)CYCLE_LSN(log->l_tail_lsn)), 182 (void *)((unsigned long)CYCLE_LSN(log->l_tail_lsn)),
179 (void *)((unsigned long)BLOCK_LSN(log->l_tail_lsn)), 183 (void *)((unsigned long)BLOCK_LSN(log->l_tail_lsn)),
180 (void *)string, 184 (void *)string,
181 (void *)((unsigned long)13), 185 (void *)((unsigned long)tic->t_trans_type),
182 (void *)((unsigned long)14), 186 (void *)cnts,
183 (void *)((unsigned long)15), 187 (void *)((unsigned long)tic->t_curr_res),
184 (void *)((unsigned long)16)); 188 (void *)((unsigned long)tic->t_unit_res));
185} 189}
186 190
187void 191void
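
[Editor's note] xlog_trace_loggrant now packs the original and current ticket counts into a single unsigned long so both fit in one ktrace slot. A small stand-alone demonstration of that byte packing; the values are arbitrary:

#include <stdio.h>

int main(void)
{
        unsigned char t_ocnt = 5, t_cnt = 2;

        /* ticket counts are 1 byte each: ocnt in bits 0-7, cnt in bits 8-15 */
        unsigned long cnts = (unsigned long)t_ocnt |
                             ((unsigned long)t_cnt << 8);

        printf("packed   = 0x%lx\n", cnts);       /* 0x205 */
        printf("unpacked = ocnt %lu, cnt %lu\n",
               cnts & 0xff, (cnts >> 8) & 0xff);
        return 0;
}
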
@@ -274,9 +278,11 @@ xfs_log_done(xfs_mount_t *mp,
274 * Release ticket if not permanent reservation or a specifc 278 * Release ticket if not permanent reservation or a specifc
275 * request has been made to release a permanent reservation. 279 * request has been made to release a permanent reservation.
276 */ 280 */
281 xlog_trace_loggrant(log, ticket, "xfs_log_done: (non-permanent)");
277 xlog_ungrant_log_space(log, ticket); 282 xlog_ungrant_log_space(log, ticket);
278 xlog_state_put_ticket(log, ticket); 283 xlog_state_put_ticket(log, ticket);
279 } else { 284 } else {
285 xlog_trace_loggrant(log, ticket, "xfs_log_done: (permanent)");
280 xlog_regrant_reserve_log_space(log, ticket); 286 xlog_regrant_reserve_log_space(log, ticket);
281 } 287 }
282 288
@@ -399,7 +405,8 @@ xfs_log_reserve(xfs_mount_t *mp,
399 int cnt, 405 int cnt,
400 xfs_log_ticket_t *ticket, 406 xfs_log_ticket_t *ticket,
401 __uint8_t client, 407 __uint8_t client,
402 uint flags) 408 uint flags,
409 uint t_type)
403{ 410{
404 xlog_t *log = mp->m_log; 411 xlog_t *log = mp->m_log;
405 xlog_ticket_t *internal_ticket; 412 xlog_ticket_t *internal_ticket;
@@ -421,13 +428,19 @@ xfs_log_reserve(xfs_mount_t *mp,
421 if (*ticket != NULL) { 428 if (*ticket != NULL) {
422 ASSERT(flags & XFS_LOG_PERM_RESERV); 429 ASSERT(flags & XFS_LOG_PERM_RESERV);
423 internal_ticket = (xlog_ticket_t *)*ticket; 430 internal_ticket = (xlog_ticket_t *)*ticket;
431 xlog_trace_loggrant(log, internal_ticket, "xfs_log_reserve: existing ticket (permanent trans)");
424 xlog_grant_push_ail(mp, internal_ticket->t_unit_res); 432 xlog_grant_push_ail(mp, internal_ticket->t_unit_res);
425 retval = xlog_regrant_write_log_space(log, internal_ticket); 433 retval = xlog_regrant_write_log_space(log, internal_ticket);
426 } else { 434 } else {
427 /* may sleep if need to allocate more tickets */ 435 /* may sleep if need to allocate more tickets */
428 internal_ticket = xlog_ticket_get(log, unit_bytes, cnt, 436 internal_ticket = xlog_ticket_get(log, unit_bytes, cnt,
429 client, flags); 437 client, flags);
438 internal_ticket->t_trans_type = t_type;
430 *ticket = internal_ticket; 439 *ticket = internal_ticket;
440 xlog_trace_loggrant(log, internal_ticket,
441 (internal_ticket->t_flags & XLOG_TIC_PERM_RESERV) ?
442 "xfs_log_reserve: create new ticket (permanent trans)" :
443 "xfs_log_reserve: create new ticket");
431 xlog_grant_push_ail(mp, 444 xlog_grant_push_ail(mp,
432 (internal_ticket->t_unit_res * 445 (internal_ticket->t_unit_res *
433 internal_ticket->t_cnt)); 446 internal_ticket->t_cnt));
@@ -601,8 +614,9 @@ xfs_log_unmount_write(xfs_mount_t *mp)
601 if (! (XLOG_FORCED_SHUTDOWN(log))) { 614 if (! (XLOG_FORCED_SHUTDOWN(log))) {
602 reg[0].i_addr = (void*)&magic; 615 reg[0].i_addr = (void*)&magic;
603 reg[0].i_len = sizeof(magic); 616 reg[0].i_len = sizeof(magic);
617 XLOG_VEC_SET_TYPE(&reg[0], XLOG_REG_TYPE_UNMOUNT);
604 618
605 error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0); 619 error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0, 0);
606 if (!error) { 620 if (!error) {
607 /* remove inited flag */ 621 /* remove inited flag */
608 ((xlog_ticket_t *)tic)->t_flags = 0; 622 ((xlog_ticket_t *)tic)->t_flags = 0;
@@ -1272,6 +1286,7 @@ xlog_commit_record(xfs_mount_t *mp,
1272 1286
1273 reg[0].i_addr = NULL; 1287 reg[0].i_addr = NULL;
1274 reg[0].i_len = 0; 1288 reg[0].i_len = 0;
1289 XLOG_VEC_SET_TYPE(&reg[0], XLOG_REG_TYPE_COMMIT);
1275 1290
1276 ASSERT_ALWAYS(iclog); 1291 ASSERT_ALWAYS(iclog);
1277 if ((error = xlog_write(mp, reg, 1, ticket, commitlsnp, 1292 if ((error = xlog_write(mp, reg, 1, ticket, commitlsnp,
@@ -1605,6 +1620,117 @@ xlog_state_finish_copy(xlog_t *log,
1605 1620
1606 1621
1607/* 1622/*
1623 * print out info relating to regions written which consume
1624 * the reservation
1625 */
1626#if defined(XFS_LOG_RES_DEBUG)
1627STATIC void
1628xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket)
1629{
1630 uint i;
1631 uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t);
1632
1633 /* match with XLOG_REG_TYPE_* in xfs_log.h */
1634 static char *res_type_str[XLOG_REG_TYPE_MAX] = {
1635 "bformat",
1636 "bchunk",
1637 "efi_format",
1638 "efd_format",
1639 "iformat",
1640 "icore",
1641 "iext",
1642 "ibroot",
1643 "ilocal",
1644 "iattr_ext",
1645 "iattr_broot",
1646 "iattr_local",
1647 "qformat",
1648 "dquot",
1649 "quotaoff",
1650 "LR header",
1651 "unmount",
1652 "commit",
1653 "trans header"
1654 };
1655 static char *trans_type_str[XFS_TRANS_TYPE_MAX] = {
1656 "SETATTR_NOT_SIZE",
1657 "SETATTR_SIZE",
1658 "INACTIVE",
1659 "CREATE",
1660 "CREATE_TRUNC",
1661 "TRUNCATE_FILE",
1662 "REMOVE",
1663 "LINK",
1664 "RENAME",
1665 "MKDIR",
1666 "RMDIR",
1667 "SYMLINK",
1668 "SET_DMATTRS",
1669 "GROWFS",
1670 "STRAT_WRITE",
1671 "DIOSTRAT",
1672 "WRITE_SYNC",
1673 "WRITEID",
1674 "ADDAFORK",
1675 "ATTRINVAL",
1676 "ATRUNCATE",
1677 "ATTR_SET",
1678 "ATTR_RM",
1679 "ATTR_FLAG",
1680 "CLEAR_AGI_BUCKET",
1681 "QM_SBCHANGE",
1682 "DUMMY1",
1683 "DUMMY2",
1684 "QM_QUOTAOFF",
1685 "QM_DQALLOC",
1686 "QM_SETQLIM",
1687 "QM_DQCLUSTER",
1688 "QM_QINOCREATE",
1689 "QM_QUOTAOFF_END",
1690 "SB_UNIT",
1691 "FSYNC_TS",
1692 "GROWFSRT_ALLOC",
1693 "GROWFSRT_ZERO",
1694 "GROWFSRT_FREE",
1695 "SWAPEXT"
1696 };
1697
1698 xfs_fs_cmn_err(CE_WARN, mp,
1699 "xfs_log_write: reservation summary:\n"
1700 " trans type = %s (%u)\n"
1701 " unit res = %d bytes\n"
1702 " current res = %d bytes\n"
1703 " total reg = %u bytes (o/flow = %u bytes)\n"
1704 " ophdrs = %u (ophdr space = %u bytes)\n"
1705 " ophdr + reg = %u bytes\n"
1706 " num regions = %u\n",
1707 ((ticket->t_trans_type <= 0 ||
1708 ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ?
1709 "bad-trans-type" : trans_type_str[ticket->t_trans_type-1]),
1710 ticket->t_trans_type,
1711 ticket->t_unit_res,
1712 ticket->t_curr_res,
1713 ticket->t_res_arr_sum, ticket->t_res_o_flow,
1714 ticket->t_res_num_ophdrs, ophdr_spc,
1715 ticket->t_res_arr_sum +
1716 ticket->t_res_o_flow + ophdr_spc,
1717 ticket->t_res_num);
1718
1719 for (i = 0; i < ticket->t_res_num; i++) {
1720 uint r_type = ticket->t_res_arr[i].r_type;
1721 cmn_err(CE_WARN,
1722 "region[%u]: %s - %u bytes\n",
1723 i,
1724 ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ?
1725 "bad-rtype" : res_type_str[r_type-1]),
1726 ticket->t_res_arr[i].r_len);
1727 }
1728}
1729#else
1730#define xlog_print_tic_res(mp, ticket)
1731#endif
1732
1733/*
1608 * Write some region out to in-core log 1734 * Write some region out to in-core log
1609 * 1735 *
1610 * This will be called when writing externally provided regions or when 1736 * This will be called when writing externally provided regions or when
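
[Editor's note] xlog_print_tic_res above maps 1-based region and transaction type ids onto static string tables, falling back to "bad-rtype"/"bad-trans-type" for anything out of range. A compact sketch of that lookup idiom; the table here is abbreviated to three entries:

#include <stdio.h>

#define TYPE_MAX 3

static const char *type_str[TYPE_MAX] = {
        "bformat", "bchunk", "efi_format"
};

/* ids are 1-based, the array is 0-based; reject anything out of range */
static const char *type_name(unsigned int id)
{
        return (id == 0 || id > TYPE_MAX) ? "bad-rtype" : type_str[id - 1];
}

int main(void)
{
        printf("%s %s %s\n", type_name(1), type_name(3), type_name(7));
        return 0;
}
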
@@ -1677,16 +1803,21 @@ xlog_write(xfs_mount_t * mp,
1677 * xlog_op_header_t and may need to be double word aligned. 1803 * xlog_op_header_t and may need to be double word aligned.
1678 */ 1804 */
1679 len = 0; 1805 len = 0;
1680 if (ticket->t_flags & XLOG_TIC_INITED) /* acct for start rec of xact */ 1806 if (ticket->t_flags & XLOG_TIC_INITED) { /* acct for start rec of xact */
1681 len += sizeof(xlog_op_header_t); 1807 len += sizeof(xlog_op_header_t);
1808 XLOG_TIC_ADD_OPHDR(ticket);
1809 }
1682 1810
1683 for (index = 0; index < nentries; index++) { 1811 for (index = 0; index < nentries; index++) {
1684 len += sizeof(xlog_op_header_t); /* each region gets >= 1 */ 1812 len += sizeof(xlog_op_header_t); /* each region gets >= 1 */
1813 XLOG_TIC_ADD_OPHDR(ticket);
1685 len += reg[index].i_len; 1814 len += reg[index].i_len;
1815 XLOG_TIC_ADD_REGION(ticket, reg[index].i_len, reg[index].i_type);
1686 } 1816 }
1687 contwr = *start_lsn = 0; 1817 contwr = *start_lsn = 0;
1688 1818
1689 if (ticket->t_curr_res < len) { 1819 if (ticket->t_curr_res < len) {
1820 xlog_print_tic_res(mp, ticket);
1690#ifdef DEBUG 1821#ifdef DEBUG
1691 xlog_panic( 1822 xlog_panic(
1692 "xfs_log_write: reservation ran out. Need to up reservation"); 1823 "xfs_log_write: reservation ran out. Need to up reservation");
@@ -1790,6 +1921,7 @@ xlog_write(xfs_mount_t * mp,
1790 len += sizeof(xlog_op_header_t); /* from splitting of region */ 1921 len += sizeof(xlog_op_header_t); /* from splitting of region */
1791 /* account for new log op header */ 1922 /* account for new log op header */
1792 ticket->t_curr_res -= sizeof(xlog_op_header_t); 1923 ticket->t_curr_res -= sizeof(xlog_op_header_t);
1924 XLOG_TIC_ADD_OPHDR(ticket);
1793 } 1925 }
1794 xlog_verify_dest_ptr(log, ptr); 1926 xlog_verify_dest_ptr(log, ptr);
1795 1927
@@ -2282,6 +2414,9 @@ restart:
2282 */ 2414 */
2283 if (log_offset == 0) { 2415 if (log_offset == 0) {
2284 ticket->t_curr_res -= log->l_iclog_hsize; 2416 ticket->t_curr_res -= log->l_iclog_hsize;
2417 XLOG_TIC_ADD_REGION(ticket,
2418 log->l_iclog_hsize,
2419 XLOG_REG_TYPE_LRHEADER);
2285 INT_SET(head->h_cycle, ARCH_CONVERT, log->l_curr_cycle); 2420 INT_SET(head->h_cycle, ARCH_CONVERT, log->l_curr_cycle);
2286 ASSIGN_LSN(head->h_lsn, log); 2421 ASSIGN_LSN(head->h_lsn, log);
2287 ASSERT(log->l_curr_block >= 0); 2422 ASSERT(log->l_curr_block >= 0);
@@ -2468,6 +2603,7 @@ xlog_regrant_write_log_space(xlog_t *log,
2468#endif 2603#endif
2469 2604
2470 tic->t_curr_res = tic->t_unit_res; 2605 tic->t_curr_res = tic->t_unit_res;
2606 XLOG_TIC_RESET_RES(tic);
2471 2607
2472 if (tic->t_cnt > 0) 2608 if (tic->t_cnt > 0)
2473 return (0); 2609 return (0);
@@ -2608,6 +2744,7 @@ xlog_regrant_reserve_log_space(xlog_t *log,
2608 XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w'); 2744 XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w');
2609 XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r'); 2745 XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r');
2610 ticket->t_curr_res = ticket->t_unit_res; 2746 ticket->t_curr_res = ticket->t_unit_res;
2747 XLOG_TIC_RESET_RES(ticket);
2611 xlog_trace_loggrant(log, ticket, 2748 xlog_trace_loggrant(log, ticket,
2612 "xlog_regrant_reserve_log_space: sub current res"); 2749 "xlog_regrant_reserve_log_space: sub current res");
2613 xlog_verify_grant_head(log, 1); 2750 xlog_verify_grant_head(log, 1);
@@ -2624,6 +2761,7 @@ xlog_regrant_reserve_log_space(xlog_t *log,
2624 xlog_verify_grant_head(log, 0); 2761 xlog_verify_grant_head(log, 0);
2625 GRANT_UNLOCK(log, s); 2762 GRANT_UNLOCK(log, s);
2626 ticket->t_curr_res = ticket->t_unit_res; 2763 ticket->t_curr_res = ticket->t_unit_res;
2764 XLOG_TIC_RESET_RES(ticket);
2627} /* xlog_regrant_reserve_log_space */ 2765} /* xlog_regrant_reserve_log_space */
2628 2766
2629 2767
@@ -3179,29 +3317,57 @@ xlog_ticket_get(xlog_t *log,
3179 * and their unit amount is the total amount of space required. 3317 * and their unit amount is the total amount of space required.
3180 * 3318 *
3181 * The following lines of code account for non-transaction data 3319 * The following lines of code account for non-transaction data
3182 * which occupy space in the on-disk log. 3320 * which occupy space in the on-disk log.
3321 *
3322 * Normal form of a transaction is:
3323 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph>
3324 * and then there are LR hdrs, split-recs and roundoff at end of syncs.
3325 *
3326 * We need to account for all the leadup data and trailer data
3327 * around the transaction data.
3328 * And then we need to account for the worst case in terms of using
3329 * more space.
3330 * The worst case will happen if:
3331 * - the placement of the transaction happens to be such that the
3332 * roundoff is at its maximum
3333 * - the transaction data is synced before the commit record is synced
3334 * i.e. <transaction-data><roundoff> | <commit-rec><roundoff>
3335 * Therefore the commit record is in its own Log Record.
3336 * This can happen as the commit record is called with its
3337 * own region to xlog_write().
3338 * This then means that in the worst case, roundoff can happen for
3339 * the commit-rec as well.
3340 * The commit-rec is smaller than padding in this scenario and so it is
3341 * not added separately.
3183 */ 3342 */
3184 3343
3344 /* for trans header */
3345 unit_bytes += sizeof(xlog_op_header_t);
3346 unit_bytes += sizeof(xfs_trans_header_t);
3347
3185 /* for start-rec */ 3348 /* for start-rec */
3186 unit_bytes += sizeof(xlog_op_header_t); 3349 unit_bytes += sizeof(xlog_op_header_t);
3350
3351 /* for LR headers */
3352 num_headers = ((unit_bytes + log->l_iclog_size-1) >> log->l_iclog_size_log);
3353 unit_bytes += log->l_iclog_hsize * num_headers;
3354
3355 /* for commit-rec LR header - note: padding will subsume the ophdr */
3356 unit_bytes += log->l_iclog_hsize;
3357
3358 /* for split-recs - ophdrs added when data split over LRs */
3359 unit_bytes += sizeof(xlog_op_header_t) * num_headers;
3187 3360
3188 /* for padding */ 3361 /* for roundoff padding for transaction data and one for commit record */
3189 if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) && 3362 if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) &&
3190 log->l_mp->m_sb.sb_logsunit > 1) { 3363 log->l_mp->m_sb.sb_logsunit > 1) {
3191 /* log su roundoff */ 3364 /* log su roundoff */
3192 unit_bytes += log->l_mp->m_sb.sb_logsunit; 3365 unit_bytes += 2*log->l_mp->m_sb.sb_logsunit;
3193 } else { 3366 } else {
3194 /* BB roundoff */ 3367 /* BB roundoff */
3195 unit_bytes += BBSIZE; 3368 unit_bytes += 2*BBSIZE;
3196 } 3369 }
3197 3370
3198 /* for commit-rec */
3199 unit_bytes += sizeof(xlog_op_header_t);
3200
3201 /* for LR headers */
3202 num_headers = ((unit_bytes + log->l_iclog_size-1) >> log->l_iclog_size_log);
3203 unit_bytes += log->l_iclog_hsize * num_headers;
3204
3205 tic->t_unit_res = unit_bytes; 3371 tic->t_unit_res = unit_bytes;
3206 tic->t_curr_res = unit_bytes; 3372 tic->t_curr_res = unit_bytes;
3207 tic->t_cnt = cnt; 3373 tic->t_cnt = cnt;
@@ -3209,10 +3375,13 @@ xlog_ticket_get(xlog_t *log,
3209 tic->t_tid = (xlog_tid_t)((__psint_t)tic & 0xffffffff); 3375 tic->t_tid = (xlog_tid_t)((__psint_t)tic & 0xffffffff);
3210 tic->t_clientid = client; 3376 tic->t_clientid = client;
3211 tic->t_flags = XLOG_TIC_INITED; 3377 tic->t_flags = XLOG_TIC_INITED;
3378 tic->t_trans_type = 0;
3212 if (xflags & XFS_LOG_PERM_RESERV) 3379 if (xflags & XFS_LOG_PERM_RESERV)
3213 tic->t_flags |= XLOG_TIC_PERM_RESERV; 3380 tic->t_flags |= XLOG_TIC_PERM_RESERV;
3214 sv_init(&(tic->t_sema), SV_DEFAULT, "logtick"); 3381 sv_init(&(tic->t_sema), SV_DEFAULT, "logtick");
3215 3382
3383 XLOG_TIC_RESET_RES(tic);
3384
3216 return tic; 3385 return tic;
3217} /* xlog_ticket_get */ 3386} /* xlog_ticket_get */
3218 3387
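
[Editor's note] The reworked xlog_ticket_get reservation above charges, in order: the transaction header and its op header, the start record, one log-record header per iclog the payload can span, a separate LR header for the commit record, per-split op headers, and two rounds of round-off padding (one for the data, one for the commit record). A rough stand-alone calculation following those steps; the structure sizes are invented placeholders, not real XFS values:

#include <stdio.h>

int main(void)
{
        unsigned int unit_bytes     = 8192; /* caller's payload estimate */
        unsigned int ophdr          = 12;   /* stand-in for sizeof(xlog_op_header_t) */
        unsigned int trans_hdr      = 16;   /* stand-in for sizeof(xfs_trans_header_t) */
        unsigned int iclog_hsize    = 512;  /* stand-in LR header size */
        unsigned int iclog_size_log = 15;   /* 32k iclogs */
        unsigned int bbsize         = 512;
        unsigned int num_headers;

        unit_bytes += ophdr + trans_hdr;    /* trans header + its op header */
        unit_bytes += ophdr;                /* start record */

        /* one LR header per iclog the payload may span */
        num_headers = (unit_bytes + (1u << iclog_size_log) - 1) >> iclog_size_log;
        unit_bytes += iclog_hsize * num_headers;

        unit_bytes += iclog_hsize;          /* commit record's own LR header */
        unit_bytes += ophdr * num_headers;  /* split-record op headers */
        unit_bytes += 2 * bbsize;           /* round-off for data and for commit */

        printf("unit reservation = %u bytes\n", unit_bytes);
        return 0;
}
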
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 0db122ddda3f..18961119fc65 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -114,9 +114,44 @@ xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
114#define XFS_VOLUME 0x2 114#define XFS_VOLUME 0x2
115#define XFS_LOG 0xaa 115#define XFS_LOG 0xaa
116 116
117
118/* Region types for iovec's i_type */
119#if defined(XFS_LOG_RES_DEBUG)
120#define XLOG_REG_TYPE_BFORMAT 1
121#define XLOG_REG_TYPE_BCHUNK 2
122#define XLOG_REG_TYPE_EFI_FORMAT 3
123#define XLOG_REG_TYPE_EFD_FORMAT 4
124#define XLOG_REG_TYPE_IFORMAT 5
125#define XLOG_REG_TYPE_ICORE 6
126#define XLOG_REG_TYPE_IEXT 7
127#define XLOG_REG_TYPE_IBROOT 8
128#define XLOG_REG_TYPE_ILOCAL 9
129#define XLOG_REG_TYPE_IATTR_EXT 10
130#define XLOG_REG_TYPE_IATTR_BROOT 11
131#define XLOG_REG_TYPE_IATTR_LOCAL 12
132#define XLOG_REG_TYPE_QFORMAT 13
133#define XLOG_REG_TYPE_DQUOT 14
134#define XLOG_REG_TYPE_QUOTAOFF 15
135#define XLOG_REG_TYPE_LRHEADER 16
136#define XLOG_REG_TYPE_UNMOUNT 17
137#define XLOG_REG_TYPE_COMMIT 18
138#define XLOG_REG_TYPE_TRANSHDR 19
139#define XLOG_REG_TYPE_MAX 19
140#endif
141
142#if defined(XFS_LOG_RES_DEBUG)
143#define XLOG_VEC_SET_TYPE(vecp, t) ((vecp)->i_type = (t))
144#else
145#define XLOG_VEC_SET_TYPE(vecp, t)
146#endif
147
148
117typedef struct xfs_log_iovec { 149typedef struct xfs_log_iovec {
118 xfs_caddr_t i_addr; /* beginning address of region */ 150 xfs_caddr_t i_addr; /* beginning address of region */
119 int i_len; /* length in bytes of region */ 151 int i_len; /* length in bytes of region */
152#if defined(XFS_LOG_RES_DEBUG)
153 uint i_type; /* type of region */
154#endif
120} xfs_log_iovec_t; 155} xfs_log_iovec_t;
121 156
122typedef void* xfs_log_ticket_t; 157typedef void* xfs_log_ticket_t;
@@ -159,7 +194,8 @@ int xfs_log_reserve(struct xfs_mount *mp,
159 int count, 194 int count,
160 xfs_log_ticket_t *ticket, 195 xfs_log_ticket_t *ticket,
161 __uint8_t clientid, 196 __uint8_t clientid,
162 uint flags); 197 uint flags,
198 uint t_type);
163int xfs_log_write(struct xfs_mount *mp, 199int xfs_log_write(struct xfs_mount *mp,
164 xfs_log_iovec_t region[], 200 xfs_log_iovec_t region[],
165 int nentries, 201 int nentries,
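
[Editor's note] The new i_type field and XLOG_VEC_SET_TYPE macro above compile away entirely unless XFS_LOG_RES_DEBUG is defined, so the region tagging costs nothing in production builds. A minimal sketch of that conditional-macro pattern; the struct and macro names are illustrative:

#include <stdio.h>

/* #define LOG_RES_DEBUG 1   -- define to keep the i_type field and the tagging */

struct iovec_demo {
        void *i_addr;
        int   i_len;
#if defined(LOG_RES_DEBUG)
        unsigned int i_type;
#endif
};

#if defined(LOG_RES_DEBUG)
#define VEC_SET_TYPE(vecp, t) ((vecp)->i_type = (t))
#else
#define VEC_SET_TYPE(vecp, t)           /* expands to nothing */
#endif

int main(void)
{
        char buf[8];
        struct iovec_demo v = { buf, sizeof(buf) };

        VEC_SET_TYPE(&v, 5);            /* no-op unless LOG_RES_DEBUG is set */
        printf("len=%d\n", v.i_len);
        return 0;
}
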
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 1a1d452f15f9..eb7fdc6ebc32 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -335,18 +335,66 @@ typedef __uint32_t xlog_tid_t;
335 335
336#define XLOG_COVER_OPS 5 336#define XLOG_COVER_OPS 5
337 337
338
339/* Ticket reservation region accounting */
340#if defined(XFS_LOG_RES_DEBUG)
341#define XLOG_TIC_LEN_MAX 15
342#define XLOG_TIC_RESET_RES(t) ((t)->t_res_num = \
343 (t)->t_res_arr_sum = (t)->t_res_num_ophdrs = 0)
344#define XLOG_TIC_ADD_OPHDR(t) ((t)->t_res_num_ophdrs++)
345#define XLOG_TIC_ADD_REGION(t, len, type) \
346 do { \
347 if ((t)->t_res_num == XLOG_TIC_LEN_MAX) { \
348 /* add to overflow and start again */ \
349 (t)->t_res_o_flow += (t)->t_res_arr_sum; \
350 (t)->t_res_num = 0; \
351 (t)->t_res_arr_sum = 0; \
352 } \
353 (t)->t_res_arr[(t)->t_res_num].r_len = (len); \
354 (t)->t_res_arr[(t)->t_res_num].r_type = (type); \
355 (t)->t_res_arr_sum += (len); \
356 (t)->t_res_num++; \
357 } while (0)
358
359/*
360 * Reservation region
361 * As would be stored in xfs_log_iovec but without the i_addr which
362 * we don't care about.
363 */
364typedef struct xlog_res {
365 uint r_len;
366 uint r_type;
367} xlog_res_t;
368#else
369#define XLOG_TIC_RESET_RES(t)
370#define XLOG_TIC_ADD_OPHDR(t)
371#define XLOG_TIC_ADD_REGION(t, len, type)
372#endif
373
374
338typedef struct xlog_ticket { 375typedef struct xlog_ticket {
339 sv_t t_sema; /* sleep on this semaphore :20 */ 376 sv_t t_sema; /* sleep on this semaphore : 20 */
340 struct xlog_ticket *t_next; /* : 4 */ 377 struct xlog_ticket *t_next; /* :4|8 */
341 struct xlog_ticket *t_prev; /* : 4 */ 378 struct xlog_ticket *t_prev; /* :4|8 */
342 xlog_tid_t t_tid; /* transaction identifier : 4 */ 379 xlog_tid_t t_tid; /* transaction identifier : 4 */
343 int t_curr_res; /* current reservation in bytes : 4 */ 380 int t_curr_res; /* current reservation in bytes : 4 */
344 int t_unit_res; /* unit reservation in bytes : 4 */ 381 int t_unit_res; /* unit reservation in bytes : 4 */
345 __uint8_t t_ocnt; /* original count : 1 */ 382 char t_ocnt; /* original count : 1 */
346 __uint8_t t_cnt; /* current count : 1 */ 383 char t_cnt; /* current count : 1 */
347 __uint8_t t_clientid; /* who does this belong to; : 1 */ 384 char t_clientid; /* who does this belong to; : 1 */
348 __uint8_t t_flags; /* properties of reservation : 1 */ 385 char t_flags; /* properties of reservation : 1 */
386 uint t_trans_type; /* transaction type : 4 */
387
388#if defined (XFS_LOG_RES_DEBUG)
389 /* reservation array fields */
390 uint t_res_num; /* num in array : 4 */
391 xlog_res_t t_res_arr[XLOG_TIC_LEN_MAX]; /* array of res : X */
392 uint t_res_num_ophdrs; /* num op hdrs : 4 */
393 uint t_res_arr_sum; /* array sum : 4 */
394 uint t_res_o_flow; /* sum overflow : 4 */
395#endif
349} xlog_ticket_t; 396} xlog_ticket_t;
397
350#endif 398#endif
351 399
352 400
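
[Editor's note] XLOG_TIC_ADD_REGION above appends (length, type) pairs to a fixed 15-entry array on the ticket; once the array fills, the running sum is folded into an overflow counter and the array restarts. A user-space rendering of that accounting with a deliberately tiny array; field names and sizes are simplified:

#include <stdio.h>

#define LEN_MAX 4                       /* keep the demo array tiny */

struct ticket {
        unsigned int res_num;
        unsigned int res_arr_sum;
        unsigned int res_o_flow;
        struct { unsigned int len, type; } res_arr[LEN_MAX];
};

static void add_region(struct ticket *t, unsigned int len, unsigned int type)
{
        if (t->res_num == LEN_MAX) {
                /* add to overflow and start again */
                t->res_o_flow += t->res_arr_sum;
                t->res_num = 0;
                t->res_arr_sum = 0;
        }
        t->res_arr[t->res_num].len = len;
        t->res_arr[t->res_num].type = type;
        t->res_arr_sum += len;
        t->res_num++;
}

int main(void)
{
        struct ticket t = { 0 };
        unsigned int i;

        for (i = 0; i < 6; i++)
                add_region(&t, 100 + i, i);

        printf("num=%u sum=%u overflow=%u\n",
               t.res_num, t.res_arr_sum, t.res_o_flow);
        return 0;
}
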
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 0aac28ddb81c..14faabaabf29 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1387,7 +1387,7 @@ xlog_recover_add_to_cont_trans(
1387 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr; 1387 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
1388 old_len = item->ri_buf[item->ri_cnt-1].i_len; 1388 old_len = item->ri_buf[item->ri_cnt-1].i_len;
1389 1389
1390 ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0); 1390 ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0u);
1391 memcpy(&ptr[old_len], dp, len); /* d, s, l */ 1391 memcpy(&ptr[old_len], dp, len); /* d, s, l */
1392 item->ri_buf[item->ri_cnt-1].i_len += len; 1392 item->ri_buf[item->ri_cnt-1].i_len += len;
1393 item->ri_buf[item->ri_cnt-1].i_addr = ptr; 1393 item->ri_buf[item->ri_cnt-1].i_addr = ptr;
diff --git a/fs/xfs/xfs_qmops.c b/fs/xfs/xfs_qmops.c
index 4f40c92863d5..a6cd6324e946 100644
--- a/fs/xfs/xfs_qmops.c
+++ b/fs/xfs/xfs_qmops.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as 5 * under the terms of version 2 of the GNU General Public License as
@@ -42,7 +42,8 @@
42#include "xfs_dir2.h" 42#include "xfs_dir2.h"
43#include "xfs_dmapi.h" 43#include "xfs_dmapi.h"
44#include "xfs_mount.h" 44#include "xfs_mount.h"
45 45#include "xfs_quota.h"
46#include "xfs_error.h"
46 47
47STATIC struct xfs_dquot * 48STATIC struct xfs_dquot *
48xfs_dqvopchown_default( 49xfs_dqvopchown_default(
@@ -54,8 +55,79 @@ xfs_dqvopchown_default(
54 return NULL; 55 return NULL;
55} 56}
56 57
58/*
59 * Clear the quotaflags in memory and in the superblock.
60 */
61int
62xfs_mount_reset_sbqflags(xfs_mount_t *mp)
63{
64 int error;
65 xfs_trans_t *tp;
66 unsigned long s;
67
68 mp->m_qflags = 0;
69 /*
70 * It is OK to look at sb_qflags here in mount path,
71 * without SB_LOCK.
72 */
73 if (mp->m_sb.sb_qflags == 0)
74 return 0;
75 s = XFS_SB_LOCK(mp);
76 mp->m_sb.sb_qflags = 0;
77 XFS_SB_UNLOCK(mp, s);
78
79 /*
80 * if the fs is readonly, let the incore superblock run
81 * with quotas off but don't flush the update out to disk
82 */
83 if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY)
84 return 0;
85#ifdef QUOTADEBUG
86 xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes");
87#endif
88 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
89 if ((error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
90 XFS_DEFAULT_LOG_COUNT))) {
91 xfs_trans_cancel(tp, 0);
92 xfs_fs_cmn_err(CE_ALERT, mp,
93 "xfs_mount_reset_sbqflags: Superblock update failed!");
94 return error;
95 }
96 xfs_mod_sb(tp, XFS_SB_QFLAGS);
97 error = xfs_trans_commit(tp, 0, NULL);
98 return error;
99}
100
101STATIC int
102xfs_noquota_init(
103 xfs_mount_t *mp,
104 uint *needquotamount,
105 uint *quotaflags)
106{
107 int error = 0;
108
109 *quotaflags = 0;
110 *needquotamount = B_FALSE;
111
112 ASSERT(!XFS_IS_QUOTA_ON(mp));
113
114 /*
115 * If a file system had quotas running earlier, but decided to
116 * mount without -o uquota/pquota/gquota options, revoke the
117 * quotachecked license.
118 */
119 if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
120 cmn_err(CE_NOTE,
121 "XFS resetting qflags for filesystem %s",
122 mp->m_fsname);
123
124 error = xfs_mount_reset_sbqflags(mp);
125 }
126 return error;
127}
128
57xfs_qmops_t xfs_qmcore_stub = { 129xfs_qmops_t xfs_qmcore_stub = {
58 .xfs_qminit = (xfs_qminit_t) fs_noerr, 130 .xfs_qminit = (xfs_qminit_t) xfs_noquota_init,
59 .xfs_qmdone = (xfs_qmdone_t) fs_noerr, 131 .xfs_qmdone = (xfs_qmdone_t) fs_noerr,
60 .xfs_qmmount = (xfs_qmmount_t) fs_noerr, 132 .xfs_qmmount = (xfs_qmmount_t) fs_noerr,
61 .xfs_qmunmount = (xfs_qmunmount_t) fs_noerr, 133 .xfs_qmunmount = (xfs_qmunmount_t) fs_noerr,
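
[Editor's note] The hunk above swaps the generic fs_noerr stub out of the quota ops table and points xfs_qminit at a real xfs_noquota_init, so a quota-less mount can still reset stale on-disk quota flags. A small sketch of that function-pointer ops-table pattern with a default stub; all names here are illustrative:

#include <stdio.h>

struct mount { int qflags; };

typedef int (*qm_init_t)(struct mount *);

struct qm_ops { qm_init_t qm_init; };

static int noerr(struct mount *mp)        /* generic "do nothing" default */
{
        (void)mp;
        return 0;
}

static int noquota_init(struct mount *mp) /* real work for quota-less mounts */
{
        if (mp->qflags) {
                printf("resetting stale quota flags 0x%x\n", mp->qflags);
                mp->qflags = 0;
        }
        return 0;
}

static const struct qm_ops qm_stub = { .qm_init = noquota_init };

int main(void)
{
        struct mount mp = { .qflags = 0x5 };

        (void)noerr;                      /* the previous default, kept for contrast */
        return qm_stub.qm_init(&mp);
}
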
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index 7134576ae7fa..32cb79752d5d 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as 5 * under the terms of version 2 of the GNU General Public License as
@@ -160,6 +160,20 @@ typedef struct xfs_qoff_logformat {
160#define XFS_GQUOTA_ACCT 0x0040 /* group quota accounting ON */ 160#define XFS_GQUOTA_ACCT 0x0040 /* group quota accounting ON */
161 161
162/* 162/*
163 * Quota Accounting/Enforcement flags
164 */
165#define XFS_ALL_QUOTA_ACCT \
166 (XFS_UQUOTA_ACCT | XFS_GQUOTA_ACCT | XFS_PQUOTA_ACCT)
167#define XFS_ALL_QUOTA_ENFD (XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD)
168#define XFS_ALL_QUOTA_CHKD (XFS_UQUOTA_CHKD | XFS_OQUOTA_CHKD)
169
170#define XFS_IS_QUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ACCT)
171#define XFS_IS_QUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ENFD)
172#define XFS_IS_UQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_UQUOTA_ACCT)
173#define XFS_IS_PQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_PQUOTA_ACCT)
174#define XFS_IS_GQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_GQUOTA_ACCT)
175
176/*
163 * Incore only flags for quotaoff - these bits get cleared when quota(s) 177 * Incore only flags for quotaoff - these bits get cleared when quota(s)
164 * are in the process of getting turned off. These flags are in m_qflags but 178 * are in the process of getting turned off. These flags are in m_qflags but
165 * never in sb_qflags. 179 * never in sb_qflags.
@@ -362,6 +376,7 @@ typedef struct xfs_dqtrxops {
362 f | XFS_QMOPT_RES_REGBLKS) 376 f | XFS_QMOPT_RES_REGBLKS)
363 377
364extern int xfs_qm_dqcheck(xfs_disk_dquot_t *, xfs_dqid_t, uint, uint, char *); 378extern int xfs_qm_dqcheck(xfs_disk_dquot_t *, xfs_dqid_t, uint, uint, char *);
379extern int xfs_mount_reset_sbqflags(struct xfs_mount *);
365 380
366extern struct bhv_vfsops xfs_qmops; 381extern struct bhv_vfsops xfs_qmops;
367 382
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 06dfca531f79..92efe272b83d 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -276,7 +276,7 @@ xfs_trans_reserve(
276 276
277 error = xfs_log_reserve(tp->t_mountp, logspace, logcount, 277 error = xfs_log_reserve(tp->t_mountp, logspace, logcount,
278 &tp->t_ticket, 278 &tp->t_ticket,
279 XFS_TRANSACTION, log_flags); 279 XFS_TRANSACTION, log_flags, tp->t_type);
280 if (error) { 280 if (error) {
281 goto undo_blocks; 281 goto undo_blocks;
282 } 282 }
@@ -1032,6 +1032,7 @@ xfs_trans_fill_vecs(
1032 tp->t_header.th_num_items = nitems; 1032 tp->t_header.th_num_items = nitems;
1033 log_vector->i_addr = (xfs_caddr_t)&tp->t_header; 1033 log_vector->i_addr = (xfs_caddr_t)&tp->t_header;
1034 log_vector->i_len = sizeof(xfs_trans_header_t); 1034 log_vector->i_len = sizeof(xfs_trans_header_t);
1035 XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_TRANSHDR);
1035} 1036}
1036 1037
1037 1038
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index ec541d66fa2a..a263aec8b3a6 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -112,6 +112,7 @@ typedef struct xfs_trans_header {
112#define XFS_TRANS_GROWFSRT_ZERO 38 112#define XFS_TRANS_GROWFSRT_ZERO 38
113#define XFS_TRANS_GROWFSRT_FREE 39 113#define XFS_TRANS_GROWFSRT_FREE 39
114#define XFS_TRANS_SWAPEXT 40 114#define XFS_TRANS_SWAPEXT 40
115#define XFS_TRANS_TYPE_MAX 40
115/* new transaction types need to be reflected in xfs_logprint(8) */ 116/* new transaction types need to be reflected in xfs_logprint(8) */
116 117
117 118
@@ -998,6 +999,7 @@ struct xfs_buf *xfs_trans_getsb(xfs_trans_t *, struct xfs_mount *, int);
998void xfs_trans_brelse(xfs_trans_t *, struct xfs_buf *); 999void xfs_trans_brelse(xfs_trans_t *, struct xfs_buf *);
999void xfs_trans_bjoin(xfs_trans_t *, struct xfs_buf *); 1000void xfs_trans_bjoin(xfs_trans_t *, struct xfs_buf *);
1000void xfs_trans_bhold(xfs_trans_t *, struct xfs_buf *); 1001void xfs_trans_bhold(xfs_trans_t *, struct xfs_buf *);
1002void xfs_trans_bhold_release(xfs_trans_t *, struct xfs_buf *);
1001void xfs_trans_binval(xfs_trans_t *, struct xfs_buf *); 1003void xfs_trans_binval(xfs_trans_t *, struct xfs_buf *);
1002void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *); 1004void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *);
1003void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *); 1005void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *);
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 7bc5eab4c2c1..2a71b4f91bfa 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -379,8 +379,8 @@ xfs_trans_delete_ail(
379 else { 379 else {
380 xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp, 380 xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
381 "xfs_trans_delete_ail: attempting to delete a log item that is not in the AIL"); 381 "xfs_trans_delete_ail: attempting to delete a log item that is not in the AIL");
382 xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
383 AIL_UNLOCK(mp, s); 382 AIL_UNLOCK(mp, s);
383 xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
384 } 384 }
385 } 385 }
386} 386}
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 144da7a85466..e733293dd7f4 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -714,6 +714,29 @@ xfs_trans_bhold(xfs_trans_t *tp,
714} 714}
715 715
716/* 716/*
717 * Cancel the previous buffer hold request made on this buffer
718 * for this transaction.
719 */
720void
721xfs_trans_bhold_release(xfs_trans_t *tp,
722 xfs_buf_t *bp)
723{
724 xfs_buf_log_item_t *bip;
725
726 ASSERT(XFS_BUF_ISBUSY(bp));
727 ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
728 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
729
730 bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
731 ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
732 ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
733 ASSERT(atomic_read(&bip->bli_refcount) > 0);
734 ASSERT(bip->bli_flags & XFS_BLI_HOLD);
735 bip->bli_flags &= ~XFS_BLI_HOLD;
736 xfs_buf_item_trace("BHOLD RELEASE", bip);
737}
738
739/*
717 * This is called to mark bytes first through last inclusive of the given 740 * This is called to mark bytes first through last inclusive of the given
718 * buffer as needing to be logged when the transaction is committed. 741 * buffer as needing to be logged when the transaction is committed.
719 * The buffer must already be associated with the given transaction. 742 * The buffer must already be associated with the given transaction.
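
[Editor's note] The new xfs_trans_bhold_release above clears the hold flag set earlier by xfs_trans_bhold, so a buffer held across a transaction commit can later be let go. A hedged sketch of that hold/release flag pairing on a log-item-like struct; the flag values and helper names are made up:

#include <assert.h>
#include <stdio.h>

#define BLI_HOLD  0x01
#define BLI_STALE 0x02

struct buf_log_item { unsigned int bli_flags; };

static void bhold(struct buf_log_item *bip)
{
        bip->bli_flags |= BLI_HOLD;
}

static void bhold_release(struct buf_log_item *bip)
{
        /* mirror the asserts in the patch: must be held and not stale */
        assert(bip->bli_flags & BLI_HOLD);
        assert(!(bip->bli_flags & BLI_STALE));
        bip->bli_flags &= ~BLI_HOLD;
}

int main(void)
{
        struct buf_log_item bip = { 0 };

        bhold(&bip);            /* keep the buffer pinned across the commit */
        /* ... commit the transaction, buffer stays with the caller ... */
        bhold_release(&bip);    /* done with it, drop the hold */

        printf("flags=0x%x\n", bip.bli_flags);
        return 0;
}
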
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index 42bcc0215203..f1a904e23ade 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -795,7 +795,6 @@ xfs_statvfs(
795 xfs_mount_t *mp; 795 xfs_mount_t *mp;
796 xfs_sb_t *sbp; 796 xfs_sb_t *sbp;
797 unsigned long s; 797 unsigned long s;
798 u64 id;
799 798
800 mp = XFS_BHVTOM(bdp); 799 mp = XFS_BHVTOM(bdp);
801 sbp = &(mp->m_sb); 800 sbp = &(mp->m_sb);
@@ -823,9 +822,7 @@ xfs_statvfs(
823 statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); 822 statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
824 XFS_SB_UNLOCK(mp, s); 823 XFS_SB_UNLOCK(mp, s);
825 824
826 id = huge_encode_dev(mp->m_dev); 825 xfs_statvfs_fsid(statp, mp);
827 statp->f_fsid.val[0] = (u32)id;
828 statp->f_fsid.val[1] = (u32)(id >> 32);
829 statp->f_namelen = MAXNAMELEN - 1; 826 statp->f_namelen = MAXNAMELEN - 1;
830 827
831 return 0; 828 return 0;
@@ -906,7 +903,6 @@ xfs_sync_inodes(
906 xfs_inode_t *ip_next; 903 xfs_inode_t *ip_next;
907 xfs_buf_t *bp; 904 xfs_buf_t *bp;
908 vnode_t *vp = NULL; 905 vnode_t *vp = NULL;
909 vmap_t vmap;
910 int error; 906 int error;
911 int last_error; 907 int last_error;
912 uint64_t fflag; 908 uint64_t fflag;
@@ -1101,48 +1097,21 @@ xfs_sync_inodes(
1101 * lock in xfs_ireclaim() after the inode is pulled from 1097 * lock in xfs_ireclaim() after the inode is pulled from
1102 * the mount list will sleep until we release it here. 1098 * the mount list will sleep until we release it here.
1103 * This keeps the vnode from being freed while we reference 1099 * This keeps the vnode from being freed while we reference
1104 * it. It is also cheaper and simpler than actually doing 1100 * it.
1105 * a vn_get() for every inode we touch here.
1106 */ 1101 */
1107 if (xfs_ilock_nowait(ip, lock_flags) == 0) { 1102 if (xfs_ilock_nowait(ip, lock_flags) == 0) {
1108
1109 if ((flags & SYNC_BDFLUSH) || (vp == NULL)) { 1103 if ((flags & SYNC_BDFLUSH) || (vp == NULL)) {
1110 ip = ip->i_mnext; 1104 ip = ip->i_mnext;
1111 continue; 1105 continue;
1112 } 1106 }
1113 1107
1114 /* 1108 vp = vn_grab(vp);
1115 * We need to unlock the inode list lock in order
1116 * to lock the inode. Insert a marker record into
1117 * the inode list to remember our position, dropping
1118 * the lock is now done inside the IPOINTER_INSERT
1119 * macro.
1120 *
1121 * We also use the inode list lock to protect us
1122 * in taking a snapshot of the vnode version number
1123 * for use in calling vn_get().
1124 */
1125 VMAP(vp, vmap);
1126 IPOINTER_INSERT(ip, mp);
1127
1128 vp = vn_get(vp, &vmap);
1129 if (vp == NULL) { 1109 if (vp == NULL) {
1130 /* 1110 ip = ip->i_mnext;
1131 * The vnode was reclaimed once we let go
1132 * of the inode list lock. Skip to the
1133 * next list entry. Remove the marker.
1134 */
1135
1136 XFS_MOUNT_ILOCK(mp);
1137
1138 mount_locked = B_TRUE;
1139 vnode_refed = B_FALSE;
1140
1141 IPOINTER_REMOVE(ip, mp);
1142
1143 continue; 1111 continue;
1144 } 1112 }
1145 1113
1114 IPOINTER_INSERT(ip, mp);
1146 xfs_ilock(ip, lock_flags); 1115 xfs_ilock(ip, lock_flags);
1147 1116
1148 ASSERT(vp == XFS_ITOV(ip)); 1117 ASSERT(vp == XFS_ITOV(ip));
@@ -1533,7 +1502,10 @@ xfs_syncsub(
1533 * eventually kicked out of the cache. 1502 * eventually kicked out of the cache.
1534 */ 1503 */
1535 if (flags & SYNC_REFCACHE) { 1504 if (flags & SYNC_REFCACHE) {
1536 xfs_refcache_purge_some(mp); 1505 if (flags & SYNC_WAIT)
1506 xfs_refcache_purge_mp(mp);
1507 else
1508 xfs_refcache_purge_some(mp);
1537 } 1509 }
1538 1510
1539 /* 1511 /*
@@ -1649,6 +1621,10 @@ xfs_vget(
1649#define MNTOPT_SWIDTH "swidth" /* data volume stripe width */ 1621#define MNTOPT_SWIDTH "swidth" /* data volume stripe width */
1650#define MNTOPT_NOUUID "nouuid" /* ignore filesystem UUID */ 1622#define MNTOPT_NOUUID "nouuid" /* ignore filesystem UUID */
1651#define MNTOPT_MTPT "mtpt" /* filesystem mount point */ 1623#define MNTOPT_MTPT "mtpt" /* filesystem mount point */
1624#define MNTOPT_GRPID "grpid" /* group-ID from parent directory */
1625#define MNTOPT_NOGRPID "nogrpid" /* group-ID from current process */
1626#define MNTOPT_BSDGROUPS "bsdgroups" /* group-ID from parent directory */
1627#define MNTOPT_SYSVGROUPS "sysvgroups" /* group-ID from current process */
1652#define MNTOPT_ALLOCSIZE "allocsize" /* preferred allocation size */ 1628#define MNTOPT_ALLOCSIZE "allocsize" /* preferred allocation size */
1653#define MNTOPT_IHASHSIZE "ihashsize" /* size of inode hash table */ 1629#define MNTOPT_IHASHSIZE "ihashsize" /* size of inode hash table */
1654#define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */ 1630#define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */
@@ -1769,6 +1745,12 @@ xfs_parseargs(
1769 } 1745 }
1770 args->flags |= XFSMNT_IHASHSIZE; 1746 args->flags |= XFSMNT_IHASHSIZE;
1771 args->ihashsize = simple_strtoul(value, &eov, 10); 1747 args->ihashsize = simple_strtoul(value, &eov, 10);
1748 } else if (!strcmp(this_char, MNTOPT_GRPID) ||
1749 !strcmp(this_char, MNTOPT_BSDGROUPS)) {
1750 vfsp->vfs_flag |= VFS_GRPID;
1751 } else if (!strcmp(this_char, MNTOPT_NOGRPID) ||
1752 !strcmp(this_char, MNTOPT_SYSVGROUPS)) {
1753 vfsp->vfs_flag &= ~VFS_GRPID;
1772 } else if (!strcmp(this_char, MNTOPT_WSYNC)) { 1754 } else if (!strcmp(this_char, MNTOPT_WSYNC)) {
1773 args->flags |= XFSMNT_WSYNC; 1755 args->flags |= XFSMNT_WSYNC;
1774 } else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) { 1756 } else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) {
@@ -1890,6 +1872,7 @@ xfs_showargs(
1890 }; 1872 };
1891 struct proc_xfs_info *xfs_infop; 1873 struct proc_xfs_info *xfs_infop;
1892 struct xfs_mount *mp = XFS_BHVTOM(bhv); 1874 struct xfs_mount *mp = XFS_BHVTOM(bhv);
1875 struct vfs *vfsp = XFS_MTOVFS(mp);
1893 1876
1894 for (xfs_infop = xfs_info; xfs_infop->flag; xfs_infop++) { 1877 for (xfs_infop = xfs_info; xfs_infop->flag; xfs_infop++) {
1895 if (mp->m_flags & xfs_infop->flag) 1878 if (mp->m_flags & xfs_infop->flag)
@@ -1926,7 +1909,10 @@ xfs_showargs(
1926 1909
1927 if (!(mp->m_flags & XFS_MOUNT_32BITINOOPT)) 1910 if (!(mp->m_flags & XFS_MOUNT_32BITINOOPT))
1928 seq_printf(m, "," MNTOPT_64BITINODE); 1911 seq_printf(m, "," MNTOPT_64BITINODE);
1929 1912
1913 if (vfsp->vfs_flag & VFS_GRPID)
1914 seq_printf(m, "," MNTOPT_GRPID);
1915
1930 return 0; 1916 return 0;
1931} 1917}
1932 1918
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 1377c868f3f4..58bfe629b933 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -104,7 +104,7 @@ xfs_open(
104 * If it's a directory with any blocks, read-ahead block 0 104 * If it's a directory with any blocks, read-ahead block 0
105 * as we're almost certain to have the next operation be a read there. 105 * as we're almost certain to have the next operation be a read there.
106 */ 106 */
107 if (vp->v_type == VDIR && ip->i_d.di_nextents > 0) { 107 if (VN_ISDIR(vp) && ip->i_d.di_nextents > 0) {
108 mode = xfs_ilock_map_shared(ip); 108 mode = xfs_ilock_map_shared(ip);
109 if (ip->i_d.di_nextents > 0) 109 if (ip->i_d.di_nextents > 0)
110 (void)xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK); 110 (void)xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
@@ -163,18 +163,21 @@ xfs_getattr(
163 /* 163 /*
164 * Copy from in-core inode. 164 * Copy from in-core inode.
165 */ 165 */
166 vap->va_type = vp->v_type; 166 vap->va_mode = ip->i_d.di_mode;
167 vap->va_mode = ip->i_d.di_mode & MODEMASK;
168 vap->va_uid = ip->i_d.di_uid; 167 vap->va_uid = ip->i_d.di_uid;
169 vap->va_gid = ip->i_d.di_gid; 168 vap->va_gid = ip->i_d.di_gid;
170 vap->va_projid = ip->i_d.di_projid; 169 vap->va_projid = ip->i_d.di_projid;
171 170
172 /* 171 /*
173 * Check vnode type block/char vs. everything else. 172 * Check vnode type block/char vs. everything else.
174 * Do it with bitmask because that's faster than looking
175 * for multiple values individually.
176 */ 173 */
177 if (((1 << vp->v_type) & ((1<<VBLK) | (1<<VCHR))) == 0) { 174 switch (ip->i_d.di_mode & S_IFMT) {
175 case S_IFBLK:
176 case S_IFCHR:
177 vap->va_rdev = ip->i_df.if_u2.if_rdev;
178 vap->va_blocksize = BLKDEV_IOSIZE;
179 break;
180 default:
178 vap->va_rdev = 0; 181 vap->va_rdev = 0;
179 182
180 if (!(ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) { 183 if (!(ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) {
@@ -224,9 +227,7 @@ xfs_getattr(
224 (ip->i_d.di_extsize << mp->m_sb.sb_blocklog) : 227 (ip->i_d.di_extsize << mp->m_sb.sb_blocklog) :
225 (mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog); 228 (mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog);
226 } 229 }
227 } else { 230 break;
228 vap->va_rdev = ip->i_df.if_u2.if_rdev;
229 vap->va_blocksize = BLKDEV_IOSIZE;
230 } 231 }
231 232
232 vap->va_atime.tv_sec = ip->i_d.di_atime.t_sec; 233 vap->va_atime.tv_sec = ip->i_d.di_atime.t_sec;
@@ -468,7 +469,7 @@ xfs_setattr(
468 m |= S_ISGID; 469 m |= S_ISGID;
469#if 0 470#if 0
470 /* Linux allows this, Irix doesn't. */ 471 /* Linux allows this, Irix doesn't. */
471 if ((vap->va_mode & S_ISVTX) && vp->v_type != VDIR) 472 if ((vap->va_mode & S_ISVTX) && !VN_ISDIR(vp))
472 m |= S_ISVTX; 473 m |= S_ISVTX;
473#endif 474#endif
474 if (m && !capable(CAP_FSETID)) 475 if (m && !capable(CAP_FSETID))
@@ -546,10 +547,10 @@ xfs_setattr(
546 goto error_return; 547 goto error_return;
547 } 548 }
548 549
549 if (vp->v_type == VDIR) { 550 if (VN_ISDIR(vp)) {
550 code = XFS_ERROR(EISDIR); 551 code = XFS_ERROR(EISDIR);
551 goto error_return; 552 goto error_return;
552 } else if (vp->v_type != VREG) { 553 } else if (!VN_ISREG(vp)) {
553 code = XFS_ERROR(EINVAL); 554 code = XFS_ERROR(EINVAL);
554 goto error_return; 555 goto error_return;
555 } 556 }
@@ -1567,7 +1568,7 @@ xfs_release(
1567 vp = BHV_TO_VNODE(bdp); 1568 vp = BHV_TO_VNODE(bdp);
1568 ip = XFS_BHVTOI(bdp); 1569 ip = XFS_BHVTOI(bdp);
1569 1570
1570 if ((vp->v_type != VREG) || (ip->i_d.di_mode == 0)) { 1571 if (!VN_ISREG(vp) || (ip->i_d.di_mode == 0)) {
1571 return 0; 1572 return 0;
1572 } 1573 }
1573 1574
@@ -1895,7 +1896,7 @@ xfs_create(
1895 dp = XFS_BHVTOI(dir_bdp); 1896 dp = XFS_BHVTOI(dir_bdp);
1896 mp = dp->i_mount; 1897 mp = dp->i_mount;
1897 1898
1898 dm_di_mode = vap->va_mode|VTTOIF(vap->va_type); 1899 dm_di_mode = vap->va_mode;
1899 namelen = VNAMELEN(dentry); 1900 namelen = VNAMELEN(dentry);
1900 1901
1901 if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) { 1902 if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) {
@@ -1973,8 +1974,7 @@ xfs_create(
1973 (error = XFS_DIR_CANENTER(mp, tp, dp, name, namelen))) 1974 (error = XFS_DIR_CANENTER(mp, tp, dp, name, namelen)))
1974 goto error_return; 1975 goto error_return;
1975 rdev = (vap->va_mask & XFS_AT_RDEV) ? vap->va_rdev : 0; 1976 rdev = (vap->va_mask & XFS_AT_RDEV) ? vap->va_rdev : 0;
1976 error = xfs_dir_ialloc(&tp, dp, 1977 error = xfs_dir_ialloc(&tp, dp, vap->va_mode, 1,
1977 MAKEIMODE(vap->va_type,vap->va_mode), 1,
1978 rdev, credp, prid, resblks > 0, 1978 rdev, credp, prid, resblks > 0,
1979 &ip, &committed); 1979 &ip, &committed);
1980 if (error) { 1980 if (error) {
@@ -2620,7 +2620,7 @@ xfs_link(
2620 vn_trace_entry(src_vp, __FUNCTION__, (inst_t *)__return_address); 2620 vn_trace_entry(src_vp, __FUNCTION__, (inst_t *)__return_address);
2621 2621
2622 target_namelen = VNAMELEN(dentry); 2622 target_namelen = VNAMELEN(dentry);
2623 if (src_vp->v_type == VDIR) 2623 if (VN_ISDIR(src_vp))
2624 return XFS_ERROR(EPERM); 2624 return XFS_ERROR(EPERM);
2625 2625
2626 src_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(src_vp), &xfs_vnodeops); 2626 src_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(src_vp), &xfs_vnodeops);
@@ -2805,7 +2805,7 @@ xfs_mkdir(
 
 	tp = NULL;
 	dp_joined_to_trans = B_FALSE;
-	dm_di_mode = vap->va_mode|VTTOIF(vap->va_type);
+	dm_di_mode = vap->va_mode;
 
 	if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) {
 		error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE,
@@ -2879,8 +2879,7 @@ xfs_mkdir(
 	/*
 	 * create the directory inode.
 	 */
-	error = xfs_dir_ialloc(&tp, dp,
-			MAKEIMODE(vap->va_type,vap->va_mode), 2,
+	error = xfs_dir_ialloc(&tp, dp, vap->va_mode, 2,
 			0, credp, prid, resblks > 0,
 			&cdp, NULL);
 	if (error) {
@@ -3650,7 +3649,7 @@ xfs_rwlock(
 	vnode_t		*vp;
 
 	vp = BHV_TO_VNODE(bdp);
-	if (vp->v_type == VDIR)
+	if (VN_ISDIR(vp))
 		return 1;
 	ip = XFS_BHVTOI(bdp);
 	if (locktype == VRWLOCK_WRITE) {
@@ -3681,7 +3680,7 @@ xfs_rwunlock(
 	vnode_t		*vp;
 
 	vp = BHV_TO_VNODE(bdp);
-	if (vp->v_type == VDIR)
+	if (VN_ISDIR(vp))
 		return;
 	ip = XFS_BHVTOI(bdp);
 	if (locktype == VRWLOCK_WRITE) {
@@ -3847,51 +3846,10 @@ xfs_reclaim(
 		return 0;
 	}
 
-	if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
-		if (ip->i_d.di_size > 0) {
-			/*
-			 * Flush and invalidate any data left around that is
-			 * a part of this file.
-			 *
-			 * Get the inode's i/o lock so that buffers are pushed
-			 * out while holding the proper lock.  We can't hold
-			 * the inode lock here since flushing out buffers may
-			 * cause us to try to get the lock in xfs_strategy().
-			 *
-			 * We don't have to call remapf() here, because there
-			 * cannot be any mapped file references to this vnode
-			 * since it is being reclaimed.
-			 */
-			xfs_ilock(ip, XFS_IOLOCK_EXCL);
-
-			/*
-			 * If we hit an IO error, we need to make sure that the
-			 * buffer and page caches of file data for
-			 * the file are tossed away. We don't want to use
-			 * VOP_FLUSHINVAL_PAGES here because we don't want dirty
-			 * pages to stay attached to the vnode, but be
-			 * marked P_BAD. pdflush/vnode_pagebad
-			 * hates that.
-			 */
-			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-				VOP_FLUSHINVAL_PAGES(vp, 0, -1, FI_NONE);
-			} else {
-				VOP_TOSS_PAGES(vp, 0, -1, FI_NONE);
-			}
+	vn_iowait(vp);
 
-			ASSERT(VN_CACHED(vp) == 0);
-			ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) ||
-			       ip->i_delayed_blks == 0);
-			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-		} else if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-			/*
-			 * di_size field may not be quite accurate if we're
-			 * shutting down.
-			 */
-			VOP_TOSS_PAGES(vp, 0, -1, FI_NONE);
-			ASSERT(VN_CACHED(vp) == 0);
-		}
-	}
+	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
+	ASSERT(VN_CACHED(vp) == 0);
 
 	/* If we have nothing to flush with this inode then complete the
 	 * teardown now, otherwise break the link between the xfs inode
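In xfs_reclaim() the open-coded flush-and-invalidate block is collapsed into a single vn_iowait() call followed by the two assertions. Based on the xfs_vnode.c/xfs_vnode.h additions listed in the diffstat, vn_iowait() presumably just blocks until the vnode's count of in-flight I/Os drains to zero; a minimal sketch of that wait pattern, with assumed field names, is:

#include <linux/wait.h>
#include <asm/atomic.h>

/* Field names v_iocount/v_iowait are assumptions for illustration only. */
struct example_vnode {
	atomic_t		v_iocount;	/* I/Os still outstanding */
	wait_queue_head_t	v_iowait;	/* reclaim sleeps here */
};

static void example_vn_iowait(struct example_vnode *vp)
{
	/* block until every outstanding I/O against the vnode has completed */
	wait_event(vp->v_iowait, atomic_read(&vp->v_iocount) == 0);
}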
@@ -4567,7 +4525,7 @@ xfs_change_file_space(
 	/*
 	 * must be a regular file and have write permission
 	 */
-	if (vp->v_type != VREG)
+	if (!VN_ISREG(vp))
 		return XFS_ERROR(EINVAL);
 
 	xfs_ilock(ip, XFS_ILOCK_SHARED);