author		Nathan Scott <nathans@sgi.com>	2006-06-09 01:27:16 -0400
committer	Nathan Scott <nathans@sgi.com>	2006-06-09 01:27:16 -0400
commit		7d4fb40ad7efe4586d1341d4731377fb4530836f (patch)
tree		bf802cce6bc60627186b02b71014a683f6cb4a05
parent		59c1b082f5fff8269565039600a2ef18d48649b5 (diff)
[XFS] Start writeout earlier (on last close) in the case where we have a
truncate down followed by delayed allocation (buffered writes) - worst
case scenario for the notorious NULL files problem.  This reduces the
window where we are exposed to that problem significantly.

SGI-PV: 917976
SGI-Modid: xfs-linux-melb:xfs-kern:26100a

Signed-off-by: Nathan Scott <nathans@sgi.com>
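
The sketch below is not part of the patch; it is a minimal userspace illustration of the flag protocol the change introduces (truncate down sets VTRUNCATED, last close clears it and kicks off early asynchronous writeout when delayed-allocation data is still dirty). The struct, helper names (vnode_sim, truncate_down, last_close) and printf stand in for the real vnode, VN_DIRTY()/i_delayed_blks checks and VOP_FLUSH_PAGES() call, and are made up for the example.

/*
 * Simplified model of the flush-on-close logic added by this patch.
 * Compile with: cc -o sketch sketch.c
 */
#include <stdio.h>

#define VTRUNCATED	0x40			/* matches the new vflags_t value */

struct vnode_sim {
	unsigned int	v_flag;			/* VTRUNCATED lives here */
	int		dirty_pages;		/* stand-in for VN_DIRTY(vp) */
	int		delayed_blks;		/* stand-in for ip->i_delayed_blks */
};

/* xfs_setattr() path: truncating down removes references to old data. */
static void truncate_down(struct vnode_sim *vp)
{
	vp->v_flag |= VTRUNCATED;		/* VTRUNCATE(vp) */
}

/* xfs_close() path: on last close, flush early rather than waiting. */
static void last_close(struct vnode_sim *vp)
{
	unsigned int was_truncated = vp->v_flag & VTRUNCATED;

	vp->v_flag &= ~VTRUNCATED;		/* VUNTRUNCATE(vp) returns prior state */
	if (was_truncated && vp->dirty_pages && vp->delayed_blks > 0) {
		printf("starting async writeout on last close\n");
		vp->dirty_pages = 0;		/* VOP_FLUSH_PAGES(..., XFS_B_ASYNC, ...) */
	}
}

int main(void)
{
	struct vnode_sim vp = { .v_flag = 0, .dirty_pages = 1, .delayed_blks = 1 };

	truncate_down(&vp);	/* truncate down, then buffered rewrite (delalloc) */
	last_close(&vp);	/* exposure window to NULL files shrinks here */
	return 0;
}
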
-rw-r--r--	fs/xfs/linux-2.6/xfs_aops.c	13
-rw-r--r--	fs/xfs/linux-2.6/xfs_file.c	15
-rw-r--r--	fs/xfs/linux-2.6/xfs_fs_subr.c	53
-rw-r--r--	fs/xfs/linux-2.6/xfs_vnode.h	39
-rw-r--r--	fs/xfs/xfs_vnodeops.c	88
5 files changed, 114 insertions(+), 94 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 5835e699a7fc..c0a904316854 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1157,6 +1157,18 @@ out_unlock:
 	return error;
 }
 
+STATIC int
+xfs_vm_writepages(
+	struct address_space	*mapping,
+	struct writeback_control *wbc)
+{
+	struct vnode		*vp = vn_from_inode(mapping->host);
+
+	if (VN_TRUNC(vp))
+		VUNTRUNCATE(vp);
+	return generic_writepages(mapping, wbc);
+}
+
 /*
  * Called to move a page into cleanable state - and from there
  * to be released. Possibly the page is already clean. We always
@@ -1451,6 +1463,7 @@ struct address_space_operations xfs_address_space_operations = {
 	.readpage		= xfs_vm_readpage,
 	.readpages		= xfs_vm_readpages,
 	.writepage		= xfs_vm_writepage,
+	.writepages		= xfs_vm_writepages,
 	.sync_page		= block_sync_page,
 	.releasepage		= xfs_vm_releasepage,
 	.invalidatepage		= xfs_vm_invalidatepage,
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index 7c9f7598807f..97615cc74ef5 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -324,6 +324,17 @@ xfs_file_open(
 }
 
 STATIC int
+xfs_file_close(
+	struct file	*filp)
+{
+	vnode_t		*vp = vn_from_inode(filp->f_dentry->d_inode);
+	int		error;
+
+	VOP_CLOSE(vp, 0, file_count(filp) > 1 ? L_FALSE : L_TRUE, NULL, error);
+	return -error;
+}
+
+STATIC int
 xfs_file_release(
 	struct inode	*inode,
 	struct file	*filp)
@@ -349,6 +360,8 @@ xfs_file_fsync(
 
 	if (datasync)
 		flags |= FSYNC_DATA;
+	if (VN_TRUNC(vp))
+		VUNTRUNCATE(vp);
 	VOP_FSYNC(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1, error);
 	return -error;
 }
@@ -578,6 +591,7 @@ const struct file_operations xfs_file_operations = {
 #endif
 	.mmap		= xfs_file_mmap,
 	.open		= xfs_file_open,
+	.flush		= xfs_file_close,
 	.release	= xfs_file_release,
 	.fsync		= xfs_file_fsync,
 #ifdef HAVE_FOP_OPEN_EXEC
@@ -602,6 +616,7 @@ const struct file_operations xfs_invis_file_operations = {
 #endif
 	.mmap		= xfs_file_mmap,
 	.open		= xfs_file_open,
+	.flush		= xfs_file_close,
 	.release	= xfs_file_release,
 	.fsync		= xfs_file_fsync,
 };
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index 575f2a790f31..f0c56daf4d6d 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
+ * Copyright (c) 2000-2002,2005-2006 Silicon Graphics, Inc.
  * All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -15,40 +15,12 @@
  * along with this program; if not, write the Free Software Foundation,
  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  */
-
 #include "xfs.h"
 
-/*
- * Stub for no-op vnode operations that return error status.
- */
-int
-fs_noerr(void)
-{
-	return 0;
-}
+int fs_noerr(void) { return 0; }
+int fs_nosys(void) { return ENOSYS; }
+void fs_noval(void) { return; }
 
-/*
- * Operation unsupported under this file system.
- */
-int
-fs_nosys(void)
-{
-	return ENOSYS;
-}
-
-/*
- * Stub for inactive, strategy, and read/write lock/unlock. Does nothing.
- */
-/* ARGSUSED */
-void
-fs_noval(void)
-{
-}
-
-/*
- * vnode pcache layer for vnode_tosspages.
- * 'last' parameter unused but left in for IRIX compatibility
- */
 void
 fs_tosspages(
 	bhv_desc_t	*bdp,
@@ -63,11 +35,6 @@ fs_tosspages(
 	truncate_inode_pages(ip->i_mapping, first);
 }
 
-
-/*
- * vnode pcache layer for vnode_flushinval_pages.
- * 'last' parameter unused but left in for IRIX compatibility
- */
 void
 fs_flushinval_pages(
 	bhv_desc_t	*bdp,
73 bhv_desc_t *bdp, 40 bhv_desc_t *bdp,
@@ -79,16 +46,13 @@ fs_flushinval_pages(
 	struct inode	*ip = vn_to_inode(vp);
 
 	if (VN_CACHED(vp)) {
+		if (VN_TRUNC(vp))
+			VUNTRUNCATE(vp);
 		filemap_write_and_wait(ip->i_mapping);
-
 		truncate_inode_pages(ip->i_mapping, first);
 	}
 }
 
-/*
- * vnode pcache layer for vnode_flush_pages.
- * 'last' parameter unused but left in for IRIX compatibility
- */
 int
 fs_flush_pages(
 	bhv_desc_t	*bdp,
94 bhv_desc_t *bdp, 58 bhv_desc_t *bdp,
@@ -100,12 +64,13 @@ fs_flush_pages(
 	vnode_t		*vp = BHV_TO_VNODE(bdp);
 	struct inode	*ip = vn_to_inode(vp);
 
-	if (VN_CACHED(vp)) {
+	if (VN_DIRTY(vp)) {
+		if (VN_TRUNC(vp))
+			VUNTRUNCATE(vp);
 		filemap_fdatawrite(ip->i_mapping);
 		if (flags & XFS_B_ASYNC)
 			return 0;
 		filemap_fdatawait(ip->i_mapping);
 	}
-
 	return 0;
 }
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index a64b7db67003..569a4e7b5cc1 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -56,12 +56,18 @@ typedef xfs_ino_t vnumber_t;
 typedef struct dentry	vname_t;
 typedef bhv_head_t	vn_bhv_head_t;
 
+typedef enum vflags {
+	VMODIFIED	= 0x08,	/* XFS inode state possibly differs */
+				/* to the Linux inode state. */
+	VTRUNCATED	= 0x40,	/* truncated down so flush-on-close */
+} vflags_t;
+
 /*
  * MP locking protocols:
  *	v_flag, v_vfsp			VN_LOCK/VN_UNLOCK
  */
 typedef struct vnode {
-	__u32		v_flag;		/* vnode flags (see below) */
+	vflags_t	v_flag;		/* vnode flags (see above) */
 	struct vfs	*v_vfsp;	/* ptr to containing VFS */
 	vnumber_t	v_number;	/* in-core vnode number */
 	vn_bhv_head_t	v_bh;		/* behavior head */
@@ -126,12 +132,6 @@ static inline struct inode *vn_to_inode(struct vnode *vnode)
 }
 
 /*
- * Vnode flags.
- */
-#define VMODIFIED	0x8	/* XFS inode state possibly differs */
-				/* to the Linux inode state. */
-
-/*
  * Values for the VOP_RWLOCK and VOP_RWUNLOCK flags parameter.
  */
 typedef enum vrwlock {
@@ -162,8 +162,10 @@ typedef enum vchange {
 	VCHANGE_FLAGS_IOEXCL_COUNT	= 4
 } vchange_t;
 
+typedef enum { L_FALSE, L_TRUE } lastclose_t;
 
 typedef int	(*vop_open_t)(bhv_desc_t *, struct cred *);
+typedef int	(*vop_close_t)(bhv_desc_t *, int, lastclose_t, struct cred *);
 typedef ssize_t (*vop_read_t)(bhv_desc_t *, struct kiocb *,
 				const struct iovec *, unsigned int,
 				loff_t *, int, struct cred *);
@@ -234,6 +236,7 @@ typedef int (*vop_iflush_t)(bhv_desc_t *, int);
 typedef struct vnodeops {
 	bhv_position_t	vn_position;	/* position within behavior chain */
 	vop_open_t		vop_open;
+	vop_close_t		vop_close;
 	vop_read_t		vop_read;
 	vop_write_t		vop_write;
 	vop_sendfile_t		vop_sendfile;
@@ -278,6 +281,10 @@ typedef struct vnodeops {
  */
 #define _VOP_(op, vp)	(*((vnodeops_t *)(vp)->v_fops)->op)
 
+#define VOP_OPEN(vp, cr, rv)	\
+	rv = _VOP_(vop_open, vp)((vp)->v_fbhv, cr)
+#define VOP_CLOSE(vp, f, last, cr, rv)	\
+	rv = _VOP_(vop_close, vp)((vp)->v_fbhv, f, last, cr)
 #define VOP_READ(vp,file,iov,segs,offset,ioflags,cr,rv)	\
 	rv = _VOP_(vop_read, vp)((vp)->v_fbhv,file,iov,segs,offset,ioflags,cr)
 #define VOP_WRITE(vp,file,iov,segs,offset,ioflags,cr,rv)	\
@@ -290,8 +297,6 @@ typedef struct vnodeops {
 	rv = _VOP_(vop_splice_write, vp)((vp)->v_fbhv,f,o,pipe,cnt,fl,iofl,cr)
 #define VOP_BMAP(vp,of,sz,rw,b,n,rv)	\
 	rv = _VOP_(vop_bmap, vp)((vp)->v_fbhv,of,sz,rw,b,n)
-#define VOP_OPEN(vp, cr, rv)	\
-	rv = _VOP_(vop_open, vp)((vp)->v_fbhv, cr)
 #define VOP_GETATTR(vp, vap, f, cr, rv)	\
 	rv = _VOP_(vop_getattr, vp)((vp)->v_fbhv, vap, f, cr)
 #define VOP_SETATTR(vp, vap, f, cr, rv)	\
@@ -556,8 +561,6 @@ static inline struct vnode *vn_grab(struct vnode *vp)
  */
 #define VN_LOCK(vp)		mutex_spinlock(&(vp)->v_lock)
 #define VN_UNLOCK(vp, s)	mutex_spinunlock(&(vp)->v_lock, s)
-#define VN_FLAGSET(vp,b)	vn_flagset(vp,b)
-#define VN_FLAGCLR(vp,b)	vn_flagclr(vp,b)
 
 static __inline__ void vn_flagset(struct vnode *vp, uint flag)
 {
@@ -566,13 +569,22 @@ static __inline__ void vn_flagset(struct vnode *vp, uint flag)
 	spin_unlock(&vp->v_lock);
 }
 
-static __inline__ void vn_flagclr(struct vnode *vp, uint flag)
+static __inline__ uint vn_flagclr(struct vnode *vp, uint flag)
 {
+	uint	cleared;
+
 	spin_lock(&vp->v_lock);
+	cleared = (vp->v_flag & flag);
 	vp->v_flag &= ~flag;
 	spin_unlock(&vp->v_lock);
+	return cleared;
 }
 
+#define VMODIFY(vp)	vn_flagset(vp, VMODIFIED)
+#define VUNMODIFY(vp)	vn_flagclr(vp, VMODIFIED)
+#define VTRUNCATE(vp)	vn_flagset(vp, VTRUNCATED)
+#define VUNTRUNCATE(vp)	vn_flagclr(vp, VTRUNCATED)
+
 /*
  * Dealing with bad inodes
  */
@@ -612,8 +624,7 @@ static inline void vn_atime_to_time_t(struct vnode *vp, time_t *tt)
 #define VN_CACHED(vp)	(vn_to_inode(vp)->i_mapping->nrpages)
 #define VN_DIRTY(vp)	mapping_tagged(vn_to_inode(vp)->i_mapping, \
 			PAGECACHE_TAG_DIRTY)
-#define VMODIFY(vp)	VN_FLAGSET(vp, VMODIFIED)
-#define VUNMODIFY(vp)	VN_FLAGCLR(vp, VMODIFIED)
+#define VN_TRUNC(vp)	((vp)->v_flag & VTRUNCATED)
 
 /*
  * Flags to VOP_SETATTR/VOP_GETATTR.
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index cb36a56392e7..35906bae92e1 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -16,8 +16,6 @@
  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#include <linux/capability.h>
-
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_types.h"
@@ -58,32 +56,14 @@
 #include "xfs_log_priv.h"
 #include "xfs_mac.h"
 
-
-/*
- * The maximum pathlen is 1024 bytes. Since the minimum file system
- * blocksize is 512 bytes, we can get a max of 2 extents back from
- * bmapi.
- */
-#define SYMLINK_MAPS 2
-
-/*
- * For xfs, we check that the file isn't too big to be opened by this kernel.
- * No other open action is required for regular files. Devices are handled
- * through the specfs file system, pipes through fifofs. Device and
- * fifo vnodes are "wrapped" by specfs and fifofs vnodes, respectively,
- * when a new vnode is first looked up or created.
- */
 STATIC int
 xfs_open(
 	bhv_desc_t	*bdp,
 	cred_t		*credp)
 {
 	int		mode;
-	vnode_t		*vp;
-	xfs_inode_t	*ip;
-
-	vp = BHV_TO_VNODE(bdp);
-	ip = XFS_BHVTOI(bdp);
+	vnode_t		*vp = BHV_TO_VNODE(bdp);
+	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
 
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 		return XFS_ERROR(EIO);
@@ -101,6 +81,36 @@ xfs_open(
 	return 0;
 }
 
+STATIC int
+xfs_close(
+	bhv_desc_t	*bdp,
+	int		flags,
+	lastclose_t	lastclose,
+	cred_t		*credp)
+{
+	vnode_t		*vp = BHV_TO_VNODE(bdp);
+	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
+	int		error = 0;
+
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+		return XFS_ERROR(EIO);
+
+	if (lastclose != L_TRUE || !VN_ISREG(vp))
+		return 0;
+
+	/*
+	 * If we previously truncated this file and removed old data in
+	 * the process, we want to initiate "early" writeout on the last
+	 * close.  This is an attempt to combat the notorious NULL files
+	 * problem which is particularly noticable from a truncate down,
+	 * buffered (re-)write (delalloc), followed by a crash.  What we
+	 * are effectively doing here is significantly reducing the time
+	 * window where we'd otherwise be exposed to that problem.
+	 */
+	if (VUNTRUNCATE(vp) && VN_DIRTY(vp) && ip->i_delayed_blks > 0)
+		VOP_FLUSH_PAGES(vp, 0, -1, XFS_B_ASYNC, FI_NONE, error);
+	return error;
+}
 
 /*
  * xfs_getattr
@@ -665,9 +675,17 @@ xfs_setattr(
 		    ((ip->i_d.di_nlink != 0 ||
 		      !(mp->m_flags & XFS_MOUNT_WSYNC))
 		     ? 1 : 0));
-		if (code) {
+		if (code)
 			goto abort_return;
-		}
+		/*
+		 * Truncated "down", so we're removing references
+		 * to old data here - if we now delay flushing for
+		 * a long time, we expose ourselves unduly to the
+		 * notorious NULL files problem.  So, we mark this
+		 * vnode and flush it when the file is closed, and
+		 * do not wait the usual (long) time for writeout.
+		 */
+		VTRUNCATE(vp);
 	}
 	/*
 	 * Have to do this even if the file's size doesn't change.
@@ -937,6 +955,13 @@ xfs_access(
 
 
 /*
+ * The maximum pathlen is 1024 bytes. Since the minimum file system
+ * blocksize is 512 bytes, we can get a max of 2 extents back from
+ * bmapi.
+ */
+#define SYMLINK_MAPS 2
+
+/*
  * xfs_readlink
  *
  */
@@ -1470,9 +1495,6 @@ xfs_inactive_symlink_local(
 	return 0;
 }
 
-/*
- *
- */
 STATIC int
 xfs_inactive_attrs(
 	xfs_inode_t	*ip,
@@ -1531,10 +1553,10 @@ xfs_release(
 
 	vp = BHV_TO_VNODE(bdp);
 	ip = XFS_BHVTOI(bdp);
+	mp = ip->i_mount;
 
-	if (!VN_ISREG(vp) || (ip->i_d.di_mode == 0)) {
+	if (!VN_ISREG(vp) || (ip->i_d.di_mode == 0))
 		return 0;
-	}
 
 	/* If this is a read-only mount, don't do this (would generate I/O) */
 	if (vp->v_vfsp->vfs_flag & VFS_RDONLY)
@@ -1546,8 +1568,6 @@ xfs_release(
 		return 0;
 #endif
 
-	mp = ip->i_mount;
-
 	if (ip->i_d.di_nlink != 0) {
 		if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
 		     ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0 ||
@@ -3745,7 +3765,6 @@ xfs_inode_flush(
 	return error;
 }
 
-
 int
 xfs_set_dmattrs (
 	bhv_desc_t	*bdp,
@@ -3786,10 +3805,6 @@ xfs_set_dmattrs (
 	return error;
 }
 
-
-/*
- * xfs_reclaim
- */
 STATIC int
 xfs_reclaim(
 	bhv_desc_t	*bdp)
@@ -4645,6 +4660,7 @@ xfs_change_file_space(
 vnodeops_t xfs_vnodeops = {
 	BHV_IDENTITY_INIT(VN_BHV_XFS,VNODE_POSITION_XFS),
 	.vop_open		= xfs_open,
+	.vop_close		= xfs_close,
 	.vop_read		= xfs_read,
 #ifdef HAVE_SENDFILE
 	.vop_sendfile		= xfs_sendfile,