author:    Steve French <sfrench@us.ibm.com>	2008-04-24 11:26:50 -0400
committer: Steve French <sfrench@us.ibm.com>	2008-04-24 11:26:50 -0400
commit:    36d99df2fb474222ab47fbe8ae7385661033223b
tree:      962e068491b752a944f61c454fad3f8619a1ea3f  /fs/xfs
parent:    076d8423a98659a92837b07aa494cb74bfefe77c
parent:    3dc5063786b273f1aee545844f6bd4e9651ebffe

Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6

Diffstat (limited to 'fs/xfs'): 66 files changed, 1918 insertions, 2309 deletions
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index 35115bca036e..524021ff5436 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -35,18 +35,6 @@ config XFS_QUOTA
 	  with or without the generic quota support enabled (CONFIG_QUOTA) -
 	  they are completely independent subsystems.
 
-config XFS_SECURITY
-	bool "XFS Security Label support"
-	depends on XFS_FS
-	help
-	  Security labels support alternative access control models
-	  implemented by security modules like SELinux.  This option
-	  enables an extended attribute namespace for inode security
-	  labels in the XFS filesystem.
-
-	  If you are not using a security module that requires using
-	  extended attributes for inode security labels, say N.
-
 config XFS_POSIX_ACL
 	bool "XFS POSIX ACL support"
 	depends on XFS_FS
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c
index e040f1ce1b6a..9b1bb17a0501 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/linux-2.6/kmem.c
@@ -37,7 +37,7 @@ kmem_alloc(size_t size, unsigned int __nocast flags)
 #ifdef DEBUG
 	if (unlikely(!(flags & KM_LARGE) && (size > PAGE_SIZE))) {
 		printk(KERN_WARNING "Large %s attempt, size=%ld\n",
-			__FUNCTION__, (long)size);
+			__func__, (long)size);
 		dump_stack();
 	}
 #endif
@@ -52,7 +52,7 @@ kmem_alloc(size_t size, unsigned int __nocast flags)
 		if (!(++retries % 100))
 			printk(KERN_ERR "XFS: possible memory allocation "
 					"deadlock in %s (mode:0x%x)\n",
-					__FUNCTION__, lflags);
+					__func__, lflags);
 		congestion_wait(WRITE, HZ/50);
 	} while (1);
 }
@@ -129,7 +129,7 @@ kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
 		if (!(++retries % 100))
 			printk(KERN_ERR "XFS: possible memory allocation "
 					"deadlock in %s (mode:0x%x)\n",
-					__FUNCTION__, lflags);
+					__func__, lflags);
 		congestion_wait(WRITE, HZ/50);
 	} while (1);
 }
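An aside, not part of the commit: the kmem.c hunks above touch the allocation-retry loop that warns about a possible deadlock every 100 attempts and then backs off via congestion_wait(). A minimal user-space sketch of that retry-and-warn pattern follows; everything in it (alloc_retry, the 20 ms sleep, the message format) is illustrative and assumes only the C standard library, not kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Keep retrying a failed allocation, but complain every 100 attempts so a
 * stuck caller is visible in the logs -- a user-space analogue of the loop
 * in kmem_alloc()/kmem_zone_alloc(). */
static void *alloc_retry(size_t size)
{
	unsigned int retries = 0;
	void *p;

	do {
		p = malloc(size);
		if (p)
			return p;
		if (!(++retries % 100))
			fprintf(stderr, "%s: still failing, tries=%u size=%zu\n",
				__func__, retries, size);
		usleep(20000);	/* rough stand-in for congestion_wait(WRITE, HZ/50) */
	} while (1);
}

int main(void)
{
	char *buf = alloc_retry(1 << 20);

	buf[0] = 0;
	free(buf);
	return 0;
}

The __func__ identifier used here is also what these hunks substitute for the older gcc-specific __FUNCTION__ spelling.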
diff --git a/fs/xfs/linux-2.6/sema.h b/fs/xfs/linux-2.6/sema.h
index 2009e6d922ce..3abe7e9ceb33 100644
--- a/fs/xfs/linux-2.6/sema.h
+++ b/fs/xfs/linux-2.6/sema.h
@@ -20,8 +20,8 @@
 
 #include <linux/time.h>
 #include <linux/wait.h>
+#include <linux/semaphore.h>
 #include <asm/atomic.h>
-#include <asm/semaphore.h>
 
 /*
  * sema_t structure just maps to struct semaphore in Linux kernel.
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index e0519529c26c..a55c3b26d840 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -243,8 +243,12 @@ xfs_end_bio_unwritten(
 	size_t			size = ioend->io_size;
 
 	if (likely(!ioend->io_error)) {
-		if (!XFS_FORCED_SHUTDOWN(ip->i_mount))
-			xfs_iomap_write_unwritten(ip, offset, size);
+		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+			int error;
+			error = xfs_iomap_write_unwritten(ip, offset, size);
+			if (error)
+				ioend->io_error = error;
+		}
 		xfs_setfilesize(ioend);
 	}
 	xfs_destroy_ioend(ioend);
@@ -1532,9 +1536,9 @@ xfs_vm_bmap(
 	struct xfs_inode	*ip = XFS_I(inode);
 
 	xfs_itrace_entry(XFS_I(inode));
-	xfs_rwlock(ip, VRWLOCK_READ);
+	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
-	xfs_rwunlock(ip, VRWLOCK_READ);
+	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 	return generic_block_bmap(mapping, block, xfs_get_blocks);
 }
 
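An aside, not part of the commit: the xfs_aops.c hunk stops discarding the return value of xfs_iomap_write_unwritten() and records it in ioend->io_error so the failure reaches the caller. A self-contained sketch of that "remember the first error on the completion object" pattern is below; struct ioend, convert_unwritten() and end_io() here are stand-ins invented for illustration, not the kernel types.

#include <stdio.h>
#include <errno.h>

struct ioend {
	int io_error;	/* stays 0 until the first failure is recorded */
};

static int convert_unwritten(void)
{
	return -EIO;	/* pretend the extent conversion failed */
}

static void end_io(struct ioend *ioend)
{
	if (!ioend->io_error) {
		int error = convert_unwritten();

		if (error)
			ioend->io_error = error;	/* propagate instead of dropping it */
	}
}

int main(void)
{
	struct ioend ioend = { 0 };

	end_io(&ioend);
	printf("io_error = %d\n", ioend.io_error);
	return 0;
}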
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index e347bfd47c91..52f6846101d5 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -400,7 +400,7 @@ _xfs_buf_lookup_pages(
 			printk(KERN_ERR
 				"XFS: possible memory allocation "
 				"deadlock in %s (mode:0x%x)\n",
-				__FUNCTION__, gfp_mask);
+				__func__, gfp_mask);
 
 			XFS_STATS_INC(xb_page_retries);
 			xfsbufd_wakeup(0, gfp_mask);
@@ -598,7 +598,7 @@ xfs_buf_get_flags(
 		error = _xfs_buf_map_pages(bp, flags);
 		if (unlikely(error)) {
 			printk(KERN_WARNING "%s: failed to map pages\n",
-				__FUNCTION__);
+				__func__);
 			goto no_buffer;
 		}
 	}
@@ -778,7 +778,7 @@ xfs_buf_get_noaddr(
 	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
 	if (unlikely(error)) {
 		printk(KERN_WARNING "%s: failed to map pages\n",
-			__FUNCTION__);
+			__func__);
 		goto fail_free_mem;
 	}
 
@@ -1060,7 +1060,7 @@ xfs_buf_iostart(
 		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
 		bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
 		xfs_buf_delwri_queue(bp, 1);
-		return status;
+		return 0;
 	}
 
 	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index a3d207de48b8..841d7883528d 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -387,11 +387,15 @@ static inline int XFS_bwrite(xfs_buf_t *bp)
 	return error;
 }
 
-static inline int xfs_bdwrite(void *mp, xfs_buf_t *bp)
+/*
+ * No error can be returned from xfs_buf_iostart for delwri
+ * buffers as they are queued and no I/O is issued.
+ */
+static inline void xfs_bdwrite(void *mp, xfs_buf_t *bp)
 {
 	bp->b_strat = xfs_bdstrat_cb;
 	bp->b_fspriv3 = mp;
-	return xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC);
+	(void)xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC);
 }
 
 #define XFS_bdstrat(bp) xfs_buf_iorequest(bp)
diff --git a/fs/xfs/linux-2.6/xfs_cred.h b/fs/xfs/linux-2.6/xfs_cred.h
index e7f3da61c6c3..652721ce0ea5 100644
--- a/fs/xfs/linux-2.6/xfs_cred.h
+++ b/fs/xfs/linux-2.6/xfs_cred.h
@@ -30,7 +30,7 @@ typedef struct cred {
 extern struct cred *sys_cred;
 
 /* this is a hack.. (assumes sys_cred is the only cred_t in the system) */
-static __inline int capable_cred(cred_t *cr, int cid)
+static inline int capable_cred(cred_t *cr, int cid)
 {
 	return (cr == sys_cred) ? 1 : capable(cid);
 }
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index ca4f66c4de16..265f0168ab76 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -22,6 +22,7 @@
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
+#include "xfs_dir2.h"
 #include "xfs_dmapi.h"
 #include "xfs_mount.h"
 #include "xfs_export.h"
@@ -30,8 +31,6 @@
 #include "xfs_inode.h"
 #include "xfs_vfsops.h"
 
-static struct dentry dotdot = { .d_name.name = "..", .d_name.len = 2, };
-
 /*
  * Note that we only accept fileids which are long enough rather than allow
  * the parent generation number to default to zero.  XFS considers zero a
@@ -66,7 +65,7 @@ xfs_fs_encode_fh(
 	int			len;
 
 	/* Directories don't need their parent encoded, they have ".." */
-	if (S_ISDIR(inode->i_mode))
+	if (S_ISDIR(inode->i_mode) || !connectable)
 		fileid_type = FILEID_INO32_GEN;
 	else
 		fileid_type = FILEID_INO32_GEN_PARENT;
@@ -213,17 +212,16 @@ xfs_fs_get_parent(
 	struct dentry		*child)
 {
 	int			error;
-	bhv_vnode_t		*cvp;
+	struct xfs_inode	*cip;
 	struct dentry		*parent;
 
-	cvp = NULL;
-	error = xfs_lookup(XFS_I(child->d_inode), &dotdot, &cvp);
+	error = xfs_lookup(XFS_I(child->d_inode), &xfs_name_dotdot, &cip);
 	if (unlikely(error))
 		return ERR_PTR(-error);
 
-	parent = d_alloc_anon(vn_to_inode(cvp));
+	parent = d_alloc_anon(cip->i_vnode);
 	if (unlikely(!parent)) {
-		VN_RELE(cvp);
+		iput(cip->i_vnode);
 		return ERR_PTR(-ENOMEM);
 	}
 	return parent;
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index edab1ffbb163..05905246434d 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -469,16 +469,11 @@ xfs_file_open_exec(
 	struct inode	*inode)
 {
 	struct xfs_mount *mp = XFS_M(inode->i_sb);
+	struct xfs_inode *ip = XFS_I(inode);
 
-	if (unlikely(mp->m_flags & XFS_MOUNT_DMAPI)) {
-		if (DM_EVENT_ENABLED(XFS_I(inode), DM_EVENT_READ)) {
-			bhv_vnode_t *vp = vn_from_inode(inode);
-
-			return -XFS_SEND_DATA(mp, DM_EVENT_READ,
-					       vp, 0, 0, 0, NULL);
-		}
-	}
-
+	if (unlikely(mp->m_flags & XFS_MOUNT_DMAPI) &&
+	    DM_EVENT_ENABLED(ip, DM_EVENT_READ))
+		return -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, 0, 0, 0, NULL);
 	return 0;
 }
 #endif	/* HAVE_FOP_OPEN_EXEC */
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index ac6d34cc355d..1eefe61f0e10 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -17,18 +17,7 @@
  */
 #include "xfs.h"
 #include "xfs_vnodeops.h"
-
-/*
- * The following six includes are needed so that we can include
- * xfs_inode.h. What a mess..
- */
 #include "xfs_bmap_btree.h"
-#include "xfs_inum.h"
-#include "xfs_dir2.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
-
 #include "xfs_inode.h"
 
 int  fs_noerr(void) { return 0; }
@@ -42,11 +31,10 @@ xfs_tosspages(
 	xfs_off_t	last,
 	int		fiopt)
 {
-	bhv_vnode_t	*vp = XFS_ITOV(ip);
-	struct inode	*inode = vn_to_inode(vp);
+	struct address_space *mapping = ip->i_vnode->i_mapping;
 
-	if (VN_CACHED(vp))
-		truncate_inode_pages(inode->i_mapping, first);
+	if (mapping->nrpages)
+		truncate_inode_pages(mapping, first);
 }
 
 int
@@ -56,15 +44,14 @@ xfs_flushinval_pages(
 	xfs_off_t	last,
 	int		fiopt)
 {
-	bhv_vnode_t	*vp = XFS_ITOV(ip);
-	struct inode	*inode = vn_to_inode(vp);
+	struct address_space *mapping = ip->i_vnode->i_mapping;
 	int		ret = 0;
 
-	if (VN_CACHED(vp)) {
+	if (mapping->nrpages) {
 		xfs_iflags_clear(ip, XFS_ITRUNCATED);
-		ret = filemap_write_and_wait(inode->i_mapping);
+		ret = filemap_write_and_wait(mapping);
 		if (!ret)
-			truncate_inode_pages(inode->i_mapping, first);
+			truncate_inode_pages(mapping, first);
 	}
 	return ret;
 }
@@ -77,17 +64,16 @@ xfs_flush_pages(
 	uint64_t	flags,
 	int		fiopt)
 {
-	bhv_vnode_t	*vp = XFS_ITOV(ip);
-	struct inode	*inode = vn_to_inode(vp);
+	struct address_space *mapping = ip->i_vnode->i_mapping;
 	int		ret = 0;
 	int		ret2;
 
-	if (VN_DIRTY(vp)) {
+	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
 		xfs_iflags_clear(ip, XFS_ITRUNCATED);
-		ret = filemap_fdatawrite(inode->i_mapping);
+		ret = filemap_fdatawrite(mapping);
 		if (flags & XFS_B_ASYNC)
 			return ret;
-		ret2 = filemap_fdatawait(inode->i_mapping);
+		ret2 = filemap_fdatawait(mapping);
 		if (!ret)
 			ret = ret2;
 	}
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index f34bd010eb51..4ddb86b73c6b 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -535,8 +535,6 @@ xfs_attrmulti_attr_set(
 	char			*kbuf;
 	int			error = EFAULT;
 
-	if (IS_RDONLY(inode))
-		return -EROFS;
 	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
 		return EPERM;
 	if (len > XATTR_SIZE_MAX)
@@ -562,8 +560,6 @@ xfs_attrmulti_attr_remove(
 	char			*name,
 	__uint32_t		flags)
 {
-	if (IS_RDONLY(inode))
-		return -EROFS;
 	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
 		return EPERM;
 	return xfs_attr_remove(XFS_I(inode), name, flags);
@@ -573,6 +569,7 @@ STATIC int
 xfs_attrmulti_by_handle(
 	xfs_mount_t		*mp,
 	void			__user *arg,
+	struct file		*parfilp,
 	struct inode		*parinode)
 {
 	int			error;
@@ -626,13 +623,21 @@ xfs_attrmulti_by_handle(
 					&ops[i].am_length, ops[i].am_flags);
 			break;
 		case ATTR_OP_SET:
+			ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
+			if (ops[i].am_error)
+				break;
 			ops[i].am_error = xfs_attrmulti_attr_set(inode,
 					attr_name, ops[i].am_attrvalue,
 					ops[i].am_length, ops[i].am_flags);
+			mnt_drop_write(parfilp->f_path.mnt);
 			break;
 		case ATTR_OP_REMOVE:
+			ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
+			if (ops[i].am_error)
+				break;
 			ops[i].am_error = xfs_attrmulti_attr_remove(inode,
 					attr_name, ops[i].am_flags);
+			mnt_drop_write(parfilp->f_path.mnt);
 			break;
 		default:
 			ops[i].am_error = EINVAL;
@@ -651,314 +656,6 @@ xfs_attrmulti_by_handle(
 	return -error;
 }
 
-/* prototypes for a few of the stack-hungry cases that have
- * their own functions.  Functions are defined after their use
- * so gcc doesn't get fancy and inline them with -03 */
-
-STATIC int
-xfs_ioc_space(
-	struct xfs_inode	*ip,
-	struct inode		*inode,
-	struct file		*filp,
-	int			flags,
-	unsigned int		cmd,
-	void			__user *arg);
-
-STATIC int
-xfs_ioc_bulkstat(
-	xfs_mount_t		*mp,
-	unsigned int		cmd,
-	void			__user *arg);
-
-STATIC int
-xfs_ioc_fsgeometry_v1(
-	xfs_mount_t		*mp,
-	void			__user *arg);
-
-STATIC int
-xfs_ioc_fsgeometry(
-	xfs_mount_t		*mp,
-	void			__user *arg);
-
-STATIC int
-xfs_ioc_xattr(
-	xfs_inode_t		*ip,
-	struct file		*filp,
-	unsigned int		cmd,
-	void			__user *arg);
-
-STATIC int
-xfs_ioc_fsgetxattr(
-	xfs_inode_t		*ip,
-	int			attr,
-	void			__user *arg);
-
-STATIC int
-xfs_ioc_getbmap(
-	struct xfs_inode	*ip,
-	int			flags,
-	unsigned int		cmd,
-	void			__user *arg);
-
-STATIC int
-xfs_ioc_getbmapx(
-	struct xfs_inode	*ip,
-	void			__user *arg);
-
-int
-xfs_ioctl(
-	xfs_inode_t		*ip,
-	struct file		*filp,
-	int			ioflags,
-	unsigned int		cmd,
-	void			__user *arg)
-{
-	struct inode		*inode = filp->f_path.dentry->d_inode;
-	xfs_mount_t		*mp = ip->i_mount;
-	int			error;
-
-	xfs_itrace_entry(XFS_I(inode));
-	switch (cmd) {
-
-	case XFS_IOC_ALLOCSP:
-	case XFS_IOC_FREESP:
-	case XFS_IOC_RESVSP:
-	case XFS_IOC_UNRESVSP:
-	case XFS_IOC_ALLOCSP64:
-	case XFS_IOC_FREESP64:
-	case XFS_IOC_RESVSP64:
-	case XFS_IOC_UNRESVSP64:
-		/*
-		 * Only allow the sys admin to reserve space unless
-		 * unwritten extents are enabled.
-		 */
-		if (!xfs_sb_version_hasextflgbit(&mp->m_sb) &&
-		    !capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		return xfs_ioc_space(ip, inode, filp, ioflags, cmd, arg);
-
-	case XFS_IOC_DIOINFO: {
-		struct dioattr		da;
-		xfs_buftarg_t		*target =
-			XFS_IS_REALTIME_INODE(ip) ?
-			mp->m_rtdev_targp : mp->m_ddev_targp;
-
-		da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
-		da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
-
-		if (copy_to_user(arg, &da, sizeof(da)))
-			return -XFS_ERROR(EFAULT);
-		return 0;
-	}
-
-	case XFS_IOC_FSBULKSTAT_SINGLE:
-	case XFS_IOC_FSBULKSTAT:
-	case XFS_IOC_FSINUMBERS:
-		return xfs_ioc_bulkstat(mp, cmd, arg);
-
-	case XFS_IOC_FSGEOMETRY_V1:
-		return xfs_ioc_fsgeometry_v1(mp, arg);
-
-	case XFS_IOC_FSGEOMETRY:
-		return xfs_ioc_fsgeometry(mp, arg);
-
-	case XFS_IOC_GETVERSION:
-		return put_user(inode->i_generation, (int __user *)arg);
-
-	case XFS_IOC_FSGETXATTR:
-		return xfs_ioc_fsgetxattr(ip, 0, arg);
-	case XFS_IOC_FSGETXATTRA:
-		return xfs_ioc_fsgetxattr(ip, 1, arg);
-	case XFS_IOC_GETXFLAGS:
-	case XFS_IOC_SETXFLAGS:
-	case XFS_IOC_FSSETXATTR:
-		return xfs_ioc_xattr(ip, filp, cmd, arg);
-
-	case XFS_IOC_FSSETDM: {
-		struct fsdmidata	dmi;
-
-		if (copy_from_user(&dmi, arg, sizeof(dmi)))
-			return -XFS_ERROR(EFAULT);
-
-		error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask,
-				dmi.fsd_dmstate);
-		return -error;
-	}
-
-	case XFS_IOC_GETBMAP:
-	case XFS_IOC_GETBMAPA:
-		return xfs_ioc_getbmap(ip, ioflags, cmd, arg);
-
-	case XFS_IOC_GETBMAPX:
-		return xfs_ioc_getbmapx(ip, arg);
-
-	case XFS_IOC_FD_TO_HANDLE:
-	case XFS_IOC_PATH_TO_HANDLE:
-	case XFS_IOC_PATH_TO_FSHANDLE:
-		return xfs_find_handle(cmd, arg);
-
-	case XFS_IOC_OPEN_BY_HANDLE:
-		return xfs_open_by_handle(mp, arg, filp, inode);
-
-	case XFS_IOC_FSSETDM_BY_HANDLE:
-		return xfs_fssetdm_by_handle(mp, arg, inode);
-
-	case XFS_IOC_READLINK_BY_HANDLE:
-		return xfs_readlink_by_handle(mp, arg, inode);
-
-	case XFS_IOC_ATTRLIST_BY_HANDLE:
-		return xfs_attrlist_by_handle(mp, arg, inode);
-
-	case XFS_IOC_ATTRMULTI_BY_HANDLE:
-		return xfs_attrmulti_by_handle(mp, arg, inode);
-
-	case XFS_IOC_SWAPEXT: {
-		error = xfs_swapext((struct xfs_swapext __user *)arg);
-		return -error;
-	}
-
-	case XFS_IOC_FSCOUNTS: {
-		xfs_fsop_counts_t out;
-
-		error = xfs_fs_counts(mp, &out);
-		if (error)
-			return -error;
-
-		if (copy_to_user(arg, &out, sizeof(out)))
-			return -XFS_ERROR(EFAULT);
-		return 0;
-	}
-
-	case XFS_IOC_SET_RESBLKS: {
-		xfs_fsop_resblks_t inout;
-		__uint64_t	   in;
-
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		if (copy_from_user(&inout, arg, sizeof(inout)))
-			return -XFS_ERROR(EFAULT);
-
-		/* input parameter is passed in resblks field of structure */
-		in = inout.resblks;
-		error = xfs_reserve_blocks(mp, &in, &inout);
-		if (error)
-			return -error;
-
-		if (copy_to_user(arg, &inout, sizeof(inout)))
-			return -XFS_ERROR(EFAULT);
-		return 0;
-	}
-
-	case XFS_IOC_GET_RESBLKS: {
-		xfs_fsop_resblks_t out;
-
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		error = xfs_reserve_blocks(mp, NULL, &out);
-		if (error)
-			return -error;
-
-		if (copy_to_user(arg, &out, sizeof(out)))
-			return -XFS_ERROR(EFAULT);
-
-		return 0;
-	}
-
-	case XFS_IOC_FSGROWFSDATA: {
-		xfs_growfs_data_t in;
-
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		if (copy_from_user(&in, arg, sizeof(in)))
-			return -XFS_ERROR(EFAULT);
-
-		error = xfs_growfs_data(mp, &in);
-		return -error;
-	}
-
-	case XFS_IOC_FSGROWFSLOG: {
-		xfs_growfs_log_t in;
-
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		if (copy_from_user(&in, arg, sizeof(in)))
-			return -XFS_ERROR(EFAULT);
-
-		error = xfs_growfs_log(mp, &in);
-		return -error;
-	}
-
-	case XFS_IOC_FSGROWFSRT: {
-		xfs_growfs_rt_t in;
-
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		if (copy_from_user(&in, arg, sizeof(in)))
-			return -XFS_ERROR(EFAULT);
-
-		error = xfs_growfs_rt(mp, &in);
-		return -error;
-	}
-
-	case XFS_IOC_FREEZE:
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		if (inode->i_sb->s_frozen == SB_UNFROZEN)
-			freeze_bdev(inode->i_sb->s_bdev);
-		return 0;
-
-	case XFS_IOC_THAW:
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-		if (inode->i_sb->s_frozen != SB_UNFROZEN)
-			thaw_bdev(inode->i_sb->s_bdev, inode->i_sb);
-		return 0;
-
-	case XFS_IOC_GOINGDOWN: {
-		__uint32_t in;
-
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		if (get_user(in, (__uint32_t __user *)arg))
-			return -XFS_ERROR(EFAULT);
-
-		error = xfs_fs_goingdown(mp, in);
-		return -error;
-	}
-
-	case XFS_IOC_ERROR_INJECTION: {
-		xfs_error_injection_t in;
-
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		if (copy_from_user(&in, arg, sizeof(in)))
-			return -XFS_ERROR(EFAULT);
-
-		error = xfs_errortag_add(in.errtag, mp);
-		return -error;
-	}
-
-	case XFS_IOC_ERROR_CLEARALL:
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		error = xfs_errortag_clearall(mp, 1);
-		return -error;
-
-	default:
-		return -ENOTTY;
-	}
-}
-
 STATIC int
 xfs_ioc_space(
 	struct xfs_inode	*ip,
@@ -1179,85 +876,85 @@ xfs_ioc_fsgetxattr(
 }
 
 STATIC int
-xfs_ioc_xattr(
+xfs_ioc_fssetxattr(
 	xfs_inode_t		*ip,
 	struct file		*filp,
-	unsigned int		cmd,
 	void			__user *arg)
 {
 	struct fsxattr		fa;
 	struct bhv_vattr	*vattr;
-	int			error = 0;
+	int			error;
 	int			attr_flags;
-	unsigned int		flags;
+
+	if (copy_from_user(&fa, arg, sizeof(fa)))
+		return -EFAULT;
 
 	vattr = kmalloc(sizeof(*vattr), GFP_KERNEL);
 	if (unlikely(!vattr))
 		return -ENOMEM;
 
-	switch (cmd) {
-	case XFS_IOC_FSSETXATTR: {
-		if (copy_from_user(&fa, arg, sizeof(fa))) {
-			error = -EFAULT;
-			break;
-		}
+	attr_flags = 0;
+	if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
+		attr_flags |= ATTR_NONBLOCK;
 
-		attr_flags = 0;
-		if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
-			attr_flags |= ATTR_NONBLOCK;
+	vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID;
+	vattr->va_xflags  = fa.fsx_xflags;
+	vattr->va_extsize = fa.fsx_extsize;
+	vattr->va_projid  = fa.fsx_projid;
 
-		vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID;
-		vattr->va_xflags  = fa.fsx_xflags;
-		vattr->va_extsize = fa.fsx_extsize;
-		vattr->va_projid  = fa.fsx_projid;
+	error = -xfs_setattr(ip, vattr, attr_flags, NULL);
+	if (!error)
+		vn_revalidate(XFS_ITOV(ip));	/* update flags */
+	kfree(vattr);
+	return 0;
+}
 
-		error = xfs_setattr(ip, vattr, attr_flags, NULL);
-		if (likely(!error))
-			vn_revalidate(XFS_ITOV(ip));	/* update flags */
-		error = -error;
-		break;
-	}
+STATIC int
+xfs_ioc_getxflags(
+	xfs_inode_t		*ip,
+	void			__user *arg)
+{
+	unsigned int		flags;
 
-	case XFS_IOC_GETXFLAGS: {
-		flags = xfs_di2lxflags(ip->i_d.di_flags);
-		if (copy_to_user(arg, &flags, sizeof(flags)))
-			error = -EFAULT;
-		break;
-	}
+	flags = xfs_di2lxflags(ip->i_d.di_flags);
+	if (copy_to_user(arg, &flags, sizeof(flags)))
+		return -EFAULT;
+	return 0;
+}
 
-	case XFS_IOC_SETXFLAGS: {
-		if (copy_from_user(&flags, arg, sizeof(flags))) {
-			error = -EFAULT;
-			break;
-		}
+STATIC int
+xfs_ioc_setxflags(
+	xfs_inode_t		*ip,
+	struct file		*filp,
+	void			__user *arg)
+{
+	struct bhv_vattr	*vattr;
+	unsigned int		flags;
+	int			attr_flags;
+	int			error;
 
-		if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
-			      FS_NOATIME_FL | FS_NODUMP_FL | \
-			      FS_SYNC_FL)) {
-			error = -EOPNOTSUPP;
-			break;
-		}
+	if (copy_from_user(&flags, arg, sizeof(flags)))
+		return -EFAULT;
 
-		attr_flags = 0;
-		if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
-			attr_flags |= ATTR_NONBLOCK;
+	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
+		      FS_NOATIME_FL | FS_NODUMP_FL | \
+		      FS_SYNC_FL))
+		return -EOPNOTSUPP;
 
-		vattr->va_mask = XFS_AT_XFLAGS;
-		vattr->va_xflags = xfs_merge_ioc_xflags(flags,
-						xfs_ip2xflags(ip));
+	vattr = kmalloc(sizeof(*vattr), GFP_KERNEL);
+	if (unlikely(!vattr))
+		return -ENOMEM;
 
-		error = xfs_setattr(ip, vattr, attr_flags, NULL);
-		if (likely(!error))
-			vn_revalidate(XFS_ITOV(ip));	/* update flags */
-		error = -error;
-		break;
-	}
+	attr_flags = 0;
+	if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
+		attr_flags |= ATTR_NONBLOCK;
 
-	default:
-		error = -ENOTTY;
-		break;
-	}
+	vattr->va_mask = XFS_AT_XFLAGS;
+	vattr->va_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));
 
+	error = -xfs_setattr(ip, vattr, attr_flags, NULL);
+	if (likely(!error))
+		vn_revalidate(XFS_ITOV(ip));	/* update flags */
 	kfree(vattr);
 	return error;
 }
@@ -1332,3 +1029,259 @@ xfs_ioc_getbmapx(
 
 	return 0;
 }
+
+int
+xfs_ioctl(
+	xfs_inode_t		*ip,
+	struct file		*filp,
+	int			ioflags,
+	unsigned int		cmd,
+	void			__user *arg)
+{
+	struct inode		*inode = filp->f_path.dentry->d_inode;
+	xfs_mount_t		*mp = ip->i_mount;
+	int			error;
+
+	xfs_itrace_entry(XFS_I(inode));
+	switch (cmd) {
+
+	case XFS_IOC_ALLOCSP:
+	case XFS_IOC_FREESP:
+	case XFS_IOC_RESVSP:
+	case XFS_IOC_UNRESVSP:
+	case XFS_IOC_ALLOCSP64:
+	case XFS_IOC_FREESP64:
+	case XFS_IOC_RESVSP64:
+	case XFS_IOC_UNRESVSP64:
+		/*
+		 * Only allow the sys admin to reserve space unless
+		 * unwritten extents are enabled.
+		 */
+		if (!xfs_sb_version_hasextflgbit(&mp->m_sb) &&
+		    !capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		return xfs_ioc_space(ip, inode, filp, ioflags, cmd, arg);
+
+	case XFS_IOC_DIOINFO: {
+		struct dioattr		da;
+		xfs_buftarg_t		*target =
+			XFS_IS_REALTIME_INODE(ip) ?
+			mp->m_rtdev_targp : mp->m_ddev_targp;
+
+		da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
+		da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
+
+		if (copy_to_user(arg, &da, sizeof(da)))
+			return -XFS_ERROR(EFAULT);
+		return 0;
+	}
+
+	case XFS_IOC_FSBULKSTAT_SINGLE:
+	case XFS_IOC_FSBULKSTAT:
+	case XFS_IOC_FSINUMBERS:
+		return xfs_ioc_bulkstat(mp, cmd, arg);
+
+	case XFS_IOC_FSGEOMETRY_V1:
+		return xfs_ioc_fsgeometry_v1(mp, arg);
+
+	case XFS_IOC_FSGEOMETRY:
+		return xfs_ioc_fsgeometry(mp, arg);
+
+	case XFS_IOC_GETVERSION:
+		return put_user(inode->i_generation, (int __user *)arg);
+
+	case XFS_IOC_FSGETXATTR:
+		return xfs_ioc_fsgetxattr(ip, 0, arg);
+	case XFS_IOC_FSGETXATTRA:
+		return xfs_ioc_fsgetxattr(ip, 1, arg);
+	case XFS_IOC_FSSETXATTR:
+		return xfs_ioc_fssetxattr(ip, filp, arg);
+	case XFS_IOC_GETXFLAGS:
+		return xfs_ioc_getxflags(ip, arg);
+	case XFS_IOC_SETXFLAGS:
+		return xfs_ioc_setxflags(ip, filp, arg);
+
+	case XFS_IOC_FSSETDM: {
+		struct fsdmidata	dmi;
+
+		if (copy_from_user(&dmi, arg, sizeof(dmi)))
+			return -XFS_ERROR(EFAULT);
+
+		error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask,
+				dmi.fsd_dmstate);
+		return -error;
+	}
+
+	case XFS_IOC_GETBMAP:
+	case XFS_IOC_GETBMAPA:
+		return xfs_ioc_getbmap(ip, ioflags, cmd, arg);
+
+	case XFS_IOC_GETBMAPX:
+		return xfs_ioc_getbmapx(ip, arg);
+
+	case XFS_IOC_FD_TO_HANDLE:
+	case XFS_IOC_PATH_TO_HANDLE:
+	case XFS_IOC_PATH_TO_FSHANDLE:
+		return xfs_find_handle(cmd, arg);
+
+	case XFS_IOC_OPEN_BY_HANDLE:
+		return xfs_open_by_handle(mp, arg, filp, inode);
+
+	case XFS_IOC_FSSETDM_BY_HANDLE:
+		return xfs_fssetdm_by_handle(mp, arg, inode);
+
+	case XFS_IOC_READLINK_BY_HANDLE:
+		return xfs_readlink_by_handle(mp, arg, inode);
+
+	case XFS_IOC_ATTRLIST_BY_HANDLE:
+		return xfs_attrlist_by_handle(mp, arg, inode);
+
+	case XFS_IOC_ATTRMULTI_BY_HANDLE:
+		return xfs_attrmulti_by_handle(mp, arg, filp, inode);
+
+	case XFS_IOC_SWAPEXT: {
+		error = xfs_swapext((struct xfs_swapext __user *)arg);
+		return -error;
+	}
+
+	case XFS_IOC_FSCOUNTS: {
+		xfs_fsop_counts_t out;
+
+		error = xfs_fs_counts(mp, &out);
+		if (error)
+			return -error;
+
+		if (copy_to_user(arg, &out, sizeof(out)))
+			return -XFS_ERROR(EFAULT);
+		return 0;
+	}
+
+	case XFS_IOC_SET_RESBLKS: {
+		xfs_fsop_resblks_t inout;
+		__uint64_t	   in;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&inout, arg, sizeof(inout)))
+			return -XFS_ERROR(EFAULT);
+
+		/* input parameter is passed in resblks field of structure */
+		in = inout.resblks;
+		error = xfs_reserve_blocks(mp, &in, &inout);
+		if (error)
+			return -error;
+
+		if (copy_to_user(arg, &inout, sizeof(inout)))
+			return -XFS_ERROR(EFAULT);
+		return 0;
+	}
+
+	case XFS_IOC_GET_RESBLKS: {
+		xfs_fsop_resblks_t out;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		error = xfs_reserve_blocks(mp, NULL, &out);
+		if (error)
+			return -error;
+
+		if (copy_to_user(arg, &out, sizeof(out)))
+			return -XFS_ERROR(EFAULT);
+
+		return 0;
+	}
+
+	case XFS_IOC_FSGROWFSDATA: {
+		xfs_growfs_data_t in;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&in, arg, sizeof(in)))
+			return -XFS_ERROR(EFAULT);
+
+		error = xfs_growfs_data(mp, &in);
+		return -error;
+	}
+
+	case XFS_IOC_FSGROWFSLOG: {
+		xfs_growfs_log_t in;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&in, arg, sizeof(in)))
+			return -XFS_ERROR(EFAULT);
+
+		error = xfs_growfs_log(mp, &in);
+		return -error;
+	}
+
+	case XFS_IOC_FSGROWFSRT: {
+		xfs_growfs_rt_t in;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&in, arg, sizeof(in)))
+			return -XFS_ERROR(EFAULT);
+
+		error = xfs_growfs_rt(mp, &in);
+		return -error;
+	}
+
+	case XFS_IOC_FREEZE:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (inode->i_sb->s_frozen == SB_UNFROZEN)
+			freeze_bdev(inode->i_sb->s_bdev);
+		return 0;
+
+	case XFS_IOC_THAW:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		if (inode->i_sb->s_frozen != SB_UNFROZEN)
+			thaw_bdev(inode->i_sb->s_bdev, inode->i_sb);
+		return 0;
+
+	case XFS_IOC_GOINGDOWN: {
+		__uint32_t in;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (get_user(in, (__uint32_t __user *)arg))
+			return -XFS_ERROR(EFAULT);
+
+		error = xfs_fs_goingdown(mp, in);
+		return -error;
+	}
+
+	case XFS_IOC_ERROR_INJECTION: {
+		xfs_error_injection_t in;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&in, arg, sizeof(in)))
+			return -XFS_ERROR(EFAULT);
+
+		error = xfs_errortag_add(in.errtag, mp);
+		return -error;
+	}
+
+	case XFS_IOC_ERROR_CLEARALL:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		error = xfs_errortag_clearall(mp, 1);
+		return -error;
+
+	default:
+		return -ENOTTY;
+	}
+}
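An aside, not part of the commit: the dispatch table above is the kernel side of the XFS ioctl interface; user space reaches it through ioctl(2) on any file or directory in the filesystem. The sketch below queries filesystem geometry via XFS_IOC_FSGEOMETRY. It assumes the xfsprogs header <xfs/xfs_fs.h> (which provides xfs_fsop_geom_t and the ioctl number) is installed, and the /mnt/xfs default path is only an example.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs_fs.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/mnt/xfs";
	xfs_fsop_geom_t geo;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Non-XFS files fail with ENOTTY, matching the default: case above. */
	if (ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) < 0) {
		perror("XFS_IOC_FSGEOMETRY");
		close(fd);
		return 1;
	}
	printf("blocksize=%u agcount=%u datablocks=%llu\n",
	       geo.blocksize, geo.agcount, (unsigned long long)geo.datablocks);
	close(fd);
	return 0;
}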
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index cc4abd3daa49..a1237dad6430 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -62,12 +62,11 @@
 xfs_synchronize_atime(
 	xfs_inode_t	*ip)
 {
-	bhv_vnode_t	*vp;
+	struct inode	*inode = ip->i_vnode;
 
-	vp = XFS_ITOV_NULL(ip);
-	if (vp) {
-		ip->i_d.di_atime.t_sec = (__int32_t)vp->i_atime.tv_sec;
-		ip->i_d.di_atime.t_nsec = (__int32_t)vp->i_atime.tv_nsec;
+	if (inode) {
+		ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec;
+		ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec;
 	}
 }
 
@@ -80,11 +79,10 @@
 xfs_mark_inode_dirty_sync(
 	xfs_inode_t	*ip)
 {
-	bhv_vnode_t	*vp;
+	struct inode	*inode = ip->i_vnode;
 
-	vp = XFS_ITOV_NULL(ip);
-	if (vp)
-		mark_inode_dirty_sync(vn_to_inode(vp));
+	if (inode)
+		mark_inode_dirty_sync(inode);
 }
 
 /*
@@ -157,13 +155,6 @@ xfs_ichgtime_fast(
 	 */
 	ASSERT((flags & XFS_ICHGTIME_ACC) == 0);
 
-	/*
-	 * We're not supposed to change timestamps in readonly-mounted
-	 * filesystems.  Throw it away if anyone asks us.
-	 */
-	if (unlikely(IS_RDONLY(inode)))
-		return;
-
 	if (flags & XFS_ICHGTIME_MOD) {
 		tvp = &inode->i_mtime;
 		ip->i_d.di_mtime.t_sec = (__int32_t)tvp->tv_sec;
@@ -215,66 +206,62 @@
  */
 STATIC int
 xfs_init_security(
-	bhv_vnode_t	*vp,
+	struct inode	*inode,
 	struct inode	*dir)
 {
-	struct inode	*ip = vn_to_inode(vp);
+	struct xfs_inode *ip = XFS_I(inode);
 	size_t		length;
 	void		*value;
 	char		*name;
 	int		error;
 
-	error = security_inode_init_security(ip, dir, &name, &value, &length);
+	error = security_inode_init_security(inode, dir, &name,
+					     &value, &length);
 	if (error) {
 		if (error == -EOPNOTSUPP)
 			return 0;
 		return -error;
 	}
 
-	error = xfs_attr_set(XFS_I(ip), name, value,
-			length, ATTR_SECURE);
+	error = xfs_attr_set(ip, name, value, length, ATTR_SECURE);
 	if (!error)
-		xfs_iflags_set(XFS_I(ip), XFS_IMODIFIED);
+		xfs_iflags_set(ip, XFS_IMODIFIED);
 
 	kfree(name);
 	kfree(value);
 	return error;
 }
 
-/*
- * Determine whether a process has a valid fs_struct (kernel daemons
- * like knfsd don't have an fs_struct).
- *
- * XXX(hch):  nfsd is broken, better fix it instead.
- */
-STATIC_INLINE int
-xfs_has_fs_struct(struct task_struct *task)
+static void
+xfs_dentry_to_name(
+	struct xfs_name	*namep,
+	struct dentry	*dentry)
 {
-	return (task->fs != init_task.fs);
+	namep->name = dentry->d_name.name;
+	namep->len = dentry->d_name.len;
 }
 
 STATIC void
 xfs_cleanup_inode(
 	struct inode	*dir,
-	bhv_vnode_t	*vp,
+	struct inode	*inode,
 	struct dentry	*dentry,
 	int		mode)
 {
-	struct dentry	teardown = {};
+	struct xfs_name	teardown;
 
 	/* Oh, the horror.
 	 * If we can't add the ACL or we fail in
 	 * xfs_init_security we must back out.
 	 * ENOSPC can hit here, among other things.
 	 */
-	teardown.d_inode = vn_to_inode(vp);
-	teardown.d_name = dentry->d_name;
+	xfs_dentry_to_name(&teardown, dentry);
 
 	if (S_ISDIR(mode))
-		xfs_rmdir(XFS_I(dir), &teardown);
+		xfs_rmdir(XFS_I(dir), &teardown, XFS_I(inode));
 	else
-		xfs_remove(XFS_I(dir), &teardown);
-	VN_RELE(vp);
+		xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
+	iput(inode);
 }
 
 STATIC int
@@ -284,9 +271,10 @@ xfs_vn_mknod(
 	int		mode,
 	dev_t		rdev)
 {
-	struct inode	*ip;
-	bhv_vnode_t	*vp = NULL, *dvp = vn_from_inode(dir);
+	struct inode	*inode;
+	struct xfs_inode *ip = NULL;
 	xfs_acl_t	*default_acl = NULL;
+	struct xfs_name	name;
 	attrexists_t	test_default_acl = _ACL_DEFAULT_EXISTS;
 	int		error;
 
@@ -297,59 +285,67 @@ xfs_vn_mknod(
 	if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff))
 		return -EINVAL;
 
-	if (unlikely(test_default_acl && test_default_acl(dvp))) {
+	if (test_default_acl && test_default_acl(dir)) {
 		if (!_ACL_ALLOC(default_acl)) {
 			return -ENOMEM;
 		}
-		if (!_ACL_GET_DEFAULT(dvp, default_acl)) {
+		if (!_ACL_GET_DEFAULT(dir, default_acl)) {
 			_ACL_FREE(default_acl);
 			default_acl = NULL;
 		}
 	}
 
-	if (IS_POSIXACL(dir) && !default_acl && xfs_has_fs_struct(current))
+	xfs_dentry_to_name(&name, dentry);
+
+	if (IS_POSIXACL(dir) && !default_acl)
 		mode &= ~current->fs->umask;
 
 	switch (mode & S_IFMT) {
-	case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK:
+	case S_IFCHR:
+	case S_IFBLK:
+	case S_IFIFO:
+	case S_IFSOCK:
 		rdev = sysv_encode_dev(rdev);
 	case S_IFREG:
-		error = xfs_create(XFS_I(dir), dentry, mode, rdev, &vp, NULL);
+		error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip, NULL);
 		break;
 	case S_IFDIR:
-		error = xfs_mkdir(XFS_I(dir), dentry, mode, &vp, NULL);
+		error = xfs_mkdir(XFS_I(dir), &name, mode, &ip, NULL);
 		break;
 	default:
 		error = EINVAL;
 		break;
 	}
 
-	if (unlikely(!error)) {
-		error = xfs_init_security(vp, dir);
-		if (error)
-			xfs_cleanup_inode(dir, vp, dentry, mode);
-	}
+	if (unlikely(error))
+		goto out_free_acl;
 
-	if (unlikely(default_acl)) {
-		if (!error) {
-			error = _ACL_INHERIT(vp, mode, default_acl);
-			if (!error)
-				xfs_iflags_set(XFS_I(vp), XFS_IMODIFIED);
-			else
-				xfs_cleanup_inode(dir, vp, dentry, mode);
-		}
+	inode = ip->i_vnode;
+
+	error = xfs_init_security(inode, dir);
+	if (unlikely(error))
+		goto out_cleanup_inode;
+
+	if (default_acl) {
+		error = _ACL_INHERIT(inode, mode, default_acl);
+		if (unlikely(error))
+			goto out_cleanup_inode;
+		xfs_iflags_set(ip, XFS_IMODIFIED);
 		_ACL_FREE(default_acl);
 	}
 
-	if (likely(!error)) {
-		ASSERT(vp);
-		ip = vn_to_inode(vp);
 
 	if (S_ISDIR(mode))
-		xfs_validate_fields(ip);
-	d_instantiate(dentry, ip);
+		xfs_validate_fields(inode);
+	d_instantiate(dentry, inode);
 	xfs_validate_fields(dir);
-	}
+	return -error;
+
+out_cleanup_inode:
+	xfs_cleanup_inode(dir, inode, dentry, mode);
+out_free_acl:
+	if (default_acl)
+		_ACL_FREE(default_acl);
 	return -error;
 }
 
@@ -378,13 +374,15 @@ xfs_vn_lookup(
 	struct dentry	*dentry,
 	struct nameidata *nd)
 {
-	bhv_vnode_t	*cvp;
+	struct xfs_inode *cip;
+	struct xfs_name	name;
 	int		error;
 
 	if (dentry->d_name.len >= MAXNAMELEN)
 		return ERR_PTR(-ENAMETOOLONG);
 
-	error = xfs_lookup(XFS_I(dir), dentry, &cvp);
+	xfs_dentry_to_name(&name, dentry);
+	error = xfs_lookup(XFS_I(dir), &name, &cip);
 	if (unlikely(error)) {
 		if (unlikely(error != ENOENT))
 			return ERR_PTR(-error);
@@ -392,7 +390,7 @@ xfs_vn_lookup(
 		return NULL;
 	}
 
-	return d_splice_alias(vn_to_inode(cvp), dentry);
+	return d_splice_alias(cip->i_vnode, dentry);
 }
 
 STATIC int
@@ -401,23 +399,24 @@ xfs_vn_link(
 	struct inode	*dir,
 	struct dentry	*dentry)
 {
-	struct inode	*ip;	/* inode of guy being linked to */
-	bhv_vnode_t	*vp;	/* vp of name being linked */
+	struct inode	*inode;	/* inode of guy being linked to */
+	struct xfs_name	name;
 	int		error;
 
-	ip = old_dentry->d_inode;	/* inode being linked to */
-	vp = vn_from_inode(ip);
+	inode = old_dentry->d_inode;
+	xfs_dentry_to_name(&name, dentry);
 
-	VN_HOLD(vp);
-	error = xfs_link(XFS_I(dir), vp, dentry);
+	igrab(inode);
+	error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
 	if (unlikely(error)) {
-		VN_RELE(vp);
-	} else {
-		xfs_iflags_set(XFS_I(dir), XFS_IMODIFIED);
-		xfs_validate_fields(ip);
-		d_instantiate(dentry, ip);
+		iput(inode);
+		return -error;
 	}
-	return -error;
+
+	xfs_iflags_set(XFS_I(dir), XFS_IMODIFIED);
+	xfs_validate_fields(inode);
+	d_instantiate(dentry, inode);
+	return 0;
 }
 
 STATIC int
@@ -426,11 +425,13 @@ xfs_vn_unlink(
 	struct dentry	*dentry)
 {
 	struct inode	*inode;
+	struct xfs_name	name;
 	int		error;
 
 	inode = dentry->d_inode;
+	xfs_dentry_to_name(&name, dentry);
 
-	error = xfs_remove(XFS_I(dir), dentry);
+	error = xfs_remove(XFS_I(dir), &name, XFS_I(inode));
 	if (likely(!error)) {
 		xfs_validate_fields(dir);	/* size needs update */
 		xfs_validate_fields(inode);
@@ -444,29 +445,34 @@ xfs_vn_symlink(
 	struct dentry	*dentry,
 	const char	*symname)
 {
-	struct inode	*ip;
-	bhv_vnode_t	*cvp;	/* used to lookup symlink to put in dentry */
+	struct inode	*inode;
+	struct xfs_inode *cip = NULL;
+	struct xfs_name	name;
 	int		error;
 	mode_t		mode;
 
-	cvp = NULL;
-
 	mode = S_IFLNK |
 		(irix_symlink_mode ? 0777 & ~current->fs->umask : S_IRWXUGO);
+	xfs_dentry_to_name(&name, dentry);
 
-	error = xfs_symlink(XFS_I(dir), dentry, (char *)symname, mode,
-			    &cvp, NULL);
-	if (likely(!error && cvp)) {
-		error = xfs_init_security(cvp, dir);
-		if (likely(!error)) {
-			ip = vn_to_inode(cvp);
-			d_instantiate(dentry, ip);
-			xfs_validate_fields(dir);
-			xfs_validate_fields(ip);
-		} else {
-			xfs_cleanup_inode(dir, cvp, dentry, 0);
-		}
-	}
+	error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip, NULL);
+	if (unlikely(error))
+		goto out;
+
+	inode = cip->i_vnode;
+
+	error = xfs_init_security(inode, dir);
+	if (unlikely(error))
+		goto out_cleanup_inode;
+
+	d_instantiate(dentry, inode);
+	xfs_validate_fields(dir);
+	xfs_validate_fields(inode);
+	return 0;
+
+out_cleanup_inode:
+	xfs_cleanup_inode(dir, inode, dentry, 0);
+out:
 	return -error;
 }
 
@@ -476,9 +482,12 @@ xfs_vn_rmdir(
 	struct dentry	*dentry)
 {
 	struct inode	*inode = dentry->d_inode;
+	struct xfs_name	name;
 	int		error;
 
-	error = xfs_rmdir(XFS_I(dir), dentry);
+	xfs_dentry_to_name(&name, dentry);
+
+	error = xfs_rmdir(XFS_I(dir), &name, XFS_I(inode));
 	if (likely(!error)) {
 		xfs_validate_fields(inode);
 		xfs_validate_fields(dir);
@@ -494,12 +503,15 @@ xfs_vn_rename(
 	struct dentry	*ndentry)
 {
 	struct inode	*new_inode = ndentry->d_inode;
-	bhv_vnode_t	*tvp;	/* target directory */
+	struct xfs_name	oname;
+	struct xfs_name	nname;
 	int		error;
 
-	tvp = vn_from_inode(ndir);
+	xfs_dentry_to_name(&oname, odentry);
+	xfs_dentry_to_name(&nname, ndentry);
 
-	error = xfs_rename(XFS_I(odir), odentry, tvp, ndentry);
+	error = xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode),
+			   XFS_I(ndir), &nname);
 	if (likely(!error)) {
 		if (new_inode)
 			xfs_validate_fields(new_inode);
@@ -700,11 +712,19 @@ xfs_vn_setattr( | |||
700 | return -error; | 712 | return -error; |
701 | } | 713 | } |
702 | 714 | ||
715 | /* | ||
716 | * block_truncate_page can return an error, but we can't propagate it | ||
717 | * at all here. Leave a complaint + stack trace in the syslog because | ||
718 | * this could be bad. If it is bad, we need to propagate the error further. | ||
719 | */ | ||
703 | STATIC void | 720 | STATIC void |
704 | xfs_vn_truncate( | 721 | xfs_vn_truncate( |
705 | struct inode *inode) | 722 | struct inode *inode) |
706 | { | 723 | { |
707 | block_truncate_page(inode->i_mapping, inode->i_size, xfs_get_blocks); | 724 | int error; |
725 | error = block_truncate_page(inode->i_mapping, inode->i_size, | ||
726 | xfs_get_blocks); | ||
727 | WARN_ON(error); | ||
708 | } | 728 | } |
709 | 729 | ||
710 | STATIC int | 730 | STATIC int |
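The xfs_vn_symlink() rework above replaces the nested success-path conditionals with the forward-goto unwinding style used throughout the kernel: each failure jumps to a label that undoes only what has already been set up. A rough, self-contained illustration of that shape (userspace C with made-up resource names, not the XFS code itself):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical resources standing in for the inode/dentry setup steps. */
static int acquire_a(void **a) { *a = malloc(16); return *a ? 0 : ENOMEM; }
static int acquire_b(void **b) { *b = malloc(16); return *b ? 0 : ENOMEM; }

static int do_create(void)
{
        void *a = NULL, *b = NULL;
        int error;

        error = acquire_a(&a);
        if (error)
                goto out;               /* nothing to undo yet */

        error = acquire_b(&b);
        if (error)
                goto out_release_a;     /* undo only the completed step */

        printf("both resources set up\n");
        free(b);
        free(a);
        return 0;

out_release_a:
        free(a);
out:
        return -error;                  /* negative errno, as the VFS expects */
}

int main(void) { return do_create(); }

The new out_cleanup_inode/out labels in xfs_vn_symlink() play the same role for a half-built symlink whose security attribute could not be initialised.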
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h index 3ca39c4e5d2a..e5143323e71f 100644 --- a/fs/xfs/linux-2.6/xfs_linux.h +++ b/fs/xfs/linux-2.6/xfs_linux.h | |||
@@ -99,7 +99,6 @@ | |||
99 | /* | 99 | /* |
100 | * Feature macros (disable/enable) | 100 | * Feature macros (disable/enable) |
101 | */ | 101 | */ |
102 | #undef HAVE_REFCACHE /* reference cache not needed for NFS in 2.6 */ | ||
103 | #define HAVE_SPLICE /* a splice(2) exists in 2.6, but not in 2.4 */ | 102 | #define HAVE_SPLICE /* a splice(2) exists in 2.6, but not in 2.4 */ |
104 | #ifdef CONFIG_SMP | 103 | #ifdef CONFIG_SMP |
105 | #define HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */ | 104 | #define HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */ |
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c index 166353388490..1ebd8004469c 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.c +++ b/fs/xfs/linux-2.6/xfs_lrw.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include "xfs_vnodeops.h" | 51 | #include "xfs_vnodeops.h" |
52 | 52 | ||
53 | #include <linux/capability.h> | 53 | #include <linux/capability.h> |
54 | #include <linux/mount.h> | ||
54 | #include <linux/writeback.h> | 55 | #include <linux/writeback.h> |
55 | 56 | ||
56 | 57 | ||
@@ -176,7 +177,6 @@ xfs_read( | |||
176 | { | 177 | { |
177 | struct file *file = iocb->ki_filp; | 178 | struct file *file = iocb->ki_filp; |
178 | struct inode *inode = file->f_mapping->host; | 179 | struct inode *inode = file->f_mapping->host; |
179 | bhv_vnode_t *vp = XFS_ITOV(ip); | ||
180 | xfs_mount_t *mp = ip->i_mount; | 180 | xfs_mount_t *mp = ip->i_mount; |
181 | size_t size = 0; | 181 | size_t size = 0; |
182 | ssize_t ret = 0; | 182 | ssize_t ret = 0; |
@@ -228,11 +228,11 @@ xfs_read( | |||
228 | xfs_ilock(ip, XFS_IOLOCK_SHARED); | 228 | xfs_ilock(ip, XFS_IOLOCK_SHARED); |
229 | 229 | ||
230 | if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) { | 230 | if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) { |
231 | bhv_vrwlock_t locktype = VRWLOCK_READ; | ||
232 | int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags); | 231 | int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags); |
232 | int iolock = XFS_IOLOCK_SHARED; | ||
233 | 233 | ||
234 | ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, vp, *offset, size, | 234 | ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *offset, size, |
235 | dmflags, &locktype); | 235 | dmflags, &iolock); |
236 | if (ret) { | 236 | if (ret) { |
237 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); | 237 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); |
238 | if (unlikely(ioflags & IO_ISDIRECT)) | 238 | if (unlikely(ioflags & IO_ISDIRECT)) |
@@ -242,7 +242,7 @@ xfs_read( | |||
242 | } | 242 | } |
243 | 243 | ||
244 | if (unlikely(ioflags & IO_ISDIRECT)) { | 244 | if (unlikely(ioflags & IO_ISDIRECT)) { |
245 | if (VN_CACHED(vp)) | 245 | if (inode->i_mapping->nrpages) |
246 | ret = xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK), | 246 | ret = xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK), |
247 | -1, FI_REMAPF_LOCKED); | 247 | -1, FI_REMAPF_LOCKED); |
248 | mutex_unlock(&inode->i_mutex); | 248 | mutex_unlock(&inode->i_mutex); |
@@ -276,7 +276,6 @@ xfs_splice_read( | |||
276 | int flags, | 276 | int flags, |
277 | int ioflags) | 277 | int ioflags) |
278 | { | 278 | { |
279 | bhv_vnode_t *vp = XFS_ITOV(ip); | ||
280 | xfs_mount_t *mp = ip->i_mount; | 279 | xfs_mount_t *mp = ip->i_mount; |
281 | ssize_t ret; | 280 | ssize_t ret; |
282 | 281 | ||
@@ -287,11 +286,11 @@ xfs_splice_read( | |||
287 | xfs_ilock(ip, XFS_IOLOCK_SHARED); | 286 | xfs_ilock(ip, XFS_IOLOCK_SHARED); |
288 | 287 | ||
289 | if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) { | 288 | if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) { |
290 | bhv_vrwlock_t locktype = VRWLOCK_READ; | 289 | int iolock = XFS_IOLOCK_SHARED; |
291 | int error; | 290 | int error; |
292 | 291 | ||
293 | error = XFS_SEND_DATA(mp, DM_EVENT_READ, vp, *ppos, count, | 292 | error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count, |
294 | FILP_DELAY_FLAG(infilp), &locktype); | 293 | FILP_DELAY_FLAG(infilp), &iolock); |
295 | if (error) { | 294 | if (error) { |
296 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); | 295 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); |
297 | return -error; | 296 | return -error; |
@@ -317,7 +316,6 @@ xfs_splice_write( | |||
317 | int flags, | 316 | int flags, |
318 | int ioflags) | 317 | int ioflags) |
319 | { | 318 | { |
320 | bhv_vnode_t *vp = XFS_ITOV(ip); | ||
321 | xfs_mount_t *mp = ip->i_mount; | 319 | xfs_mount_t *mp = ip->i_mount; |
322 | ssize_t ret; | 320 | ssize_t ret; |
323 | struct inode *inode = outfilp->f_mapping->host; | 321 | struct inode *inode = outfilp->f_mapping->host; |
@@ -330,11 +328,11 @@ xfs_splice_write( | |||
330 | xfs_ilock(ip, XFS_IOLOCK_EXCL); | 328 | xfs_ilock(ip, XFS_IOLOCK_EXCL); |
331 | 329 | ||
332 | if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) { | 330 | if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) { |
333 | bhv_vrwlock_t locktype = VRWLOCK_WRITE; | 331 | int iolock = XFS_IOLOCK_EXCL; |
334 | int error; | 332 | int error; |
335 | 333 | ||
336 | error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, vp, *ppos, count, | 334 | error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count, |
337 | FILP_DELAY_FLAG(outfilp), &locktype); | 335 | FILP_DELAY_FLAG(outfilp), &iolock); |
338 | if (error) { | 336 | if (error) { |
339 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | 337 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); |
340 | return -error; | 338 | return -error; |
@@ -573,14 +571,12 @@ xfs_write( | |||
573 | struct file *file = iocb->ki_filp; | 571 | struct file *file = iocb->ki_filp; |
574 | struct address_space *mapping = file->f_mapping; | 572 | struct address_space *mapping = file->f_mapping; |
575 | struct inode *inode = mapping->host; | 573 | struct inode *inode = mapping->host; |
576 | bhv_vnode_t *vp = XFS_ITOV(xip); | ||
577 | unsigned long segs = nsegs; | 574 | unsigned long segs = nsegs; |
578 | xfs_mount_t *mp; | 575 | xfs_mount_t *mp; |
579 | ssize_t ret = 0, error = 0; | 576 | ssize_t ret = 0, error = 0; |
580 | xfs_fsize_t isize, new_size; | 577 | xfs_fsize_t isize, new_size; |
581 | int iolock; | 578 | int iolock; |
582 | int eventsent = 0; | 579 | int eventsent = 0; |
583 | bhv_vrwlock_t locktype; | ||
584 | size_t ocount = 0, count; | 580 | size_t ocount = 0, count; |
585 | loff_t pos; | 581 | loff_t pos; |
586 | int need_i_mutex; | 582 | int need_i_mutex; |
@@ -607,11 +603,9 @@ xfs_write( | |||
607 | relock: | 603 | relock: |
608 | if (ioflags & IO_ISDIRECT) { | 604 | if (ioflags & IO_ISDIRECT) { |
609 | iolock = XFS_IOLOCK_SHARED; | 605 | iolock = XFS_IOLOCK_SHARED; |
610 | locktype = VRWLOCK_WRITE_DIRECT; | ||
611 | need_i_mutex = 0; | 606 | need_i_mutex = 0; |
612 | } else { | 607 | } else { |
613 | iolock = XFS_IOLOCK_EXCL; | 608 | iolock = XFS_IOLOCK_EXCL; |
614 | locktype = VRWLOCK_WRITE; | ||
615 | need_i_mutex = 1; | 609 | need_i_mutex = 1; |
616 | mutex_lock(&inode->i_mutex); | 610 | mutex_lock(&inode->i_mutex); |
617 | } | 611 | } |
@@ -634,9 +628,8 @@ start: | |||
634 | dmflags |= DM_FLAGS_IMUX; | 628 | dmflags |= DM_FLAGS_IMUX; |
635 | 629 | ||
636 | xfs_iunlock(xip, XFS_ILOCK_EXCL); | 630 | xfs_iunlock(xip, XFS_ILOCK_EXCL); |
637 | error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp, | 631 | error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, xip, |
638 | pos, count, | 632 | pos, count, dmflags, &iolock); |
639 | dmflags, &locktype); | ||
640 | if (error) { | 633 | if (error) { |
641 | goto out_unlock_internal; | 634 | goto out_unlock_internal; |
642 | } | 635 | } |
@@ -664,10 +657,9 @@ start: | |||
664 | return XFS_ERROR(-EINVAL); | 657 | return XFS_ERROR(-EINVAL); |
665 | } | 658 | } |
666 | 659 | ||
667 | if (!need_i_mutex && (VN_CACHED(vp) || pos > xip->i_size)) { | 660 | if (!need_i_mutex && (mapping->nrpages || pos > xip->i_size)) { |
668 | xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock); | 661 | xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock); |
669 | iolock = XFS_IOLOCK_EXCL; | 662 | iolock = XFS_IOLOCK_EXCL; |
670 | locktype = VRWLOCK_WRITE; | ||
671 | need_i_mutex = 1; | 663 | need_i_mutex = 1; |
672 | mutex_lock(&inode->i_mutex); | 664 | mutex_lock(&inode->i_mutex); |
673 | xfs_ilock(xip, XFS_ILOCK_EXCL|iolock); | 665 | xfs_ilock(xip, XFS_ILOCK_EXCL|iolock); |
@@ -679,10 +671,16 @@ start: | |||
679 | if (new_size > xip->i_size) | 671 | if (new_size > xip->i_size) |
680 | xip->i_new_size = new_size; | 672 | xip->i_new_size = new_size; |
681 | 673 | ||
682 | if (likely(!(ioflags & IO_INVIS))) { | 674 | /* |
675 | * We're not supposed to change timestamps in readonly-mounted | ||
676 | * filesystems. Throw the update away if anyone asks us. | ||
677 | */ | ||
678 | if (likely(!(ioflags & IO_INVIS) && | ||
679 | !mnt_want_write(file->f_path.mnt))) { | ||
683 | file_update_time(file); | 680 | file_update_time(file); |
684 | xfs_ichgtime_fast(xip, inode, | 681 | xfs_ichgtime_fast(xip, inode, |
685 | XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 682 | XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); |
683 | mnt_drop_write(file->f_path.mnt); | ||
686 | } | 684 | } |
687 | 685 | ||
688 | /* | 686 | /* |
@@ -727,7 +725,7 @@ retry: | |||
727 | current->backing_dev_info = mapping->backing_dev_info; | 725 | current->backing_dev_info = mapping->backing_dev_info; |
728 | 726 | ||
729 | if ((ioflags & IO_ISDIRECT)) { | 727 | if ((ioflags & IO_ISDIRECT)) { |
730 | if (VN_CACHED(vp)) { | 728 | if (mapping->nrpages) { |
731 | WARN_ON(need_i_mutex == 0); | 729 | WARN_ON(need_i_mutex == 0); |
732 | xfs_inval_cached_trace(xip, pos, -1, | 730 | xfs_inval_cached_trace(xip, pos, -1, |
733 | (pos & PAGE_CACHE_MASK), -1); | 731 | (pos & PAGE_CACHE_MASK), -1); |
@@ -744,7 +742,6 @@ retry: | |||
744 | mutex_unlock(&inode->i_mutex); | 742 | mutex_unlock(&inode->i_mutex); |
745 | 743 | ||
746 | iolock = XFS_IOLOCK_SHARED; | 744 | iolock = XFS_IOLOCK_SHARED; |
747 | locktype = VRWLOCK_WRITE_DIRECT; | ||
748 | need_i_mutex = 0; | 745 | need_i_mutex = 0; |
749 | } | 746 | } |
750 | 747 | ||
@@ -781,15 +778,15 @@ retry: | |||
781 | 778 | ||
782 | if (ret == -ENOSPC && | 779 | if (ret == -ENOSPC && |
783 | DM_EVENT_ENABLED(xip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) { | 780 | DM_EVENT_ENABLED(xip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) { |
784 | xfs_rwunlock(xip, locktype); | 781 | xfs_iunlock(xip, iolock); |
785 | if (need_i_mutex) | 782 | if (need_i_mutex) |
786 | mutex_unlock(&inode->i_mutex); | 783 | mutex_unlock(&inode->i_mutex); |
787 | error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp, | 784 | error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, xip, |
788 | DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL, | 785 | DM_RIGHT_NULL, xip, DM_RIGHT_NULL, NULL, NULL, |
789 | 0, 0, 0); /* Delay flag intentionally unused */ | 786 | 0, 0, 0); /* Delay flag intentionally unused */ |
790 | if (need_i_mutex) | 787 | if (need_i_mutex) |
791 | mutex_lock(&inode->i_mutex); | 788 | mutex_lock(&inode->i_mutex); |
792 | xfs_rwlock(xip, locktype); | 789 | xfs_ilock(xip, iolock); |
793 | if (error) | 790 | if (error) |
794 | goto out_unlock_internal; | 791 | goto out_unlock_internal; |
795 | pos = xip->i_size; | 792 | pos = xip->i_size; |
@@ -817,7 +814,8 @@ retry: | |||
817 | /* Handle various SYNC-type writes */ | 814 | /* Handle various SYNC-type writes */ |
818 | if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) { | 815 | if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) { |
819 | int error2; | 816 | int error2; |
820 | xfs_rwunlock(xip, locktype); | 817 | |
818 | xfs_iunlock(xip, iolock); | ||
821 | if (need_i_mutex) | 819 | if (need_i_mutex) |
822 | mutex_unlock(&inode->i_mutex); | 820 | mutex_unlock(&inode->i_mutex); |
823 | error2 = sync_page_range(inode, mapping, pos, ret); | 821 | error2 = sync_page_range(inode, mapping, pos, ret); |
@@ -825,7 +823,7 @@ retry: | |||
825 | error = error2; | 823 | error = error2; |
826 | if (need_i_mutex) | 824 | if (need_i_mutex) |
827 | mutex_lock(&inode->i_mutex); | 825 | mutex_lock(&inode->i_mutex); |
828 | xfs_rwlock(xip, locktype); | 826 | xfs_ilock(xip, iolock); |
829 | error2 = xfs_write_sync_logforce(mp, xip); | 827 | error2 = xfs_write_sync_logforce(mp, xip); |
830 | if (!error) | 828 | if (!error) |
831 | error = error2; | 829 | error = error2; |
@@ -846,7 +844,7 @@ retry: | |||
846 | xip->i_d.di_size = xip->i_size; | 844 | xip->i_d.di_size = xip->i_size; |
847 | xfs_iunlock(xip, XFS_ILOCK_EXCL); | 845 | xfs_iunlock(xip, XFS_ILOCK_EXCL); |
848 | } | 846 | } |
849 | xfs_rwunlock(xip, locktype); | 847 | xfs_iunlock(xip, iolock); |
850 | out_unlock_mutex: | 848 | out_unlock_mutex: |
851 | if (need_i_mutex) | 849 | if (need_i_mutex) |
852 | mutex_unlock(&inode->i_mutex); | 850 | mutex_unlock(&inode->i_mutex); |
@@ -884,28 +882,23 @@ xfs_bdstrat_cb(struct xfs_buf *bp) | |||
884 | } | 882 | } |
885 | 883 | ||
886 | /* | 884 | /* |
887 | * Wrapper around bdstrat so that we can stop data | 885 | * Wrapper around bdstrat so that we can stop data from going to disk in case |
888 | * from going to disk in case we are shutting down the filesystem. | 886 | * we are shutting down the filesystem. Typically user data goes thru this |
889 | * Typically user data goes thru this path; one of the exceptions | 887 | * path; one of the exceptions is the superblock. |
890 | * is the superblock. | ||
891 | */ | 888 | */ |
892 | int | 889 | void |
893 | xfsbdstrat( | 890 | xfsbdstrat( |
894 | struct xfs_mount *mp, | 891 | struct xfs_mount *mp, |
895 | struct xfs_buf *bp) | 892 | struct xfs_buf *bp) |
896 | { | 893 | { |
897 | ASSERT(mp); | 894 | ASSERT(mp); |
898 | if (!XFS_FORCED_SHUTDOWN(mp)) { | 895 | if (!XFS_FORCED_SHUTDOWN(mp)) { |
899 | /* Grio redirection would go here | ||
900 | * if (XFS_BUF_IS_GRIO(bp)) { | ||
901 | */ | ||
902 | |||
903 | xfs_buf_iorequest(bp); | 896 | xfs_buf_iorequest(bp); |
904 | return 0; | 897 | return; |
905 | } | 898 | } |
906 | 899 | ||
907 | xfs_buftrace("XFSBDSTRAT IOERROR", bp); | 900 | xfs_buftrace("XFSBDSTRAT IOERROR", bp); |
908 | return (xfs_bioerror_relse(bp)); | 901 | xfs_bioerror_relse(bp); |
909 | } | 902 | } |
910 | 903 | ||
911 | /* | 904 | /* |
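The timestamp hunk in xfs_write() above only bumps mtime/ctime when mnt_want_write() grants write access to the mount, so writes that reach a read-only mount (and DMAPI "invisible" I/O) skip the update silently. A minimal userspace sketch of that guard, with stub functions standing in for the real mnt_want_write()/mnt_drop_write() helpers:

#include <stdbool.h>
#include <stdio.h>

static bool mount_read_only = true;     /* pretend the fs is mounted r/o */

/* Stand-in for mnt_want_write(): 0 on success, non-zero if writes are denied. */
static int fake_want_write(void) { return mount_read_only ? 1 : 0; }
static void fake_drop_write(void) { }

static void update_times(void) { puts("timestamps updated"); }

static void write_path(bool invisible_io)
{
        /* Mirror the patched condition: skip for invisible I/O and r/o mounts. */
        if (!invisible_io && !fake_want_write()) {
                update_times();
                fake_drop_write();
        } else {
                puts("timestamp update skipped");
        }
}

int main(void)
{
        write_path(false);              /* read-only mount: skipped */
        mount_read_only = false;
        write_path(false);              /* read-write mount: updated */
        return 0;
}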
diff --git a/fs/xfs/linux-2.6/xfs_lrw.h b/fs/xfs/linux-2.6/xfs_lrw.h index e200253139cf..e1d498b4ba7a 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.h +++ b/fs/xfs/linux-2.6/xfs_lrw.h | |||
@@ -68,7 +68,8 @@ extern void xfs_inval_cached_trace(struct xfs_inode *, | |||
68 | #define xfs_inval_cached_trace(ip, offset, len, first, last) | 68 | #define xfs_inval_cached_trace(ip, offset, len, first, last) |
69 | #endif | 69 | #endif |
70 | 70 | ||
71 | extern int xfsbdstrat(struct xfs_mount *, struct xfs_buf *); | 71 | /* errors from xfsbdstrat() must be extracted from the buffer */ |
72 | extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *); | ||
72 | extern int xfs_bdstrat_cb(struct xfs_buf *); | 73 | extern int xfs_bdstrat_cb(struct xfs_buf *); |
73 | extern int xfs_dev_is_read_only(struct xfs_mount *, char *); | 74 | extern int xfs_dev_is_read_only(struct xfs_mount *, char *); |
74 | 75 | ||
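xfsbdstrat() now returns void, and the comment added to xfs_lrw.h says any error must be extracted from the buffer itself. A tiny sketch of that convention, where the submit path records its result in the request object rather than in a return value (invented names, not the xfs_buf API):

#include <errno.h>
#include <stdio.h>

struct buf {
        int b_error;            /* set by the submit path, read by the caller */
};

/* Stand-in for xfsbdstrat(): never reports an error through its return value. */
static void submit_buf(struct buf *bp, int fs_shut_down)
{
        if (fs_shut_down) {
                bp->b_error = EIO;      /* error parked on the buffer */
                return;
        }
        bp->b_error = 0;                /* I/O issued successfully */
}

int main(void)
{
        struct buf bp;

        submit_buf(&bp, 1);
        if (bp.b_error)                 /* caller digs the error out of the buffer */
                fprintf(stderr, "buffer I/O failed: %d\n", bp.b_error);
        return 0;
}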
diff --git a/fs/xfs/linux-2.6/xfs_stats.h b/fs/xfs/linux-2.6/xfs_stats.h index 8ba7a2fa6c1d..afd0b0d5fdb2 100644 --- a/fs/xfs/linux-2.6/xfs_stats.h +++ b/fs/xfs/linux-2.6/xfs_stats.h | |||
@@ -144,8 +144,8 @@ extern void xfs_cleanup_procfs(void); | |||
144 | # define XFS_STATS_DEC(count) | 144 | # define XFS_STATS_DEC(count) |
145 | # define XFS_STATS_ADD(count, inc) | 145 | # define XFS_STATS_ADD(count, inc) |
146 | 146 | ||
147 | static __inline void xfs_init_procfs(void) { }; | 147 | static inline void xfs_init_procfs(void) { }; |
148 | static __inline void xfs_cleanup_procfs(void) { }; | 148 | static inline void xfs_cleanup_procfs(void) { }; |
149 | 149 | ||
150 | #endif /* !CONFIG_PROC_FS */ | 150 | #endif /* !CONFIG_PROC_FS */ |
151 | 151 | ||
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 8831d9518790..865eb708aa95 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c | |||
@@ -896,7 +896,8 @@ xfs_fs_write_inode( | |||
896 | struct inode *inode, | 896 | struct inode *inode, |
897 | int sync) | 897 | int sync) |
898 | { | 898 | { |
899 | int error = 0, flags = FLUSH_INODE; | 899 | int error = 0; |
900 | int flags = 0; | ||
900 | 901 | ||
901 | xfs_itrace_entry(XFS_I(inode)); | 902 | xfs_itrace_entry(XFS_I(inode)); |
902 | if (sync) { | 903 | if (sync) { |
@@ -934,7 +935,7 @@ xfs_fs_clear_inode( | |||
934 | xfs_inactive(ip); | 935 | xfs_inactive(ip); |
935 | xfs_iflags_clear(ip, XFS_IMODIFIED); | 936 | xfs_iflags_clear(ip, XFS_IMODIFIED); |
936 | if (xfs_reclaim(ip)) | 937 | if (xfs_reclaim(ip)) |
937 | panic("%s: cannot reclaim 0x%p\n", __FUNCTION__, inode); | 938 | panic("%s: cannot reclaim 0x%p\n", __func__, inode); |
938 | } | 939 | } |
939 | 940 | ||
940 | ASSERT(XFS_I(inode) == NULL); | 941 | ASSERT(XFS_I(inode) == NULL); |
@@ -1027,8 +1028,7 @@ xfs_sync_worker( | |||
1027 | int error; | 1028 | int error; |
1028 | 1029 | ||
1029 | if (!(mp->m_flags & XFS_MOUNT_RDONLY)) | 1030 | if (!(mp->m_flags & XFS_MOUNT_RDONLY)) |
1030 | error = xfs_sync(mp, SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR | | 1031 | error = xfs_sync(mp, SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR); |
1031 | SYNC_REFCACHE | SYNC_SUPER); | ||
1032 | mp->m_sync_seq++; | 1032 | mp->m_sync_seq++; |
1033 | wake_up(&mp->m_wait_single_sync_task); | 1033 | wake_up(&mp->m_wait_single_sync_task); |
1034 | } | 1034 | } |
@@ -1306,7 +1306,7 @@ xfs_fs_fill_super( | |||
1306 | void *data, | 1306 | void *data, |
1307 | int silent) | 1307 | int silent) |
1308 | { | 1308 | { |
1309 | struct inode *rootvp; | 1309 | struct inode *root; |
1310 | struct xfs_mount *mp = NULL; | 1310 | struct xfs_mount *mp = NULL; |
1311 | struct xfs_mount_args *args = xfs_args_allocate(sb, silent); | 1311 | struct xfs_mount_args *args = xfs_args_allocate(sb, silent); |
1312 | int error; | 1312 | int error; |
@@ -1344,19 +1344,18 @@ xfs_fs_fill_super( | |||
1344 | sb->s_time_gran = 1; | 1344 | sb->s_time_gran = 1; |
1345 | set_posix_acl_flag(sb); | 1345 | set_posix_acl_flag(sb); |
1346 | 1346 | ||
1347 | rootvp = igrab(mp->m_rootip->i_vnode); | 1347 | root = igrab(mp->m_rootip->i_vnode); |
1348 | if (!rootvp) { | 1348 | if (!root) { |
1349 | error = ENOENT; | 1349 | error = ENOENT; |
1350 | goto fail_unmount; | 1350 | goto fail_unmount; |
1351 | } | 1351 | } |
1352 | 1352 | if (is_bad_inode(root)) { | |
1353 | sb->s_root = d_alloc_root(vn_to_inode(rootvp)); | 1353 | error = EINVAL; |
1354 | if (!sb->s_root) { | ||
1355 | error = ENOMEM; | ||
1356 | goto fail_vnrele; | 1354 | goto fail_vnrele; |
1357 | } | 1355 | } |
1358 | if (is_bad_inode(sb->s_root->d_inode)) { | 1356 | sb->s_root = d_alloc_root(root); |
1359 | error = EINVAL; | 1357 | if (!sb->s_root) { |
1358 | error = ENOMEM; | ||
1360 | goto fail_vnrele; | 1359 | goto fail_vnrele; |
1361 | } | 1360 | } |
1362 | 1361 | ||
@@ -1378,7 +1377,7 @@ fail_vnrele: | |||
1378 | dput(sb->s_root); | 1377 | dput(sb->s_root); |
1379 | sb->s_root = NULL; | 1378 | sb->s_root = NULL; |
1380 | } else { | 1379 | } else { |
1381 | VN_RELE(rootvp); | 1380 | iput(root); |
1382 | } | 1381 | } |
1383 | 1382 | ||
1384 | fail_unmount: | 1383 | fail_unmount: |
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h index 3efcf45b14ab..3efb7c6d3303 100644 --- a/fs/xfs/linux-2.6/xfs_super.h +++ b/fs/xfs/linux-2.6/xfs_super.h | |||
@@ -50,13 +50,7 @@ extern void xfs_qm_exit(void); | |||
50 | # define set_posix_acl_flag(sb) do { } while (0) | 50 | # define set_posix_acl_flag(sb) do { } while (0) |
51 | #endif | 51 | #endif |
52 | 52 | ||
53 | #ifdef CONFIG_XFS_SECURITY | 53 | #define XFS_SECURITY_STRING "security attributes, " |
54 | # define XFS_SECURITY_STRING "security attributes, " | ||
55 | # define ENOSECURITY 0 | ||
56 | #else | ||
57 | # define XFS_SECURITY_STRING | ||
58 | # define ENOSECURITY EOPNOTSUPP | ||
59 | #endif | ||
60 | 54 | ||
61 | #ifdef CONFIG_XFS_RT | 55 | #ifdef CONFIG_XFS_RT |
62 | # define XFS_REALTIME_STRING "realtime, " | 56 | # define XFS_REALTIME_STRING "realtime, " |
diff --git a/fs/xfs/linux-2.6/xfs_vfs.h b/fs/xfs/linux-2.6/xfs_vfs.h index 4da03a4e3520..7e60c7776b1c 100644 --- a/fs/xfs/linux-2.6/xfs_vfs.h +++ b/fs/xfs/linux-2.6/xfs_vfs.h | |||
@@ -49,7 +49,6 @@ typedef struct bhv_vfs_sync_work { | |||
49 | #define SYNC_REFCACHE 0x0040 /* prune some of the nfs ref cache */ | 49 | #define SYNC_REFCACHE 0x0040 /* prune some of the nfs ref cache */ |
50 | #define SYNC_REMOUNT 0x0080 /* remount readonly, no dummy LRs */ | 50 | #define SYNC_REMOUNT 0x0080 /* remount readonly, no dummy LRs */ |
51 | #define SYNC_IOWAIT 0x0100 /* wait for all I/O to complete */ | 51 | #define SYNC_IOWAIT 0x0100 /* wait for all I/O to complete */ |
52 | #define SYNC_SUPER 0x0200 /* flush superblock to disk */ | ||
53 | 52 | ||
54 | /* | 53 | /* |
55 | * When remounting a filesystem read-only or freezing the filesystem, | 54 | * When remounting a filesystem read-only or freezing the filesystem, |
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h index b5ea418693b1..8b4d63ce8694 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.h +++ b/fs/xfs/linux-2.6/xfs_vnode.h | |||
@@ -23,8 +23,6 @@ struct bhv_vattr; | |||
23 | struct xfs_iomap; | 23 | struct xfs_iomap; |
24 | struct attrlist_cursor_kern; | 24 | struct attrlist_cursor_kern; |
25 | 25 | ||
26 | typedef struct dentry bhv_vname_t; | ||
27 | typedef __u64 bhv_vnumber_t; | ||
28 | typedef struct inode bhv_vnode_t; | 26 | typedef struct inode bhv_vnode_t; |
29 | 27 | ||
30 | #define VN_ISLNK(vp) S_ISLNK((vp)->i_mode) | 28 | #define VN_ISLNK(vp) S_ISLNK((vp)->i_mode) |
@@ -46,18 +44,6 @@ static inline struct inode *vn_to_inode(bhv_vnode_t *vnode) | |||
46 | } | 44 | } |
47 | 45 | ||
48 | /* | 46 | /* |
49 | * Values for the vop_rwlock/rwunlock flags parameter. | ||
50 | */ | ||
51 | typedef enum bhv_vrwlock { | ||
52 | VRWLOCK_NONE, | ||
53 | VRWLOCK_READ, | ||
54 | VRWLOCK_WRITE, | ||
55 | VRWLOCK_WRITE_DIRECT, | ||
56 | VRWLOCK_TRY_READ, | ||
57 | VRWLOCK_TRY_WRITE | ||
58 | } bhv_vrwlock_t; | ||
59 | |||
60 | /* | ||
61 | * Return values for xfs_inactive. A return value of | 47 | * Return values for xfs_inactive. A return value of |
62 | * VN_INACTIVE_NOCACHE implies that the file system behavior | 48 | * VN_INACTIVE_NOCACHE implies that the file system behavior |
63 | * has disassociated its state and bhv_desc_t from the vnode. | 49 | * has disassociated its state and bhv_desc_t from the vnode. |
@@ -73,12 +59,9 @@ typedef enum bhv_vrwlock { | |||
73 | #define IO_INVIS 0x00020 /* don't update inode timestamps */ | 59 | #define IO_INVIS 0x00020 /* don't update inode timestamps */ |
74 | 60 | ||
75 | /* | 61 | /* |
76 | * Flags for vop_iflush call | 62 | * Flags for xfs_inode_flush |
77 | */ | 63 | */ |
78 | #define FLUSH_SYNC 1 /* wait for flush to complete */ | 64 | #define FLUSH_SYNC 1 /* wait for flush to complete */ |
79 | #define FLUSH_INODE 2 /* flush the inode itself */ | ||
80 | #define FLUSH_LOG 4 /* force the last log entry for | ||
81 | * this inode out to disk */ | ||
82 | 65 | ||
83 | /* | 66 | /* |
84 | * Flush/Invalidate options for vop_toss/flush/flushinval_pages. | 67 | * Flush/Invalidate options for vop_toss/flush/flushinval_pages. |
@@ -226,13 +209,6 @@ static inline bhv_vnode_t *vn_grab(bhv_vnode_t *vp) | |||
226 | } | 209 | } |
227 | 210 | ||
228 | /* | 211 | /* |
229 | * Vname handling macros. | ||
230 | */ | ||
231 | #define VNAME(dentry) ((char *) (dentry)->d_name.name) | ||
232 | #define VNAMELEN(dentry) ((dentry)->d_name.len) | ||
233 | #define VNAME_TO_VNODE(dentry) (vn_from_inode((dentry)->d_inode)) | ||
234 | |||
235 | /* | ||
236 | * Dealing with bad inodes | 212 | * Dealing with bad inodes |
237 | */ | 213 | */ |
238 | static inline int VN_BAD(bhv_vnode_t *vp) | 214 | static inline int VN_BAD(bhv_vnode_t *vp) |
@@ -303,9 +279,9 @@ extern void xfs_itrace_hold(struct xfs_inode *, char *, int, inst_t *); | |||
303 | extern void _xfs_itrace_ref(struct xfs_inode *, char *, int, inst_t *); | 279 | extern void _xfs_itrace_ref(struct xfs_inode *, char *, int, inst_t *); |
304 | extern void xfs_itrace_rele(struct xfs_inode *, char *, int, inst_t *); | 280 | extern void xfs_itrace_rele(struct xfs_inode *, char *, int, inst_t *); |
305 | #define xfs_itrace_entry(ip) \ | 281 | #define xfs_itrace_entry(ip) \ |
306 | _xfs_itrace_entry(ip, __FUNCTION__, (inst_t *)__return_address) | 282 | _xfs_itrace_entry(ip, __func__, (inst_t *)__return_address) |
307 | #define xfs_itrace_exit(ip) \ | 283 | #define xfs_itrace_exit(ip) \ |
308 | _xfs_itrace_exit(ip, __FUNCTION__, (inst_t *)__return_address) | 284 | _xfs_itrace_exit(ip, __func__, (inst_t *)__return_address) |
309 | #define xfs_itrace_exit_tag(ip, tag) \ | 285 | #define xfs_itrace_exit_tag(ip, tag) \ |
310 | _xfs_itrace_exit(ip, tag, (inst_t *)__return_address) | 286 | _xfs_itrace_exit(ip, tag, (inst_t *)__return_address) |
311 | #define xfs_itrace_ref(ip) \ | 287 | #define xfs_itrace_ref(ip) \ |
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index 665babcca6a6..631ebb31b295 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c | |||
@@ -1291,7 +1291,7 @@ xfs_qm_dqflush( | |||
1291 | if (flags & XFS_QMOPT_DELWRI) { | 1291 | if (flags & XFS_QMOPT_DELWRI) { |
1292 | xfs_bdwrite(mp, bp); | 1292 | xfs_bdwrite(mp, bp); |
1293 | } else if (flags & XFS_QMOPT_ASYNC) { | 1293 | } else if (flags & XFS_QMOPT_ASYNC) { |
1294 | xfs_bawrite(mp, bp); | 1294 | error = xfs_bawrite(mp, bp); |
1295 | } else { | 1295 | } else { |
1296 | error = xfs_bwrite(mp, bp); | 1296 | error = xfs_bwrite(mp, bp); |
1297 | } | 1297 | } |
@@ -1439,9 +1439,7 @@ xfs_qm_dqpurge( | |||
1439 | uint flags) | 1439 | uint flags) |
1440 | { | 1440 | { |
1441 | xfs_dqhash_t *thishash; | 1441 | xfs_dqhash_t *thishash; |
1442 | xfs_mount_t *mp; | 1442 | xfs_mount_t *mp = dqp->q_mount; |
1443 | |||
1444 | mp = dqp->q_mount; | ||
1445 | 1443 | ||
1446 | ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp)); | 1444 | ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp)); |
1447 | ASSERT(XFS_DQ_IS_HASH_LOCKED(dqp->q_hash)); | 1445 | ASSERT(XFS_DQ_IS_HASH_LOCKED(dqp->q_hash)); |
@@ -1485,6 +1483,7 @@ xfs_qm_dqpurge( | |||
1485 | * we're unmounting, we do care, so we flush it and wait. | 1483 | * we're unmounting, we do care, so we flush it and wait. |
1486 | */ | 1484 | */ |
1487 | if (XFS_DQ_IS_DIRTY(dqp)) { | 1485 | if (XFS_DQ_IS_DIRTY(dqp)) { |
1486 | int error; | ||
1488 | xfs_dqtrace_entry(dqp, "DQPURGE ->DQFLUSH: DQDIRTY"); | 1487 | xfs_dqtrace_entry(dqp, "DQPURGE ->DQFLUSH: DQDIRTY"); |
1489 | /* dqflush unlocks dqflock */ | 1488 | /* dqflush unlocks dqflock */ |
1490 | /* | 1489 | /* |
@@ -1495,7 +1494,10 @@ xfs_qm_dqpurge( | |||
1495 | * We don't care about getting disk errors here. We need | 1494 | * We don't care about getting disk errors here. We need |
1496 | * to purge this dquot anyway, so we go ahead regardless. | 1495 | * to purge this dquot anyway, so we go ahead regardless. |
1497 | */ | 1496 | */ |
1498 | (void) xfs_qm_dqflush(dqp, XFS_QMOPT_SYNC); | 1497 | error = xfs_qm_dqflush(dqp, XFS_QMOPT_SYNC); |
1498 | if (error) | ||
1499 | xfs_fs_cmn_err(CE_WARN, mp, | ||
1500 | "xfs_qm_dqpurge: dquot %p flush failed", dqp); | ||
1499 | xfs_dqflock(dqp); | 1501 | xfs_dqflock(dqp); |
1500 | } | 1502 | } |
1501 | ASSERT(dqp->q_pincount == 0); | 1503 | ASSERT(dqp->q_pincount == 0); |
@@ -1580,12 +1582,18 @@ xfs_qm_dqflock_pushbuf_wait( | |||
1580 | XFS_INCORE_TRYLOCK); | 1582 | XFS_INCORE_TRYLOCK); |
1581 | if (bp != NULL) { | 1583 | if (bp != NULL) { |
1582 | if (XFS_BUF_ISDELAYWRITE(bp)) { | 1584 | if (XFS_BUF_ISDELAYWRITE(bp)) { |
1585 | int error; | ||
1583 | if (XFS_BUF_ISPINNED(bp)) { | 1586 | if (XFS_BUF_ISPINNED(bp)) { |
1584 | xfs_log_force(dqp->q_mount, | 1587 | xfs_log_force(dqp->q_mount, |
1585 | (xfs_lsn_t)0, | 1588 | (xfs_lsn_t)0, |
1586 | XFS_LOG_FORCE); | 1589 | XFS_LOG_FORCE); |
1587 | } | 1590 | } |
1588 | xfs_bawrite(dqp->q_mount, bp); | 1591 | error = xfs_bawrite(dqp->q_mount, bp); |
1592 | if (error) | ||
1593 | xfs_fs_cmn_err(CE_WARN, dqp->q_mount, | ||
1594 | "xfs_qm_dqflock_pushbuf_wait: " | ||
1595 | "pushbuf error %d on dqp %p, bp %p", | ||
1596 | error, dqp, bp); | ||
1589 | } else { | 1597 | } else { |
1590 | xfs_buf_relse(bp); | 1598 | xfs_buf_relse(bp); |
1591 | } | 1599 | } |
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c index 1800e8d1f646..36e05ca78412 100644 --- a/fs/xfs/quota/xfs_dquot_item.c +++ b/fs/xfs/quota/xfs_dquot_item.c | |||
@@ -146,6 +146,7 @@ xfs_qm_dquot_logitem_push( | |||
146 | xfs_dq_logitem_t *logitem) | 146 | xfs_dq_logitem_t *logitem) |
147 | { | 147 | { |
148 | xfs_dquot_t *dqp; | 148 | xfs_dquot_t *dqp; |
149 | int error; | ||
149 | 150 | ||
150 | dqp = logitem->qli_dquot; | 151 | dqp = logitem->qli_dquot; |
151 | 152 | ||
@@ -161,7 +162,11 @@ xfs_qm_dquot_logitem_push( | |||
161 | * lock without sleeping, then there must not have been | 162 | * lock without sleeping, then there must not have been |
162 | * anyone in the process of flushing the dquot. | 163 | * anyone in the process of flushing the dquot. |
163 | */ | 164 | */ |
164 | xfs_qm_dqflush(dqp, XFS_B_DELWRI); | 165 | error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI); |
166 | if (error) | ||
167 | xfs_fs_cmn_err(CE_WARN, dqp->q_mount, | ||
168 | "xfs_qm_dquot_logitem_push: push error %d on dqp %p", | ||
169 | error, dqp); | ||
165 | xfs_dqunlock(dqp); | 170 | xfs_dqunlock(dqp); |
166 | } | 171 | } |
167 | 172 | ||
@@ -262,11 +267,16 @@ xfs_qm_dquot_logitem_pushbuf( | |||
262 | XFS_LOG_FORCE); | 267 | XFS_LOG_FORCE); |
263 | } | 268 | } |
264 | if (dopush) { | 269 | if (dopush) { |
270 | int error; | ||
265 | #ifdef XFSRACEDEBUG | 271 | #ifdef XFSRACEDEBUG |
266 | delay_for_intr(); | 272 | delay_for_intr(); |
267 | delay(300); | 273 | delay(300); |
268 | #endif | 274 | #endif |
269 | xfs_bawrite(mp, bp); | 275 | error = xfs_bawrite(mp, bp); |
276 | if (error) | ||
277 | xfs_fs_cmn_err(CE_WARN, mp, | ||
278 | "xfs_qm_dquot_logitem_pushbuf: pushbuf error %d on qip %p, bp %p", | ||
279 | error, qip, bp); | ||
270 | } else { | 280 | } else { |
271 | xfs_buf_relse(bp); | 281 | xfs_buf_relse(bp); |
272 | } | 282 | } |
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 8e9c5ae6504d..40ea56409561 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c | |||
@@ -304,8 +304,11 @@ xfs_qm_unmount_quotadestroy( | |||
304 | * necessary data structures like quotainfo. This is also responsible for | 304 | * necessary data structures like quotainfo. This is also responsible for |
305 | * running a quotacheck as necessary. We are guaranteed that the superblock | 305 | * running a quotacheck as necessary. We are guaranteed that the superblock |
306 | * is consistently read in at this point. | 306 | * is consistently read in at this point. |
307 | * | ||
308 | * If we fail here, the mount will continue with quota turned off. We don't | ||
309 | * need to indicate success or failure at all. | ||
307 | */ | 310 | */ |
308 | int | 311 | void |
309 | xfs_qm_mount_quotas( | 312 | xfs_qm_mount_quotas( |
310 | xfs_mount_t *mp, | 313 | xfs_mount_t *mp, |
311 | int mfsi_flags) | 314 | int mfsi_flags) |
@@ -313,7 +316,6 @@ xfs_qm_mount_quotas( | |||
313 | int error = 0; | 316 | int error = 0; |
314 | uint sbf; | 317 | uint sbf; |
315 | 318 | ||
316 | |||
317 | /* | 319 | /* |
318 | * If quotas on realtime volumes is not supported, we disable | 320 | * If quotas on realtime volumes is not supported, we disable |
319 | * quotas immediately. | 321 | * quotas immediately. |
@@ -332,7 +334,8 @@ xfs_qm_mount_quotas( | |||
332 | * Allocate the quotainfo structure inside the mount struct, and | 334 | * Allocate the quotainfo structure inside the mount struct, and |
333 | * create quotainode(s), and change/rev superblock if necessary. | 335 | * create quotainode(s), and change/rev superblock if necessary. |
334 | */ | 336 | */ |
335 | if ((error = xfs_qm_init_quotainfo(mp))) { | 337 | error = xfs_qm_init_quotainfo(mp); |
338 | if (error) { | ||
336 | /* | 339 | /* |
337 | * We must turn off quotas. | 340 | * We must turn off quotas. |
338 | */ | 341 | */ |
@@ -344,12 +347,11 @@ xfs_qm_mount_quotas( | |||
344 | * If any of the quotas are not consistent, do a quotacheck. | 347 | * If any of the quotas are not consistent, do a quotacheck. |
345 | */ | 348 | */ |
346 | if (XFS_QM_NEED_QUOTACHECK(mp) && | 349 | if (XFS_QM_NEED_QUOTACHECK(mp) && |
347 | !(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) { | 350 | !(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) { |
348 | if ((error = xfs_qm_quotacheck(mp))) { | 351 | error = xfs_qm_quotacheck(mp); |
349 | /* Quotacheck has failed and quotas have | 352 | if (error) { |
350 | * been disabled. | 353 | /* Quotacheck failed and disabled quotas. */ |
351 | */ | 354 | return; |
352 | return XFS_ERROR(error); | ||
353 | } | 355 | } |
354 | } | 356 | } |
355 | /* | 357 | /* |
@@ -357,12 +359,10 @@ xfs_qm_mount_quotas( | |||
357 | * quotachecked status, since we won't be doing accounting for | 359 | * quotachecked status, since we won't be doing accounting for |
358 | * that type anymore. | 360 | * that type anymore. |
359 | */ | 361 | */ |
360 | if (!XFS_IS_UQUOTA_ON(mp)) { | 362 | if (!XFS_IS_UQUOTA_ON(mp)) |
361 | mp->m_qflags &= ~XFS_UQUOTA_CHKD; | 363 | mp->m_qflags &= ~XFS_UQUOTA_CHKD; |
362 | } | 364 | if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp))) |
363 | if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp))) { | ||
364 | mp->m_qflags &= ~XFS_OQUOTA_CHKD; | 365 | mp->m_qflags &= ~XFS_OQUOTA_CHKD; |
365 | } | ||
366 | 366 | ||
367 | write_changes: | 367 | write_changes: |
368 | /* | 368 | /* |
@@ -392,7 +392,7 @@ xfs_qm_mount_quotas( | |||
392 | xfs_fs_cmn_err(CE_WARN, mp, | 392 | xfs_fs_cmn_err(CE_WARN, mp, |
393 | "Failed to initialize disk quotas."); | 393 | "Failed to initialize disk quotas."); |
394 | } | 394 | } |
395 | return XFS_ERROR(error); | 395 | return; |
396 | } | 396 | } |
397 | 397 | ||
398 | /* | 398 | /* |
@@ -1438,7 +1438,7 @@ xfs_qm_qino_alloc( | |||
1438 | } | 1438 | } |
1439 | 1439 | ||
1440 | 1440 | ||
1441 | STATIC int | 1441 | STATIC void |
1442 | xfs_qm_reset_dqcounts( | 1442 | xfs_qm_reset_dqcounts( |
1443 | xfs_mount_t *mp, | 1443 | xfs_mount_t *mp, |
1444 | xfs_buf_t *bp, | 1444 | xfs_buf_t *bp, |
@@ -1478,8 +1478,6 @@ xfs_qm_reset_dqcounts( | |||
1478 | ddq->d_rtbwarns = 0; | 1478 | ddq->d_rtbwarns = 0; |
1479 | ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1); | 1479 | ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1); |
1480 | } | 1480 | } |
1481 | |||
1482 | return 0; | ||
1483 | } | 1481 | } |
1484 | 1482 | ||
1485 | STATIC int | 1483 | STATIC int |
@@ -1520,7 +1518,7 @@ xfs_qm_dqiter_bufs( | |||
1520 | if (error) | 1518 | if (error) |
1521 | break; | 1519 | break; |
1522 | 1520 | ||
1523 | (void) xfs_qm_reset_dqcounts(mp, bp, firstid, type); | 1521 | xfs_qm_reset_dqcounts(mp, bp, firstid, type); |
1524 | xfs_bdwrite(mp, bp); | 1522 | xfs_bdwrite(mp, bp); |
1525 | /* | 1523 | /* |
1526 | * goto the next block. | 1524 | * goto the next block. |
@@ -1810,7 +1808,7 @@ xfs_qm_dqusage_adjust( | |||
1810 | * Now release the inode. This will send it to 'inactive', and | 1808 | * Now release the inode. This will send it to 'inactive', and |
1811 | * possibly even free blocks. | 1809 | * possibly even free blocks. |
1812 | */ | 1810 | */ |
1813 | VN_RELE(XFS_ITOV(ip)); | 1811 | IRELE(ip); |
1814 | 1812 | ||
1815 | /* | 1813 | /* |
1816 | * Goto next inode. | 1814 | * Goto next inode. |
@@ -1880,6 +1878,14 @@ xfs_qm_quotacheck( | |||
1880 | } while (! done); | 1878 | } while (! done); |
1881 | 1879 | ||
1882 | /* | 1880 | /* |
1881 | * We've made all the changes that we need to make incore. | ||
1882 | * Flush them down to disk buffers if everything was updated | ||
1883 | * successfully. | ||
1884 | */ | ||
1885 | if (!error) | ||
1886 | error = xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI); | ||
1887 | |||
1888 | /* | ||
1883 | * We can get this error if we couldn't do a dquot allocation inside | 1889 | * We can get this error if we couldn't do a dquot allocation inside |
1884 | * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the | 1890 | * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the |
1885 | * dirty dquots that might be cached, we just want to get rid of them | 1891 | * dirty dquots that might be cached, we just want to get rid of them |
@@ -1890,11 +1896,6 @@ xfs_qm_quotacheck( | |||
1890 | xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_QUOTAOFF); | 1896 | xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_QUOTAOFF); |
1891 | goto error_return; | 1897 | goto error_return; |
1892 | } | 1898 | } |
1893 | /* | ||
1894 | * We've made all the changes that we need to make incore. | ||
1895 | * Now flush_them down to disk buffers. | ||
1896 | */ | ||
1897 | xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI); | ||
1898 | 1899 | ||
1899 | /* | 1900 | /* |
1900 | * We didn't log anything, because if we crashed, we'll have to | 1901 | * We didn't log anything, because if we crashed, we'll have to |
@@ -1926,7 +1927,10 @@ xfs_qm_quotacheck( | |||
1926 | ASSERT(mp->m_quotainfo != NULL); | 1927 | ASSERT(mp->m_quotainfo != NULL); |
1927 | ASSERT(xfs_Gqm != NULL); | 1928 | ASSERT(xfs_Gqm != NULL); |
1928 | xfs_qm_destroy_quotainfo(mp); | 1929 | xfs_qm_destroy_quotainfo(mp); |
1929 | (void)xfs_mount_reset_sbqflags(mp); | 1930 | if (xfs_mount_reset_sbqflags(mp)) { |
1931 | cmn_err(CE_WARN, "XFS quotacheck %s: " | ||
1932 | "Failed to reset quota flags.", mp->m_fsname); | ||
1933 | } | ||
1930 | } else { | 1934 | } else { |
1931 | cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname); | 1935 | cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname); |
1932 | } | 1936 | } |
@@ -1968,7 +1972,7 @@ xfs_qm_init_quotainos( | |||
1968 | if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, | 1972 | if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, |
1969 | 0, 0, &gip, 0))) { | 1973 | 0, 0, &gip, 0))) { |
1970 | if (uip) | 1974 | if (uip) |
1971 | VN_RELE(XFS_ITOV(uip)); | 1975 | IRELE(uip); |
1972 | return XFS_ERROR(error); | 1976 | return XFS_ERROR(error); |
1973 | } | 1977 | } |
1974 | } | 1978 | } |
@@ -1999,7 +2003,7 @@ xfs_qm_init_quotainos( | |||
1999 | sbflags | XFS_SB_GQUOTINO, flags); | 2003 | sbflags | XFS_SB_GQUOTINO, flags); |
2000 | if (error) { | 2004 | if (error) { |
2001 | if (uip) | 2005 | if (uip) |
2002 | VN_RELE(XFS_ITOV(uip)); | 2006 | IRELE(uip); |
2003 | 2007 | ||
2004 | return XFS_ERROR(error); | 2008 | return XFS_ERROR(error); |
2005 | } | 2009 | } |
@@ -2093,12 +2097,17 @@ xfs_qm_shake_freelist( | |||
2093 | * dirty dquots. | 2097 | * dirty dquots. |
2094 | */ | 2098 | */ |
2095 | if (XFS_DQ_IS_DIRTY(dqp)) { | 2099 | if (XFS_DQ_IS_DIRTY(dqp)) { |
2100 | int error; | ||
2096 | xfs_dqtrace_entry(dqp, "DQSHAKE: DQDIRTY"); | 2101 | xfs_dqtrace_entry(dqp, "DQSHAKE: DQDIRTY"); |
2097 | /* | 2102 | /* |
2098 | * We flush it delayed write, so don't bother | 2103 | * We flush it delayed write, so don't bother |
2099 | * releasing the mplock. | 2104 | * releasing the mplock. |
2100 | */ | 2105 | */ |
2101 | (void) xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI); | 2106 | error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI); |
2107 | if (error) { | ||
2108 | xfs_fs_cmn_err(CE_WARN, dqp->q_mount, | ||
2109 | "xfs_qm_shake_freelist: dquot %p flush failed", dqp); | ||
2110 | } | ||
2102 | xfs_dqunlock(dqp); /* dqflush unlocks dqflock */ | 2111 | xfs_dqunlock(dqp); /* dqflush unlocks dqflock */ |
2103 | dqp = dqp->dq_flnext; | 2112 | dqp = dqp->dq_flnext; |
2104 | continue; | 2113 | continue; |
@@ -2265,12 +2274,17 @@ xfs_qm_dqreclaim_one(void) | |||
2265 | * dirty dquots. | 2274 | * dirty dquots. |
2266 | */ | 2275 | */ |
2267 | if (XFS_DQ_IS_DIRTY(dqp)) { | 2276 | if (XFS_DQ_IS_DIRTY(dqp)) { |
2277 | int error; | ||
2268 | xfs_dqtrace_entry(dqp, "DQRECLAIM: DQDIRTY"); | 2278 | xfs_dqtrace_entry(dqp, "DQRECLAIM: DQDIRTY"); |
2269 | /* | 2279 | /* |
2270 | * We flush it delayed write, so don't bother | 2280 | * We flush it delayed write, so don't bother |
2271 | * releasing the freelist lock. | 2281 | * releasing the freelist lock. |
2272 | */ | 2282 | */ |
2273 | (void) xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI); | 2283 | error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI); |
2284 | if (error) { | ||
2285 | xfs_fs_cmn_err(CE_WARN, dqp->q_mount, | ||
2286 | "xfs_qm_dqreclaim: dquot %p flush failed", dqp); | ||
2287 | } | ||
2274 | xfs_dqunlock(dqp); /* dqflush unlocks dqflock */ | 2288 | xfs_dqunlock(dqp); /* dqflush unlocks dqflock */ |
2275 | continue; | 2289 | continue; |
2276 | } | 2290 | } |
@@ -2378,9 +2392,9 @@ xfs_qm_write_sb_changes( | |||
2378 | } | 2392 | } |
2379 | 2393 | ||
2380 | xfs_mod_sb(tp, flags); | 2394 | xfs_mod_sb(tp, flags); |
2381 | (void) xfs_trans_commit(tp, 0); | 2395 | error = xfs_trans_commit(tp, 0); |
2382 | 2396 | ||
2383 | return 0; | 2397 | return error; |
2384 | } | 2398 | } |
2385 | 2399 | ||
2386 | 2400 | ||
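xfs_qm_mount_quotas() becomes void because, per the new comment, a failure here simply leaves the filesystem mounted with quotas turned off rather than failing the mount. A compact sketch of that degrade-instead-of-fail shape (invented names, not the XFS quota code):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct mount_info { bool quotas_on; };

static int init_quota_structures(void) { return ENOMEM; /* pretend allocation failed */ }

/* Returns nothing: on failure the mount just continues without quotas. */
static void mount_quotas(struct mount_info *mp)
{
        if (init_quota_structures()) {
                fprintf(stderr, "warning: quota init failed, continuing without quotas\n");
                mp->quotas_on = false;
                return;
        }
        mp->quotas_on = true;
}

int main(void)
{
        struct mount_info m = { .quotas_on = false };

        mount_quotas(&m);
        printf("mounted, quotas %s\n", m.quotas_on ? "on" : "off");
        return 0;
}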
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h index baf537c1c177..cd2300e374af 100644 --- a/fs/xfs/quota/xfs_qm.h +++ b/fs/xfs/quota/xfs_qm.h | |||
@@ -165,7 +165,7 @@ typedef struct xfs_dquot_acct { | |||
165 | #define XFS_QM_RELE(xqm) ((xqm)->qm_nrefs--) | 165 | #define XFS_QM_RELE(xqm) ((xqm)->qm_nrefs--) |
166 | 166 | ||
167 | extern void xfs_qm_destroy_quotainfo(xfs_mount_t *); | 167 | extern void xfs_qm_destroy_quotainfo(xfs_mount_t *); |
168 | extern int xfs_qm_mount_quotas(xfs_mount_t *, int); | 168 | extern void xfs_qm_mount_quotas(xfs_mount_t *, int); |
169 | extern int xfs_qm_quotacheck(xfs_mount_t *); | 169 | extern int xfs_qm_quotacheck(xfs_mount_t *); |
170 | extern void xfs_qm_unmount_quotadestroy(xfs_mount_t *); | 170 | extern void xfs_qm_unmount_quotadestroy(xfs_mount_t *); |
171 | extern int xfs_qm_unmount_quotas(xfs_mount_t *); | 171 | extern int xfs_qm_unmount_quotas(xfs_mount_t *); |
diff --git a/fs/xfs/quota/xfs_qm_stats.h b/fs/xfs/quota/xfs_qm_stats.h index a50ffabcf554..5b964fc0dc09 100644 --- a/fs/xfs/quota/xfs_qm_stats.h +++ b/fs/xfs/quota/xfs_qm_stats.h | |||
@@ -45,8 +45,8 @@ extern void xfs_qm_cleanup_procfs(void); | |||
45 | 45 | ||
46 | # define XQM_STATS_INC(count) do { } while (0) | 46 | # define XQM_STATS_INC(count) do { } while (0) |
47 | 47 | ||
48 | static __inline void xfs_qm_init_procfs(void) { }; | 48 | static inline void xfs_qm_init_procfs(void) { }; |
49 | static __inline void xfs_qm_cleanup_procfs(void) { }; | 49 | static inline void xfs_qm_cleanup_procfs(void) { }; |
50 | 50 | ||
51 | #endif | 51 | #endif |
52 | 52 | ||
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index d2b8be7e75f9..8342823dbdc3 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c | |||
@@ -279,9 +279,12 @@ xfs_qm_scall_quotaoff( | |||
279 | 279 | ||
280 | /* | 280 | /* |
281 | * Write the LI_QUOTAOFF log record, and do SB changes atomically, | 281 | * Write the LI_QUOTAOFF log record, and do SB changes atomically, |
282 | * and synchronously. | 282 | * and synchronously. If we fail to write, we should abort the |
283 | * operation as it cannot be recovered safely if we crash. | ||
283 | */ | 284 | */ |
284 | xfs_qm_log_quotaoff(mp, &qoffstart, flags); | 285 | error = xfs_qm_log_quotaoff(mp, &qoffstart, flags); |
286 | if (error) | ||
287 | goto out_error; | ||
285 | 288 | ||
286 | /* | 289 | /* |
287 | * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct | 290 | * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct |
@@ -337,7 +340,12 @@ xfs_qm_scall_quotaoff( | |||
337 | * So, we have QUOTAOFF start and end logitems; the start | 340 | * So, we have QUOTAOFF start and end logitems; the start |
338 | * logitem won't get overwritten until the end logitem appears... | 341 | * logitem won't get overwritten until the end logitem appears... |
339 | */ | 342 | */ |
340 | xfs_qm_log_quotaoff_end(mp, qoffstart, flags); | 343 | error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags); |
344 | if (error) { | ||
345 | /* We're screwed now. Shutdown is the only option. */ | ||
346 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); | ||
347 | goto out_error; | ||
348 | } | ||
341 | 349 | ||
342 | /* | 350 | /* |
343 | * If quotas is completely disabled, close shop. | 351 | * If quotas is completely disabled, close shop. |
@@ -361,6 +369,7 @@ xfs_qm_scall_quotaoff( | |||
361 | XFS_PURGE_INODE(XFS_QI_GQIP(mp)); | 369 | XFS_PURGE_INODE(XFS_QI_GQIP(mp)); |
362 | XFS_QI_GQIP(mp) = NULL; | 370 | XFS_QI_GQIP(mp) = NULL; |
363 | } | 371 | } |
372 | out_error: | ||
364 | mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); | 373 | mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); |
365 | 374 | ||
366 | return (error); | 375 | return (error); |
@@ -371,12 +380,11 @@ xfs_qm_scall_trunc_qfiles( | |||
371 | xfs_mount_t *mp, | 380 | xfs_mount_t *mp, |
372 | uint flags) | 381 | uint flags) |
373 | { | 382 | { |
374 | int error; | 383 | int error = 0, error2 = 0; |
375 | xfs_inode_t *qip; | 384 | xfs_inode_t *qip; |
376 | 385 | ||
377 | if (!capable(CAP_SYS_ADMIN)) | 386 | if (!capable(CAP_SYS_ADMIN)) |
378 | return XFS_ERROR(EPERM); | 387 | return XFS_ERROR(EPERM); |
379 | error = 0; | ||
380 | if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) { | 388 | if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) { |
381 | qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags); | 389 | qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags); |
382 | return XFS_ERROR(EINVAL); | 390 | return XFS_ERROR(EINVAL); |
@@ -384,22 +392,22 @@ xfs_qm_scall_trunc_qfiles( | |||
384 | 392 | ||
385 | if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) { | 393 | if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) { |
386 | error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip, 0); | 394 | error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip, 0); |
387 | if (! error) { | 395 | if (!error) { |
388 | (void) xfs_truncate_file(mp, qip); | 396 | error = xfs_truncate_file(mp, qip); |
389 | VN_RELE(XFS_ITOV(qip)); | 397 | IRELE(qip); |
390 | } | 398 | } |
391 | } | 399 | } |
392 | 400 | ||
393 | if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) && | 401 | if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) && |
394 | mp->m_sb.sb_gquotino != NULLFSINO) { | 402 | mp->m_sb.sb_gquotino != NULLFSINO) { |
395 | error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0); | 403 | error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0); |
396 | if (! error) { | 404 | if (!error2) { |
397 | (void) xfs_truncate_file(mp, qip); | 405 | error2 = xfs_truncate_file(mp, qip); |
398 | VN_RELE(XFS_ITOV(qip)); | 406 | IRELE(qip); |
399 | } | 407 | } |
400 | } | 408 | } |
401 | 409 | ||
402 | return (error); | 410 | return error ? error : error2; |
403 | } | 411 | } |
404 | 412 | ||
405 | 413 | ||
@@ -552,13 +560,13 @@ xfs_qm_scall_getqstat( | |||
552 | out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks; | 560 | out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks; |
553 | out->qs_uquota.qfs_nextents = uip->i_d.di_nextents; | 561 | out->qs_uquota.qfs_nextents = uip->i_d.di_nextents; |
554 | if (tempuqip) | 562 | if (tempuqip) |
555 | VN_RELE(XFS_ITOV(uip)); | 563 | IRELE(uip); |
556 | } | 564 | } |
557 | if (gip) { | 565 | if (gip) { |
558 | out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks; | 566 | out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks; |
559 | out->qs_gquota.qfs_nextents = gip->i_d.di_nextents; | 567 | out->qs_gquota.qfs_nextents = gip->i_d.di_nextents; |
560 | if (tempgqip) | 568 | if (tempgqip) |
561 | VN_RELE(XFS_ITOV(gip)); | 569 | IRELE(gip); |
562 | } | 570 | } |
563 | if (mp->m_quotainfo) { | 571 | if (mp->m_quotainfo) { |
564 | out->qs_incoredqs = XFS_QI_MPLNDQUOTS(mp); | 572 | out->qs_incoredqs = XFS_QI_MPLNDQUOTS(mp); |
@@ -726,12 +734,12 @@ xfs_qm_scall_setqlim( | |||
726 | xfs_trans_log_dquot(tp, dqp); | 734 | xfs_trans_log_dquot(tp, dqp); |
727 | 735 | ||
728 | xfs_dqtrace_entry(dqp, "Q_SETQLIM: COMMIT"); | 736 | xfs_dqtrace_entry(dqp, "Q_SETQLIM: COMMIT"); |
729 | xfs_trans_commit(tp, 0); | 737 | error = xfs_trans_commit(tp, 0); |
730 | xfs_qm_dqprint(dqp); | 738 | xfs_qm_dqprint(dqp); |
731 | xfs_qm_dqrele(dqp); | 739 | xfs_qm_dqrele(dqp); |
732 | mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); | 740 | mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); |
733 | 741 | ||
734 | return (0); | 742 | return error; |
735 | } | 743 | } |
736 | 744 | ||
737 | STATIC int | 745 | STATIC int |
@@ -1095,7 +1103,7 @@ again: | |||
1095 | * inactive code in hell. | 1103 | * inactive code in hell. |
1096 | */ | 1104 | */ |
1097 | if (vnode_refd) | 1105 | if (vnode_refd) |
1098 | VN_RELE(vp); | 1106 | IRELE(ip); |
1099 | XFS_MOUNT_ILOCK(mp); | 1107 | XFS_MOUNT_ILOCK(mp); |
1100 | /* | 1108 | /* |
1101 | * If an inode was inserted or removed, we gotta | 1109 | * If an inode was inserted or removed, we gotta |
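xfs_qm_scall_trunc_qfiles() above now truncates both quota files even when the first attempt fails, and hands back the first error it saw. A short sketch of that "attempt both, return the first failure" pattern:

#include <errno.h>
#include <stdio.h>

static int truncate_quota_file(int fail) { return fail ? EIO : 0; }

static int trunc_both(int fail_user, int fail_group)
{
        int error, error2;

        error = truncate_quota_file(fail_user);    /* user quota file */
        error2 = truncate_quota_file(fail_group);  /* group/project quota file */

        return error ? error : error2;             /* first failure wins */
}

int main(void)
{
        printf("result: %d\n", trunc_both(0, 1));
        return 0;
}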
diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c index 129067cfcb86..0b75d302508f 100644 --- a/fs/xfs/support/ktrace.c +++ b/fs/xfs/support/ktrace.c | |||
@@ -24,7 +24,7 @@ static int ktrace_zentries; | |||
24 | void __init | 24 | void __init |
25 | ktrace_init(int zentries) | 25 | ktrace_init(int zentries) |
26 | { | 26 | { |
27 | ktrace_zentries = zentries; | 27 | ktrace_zentries = roundup_pow_of_two(zentries); |
28 | 28 | ||
29 | ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t), | 29 | ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t), |
30 | "ktrace_hdr"); | 30 | "ktrace_hdr"); |
@@ -47,13 +47,16 @@ ktrace_uninit(void) | |||
47 | * ktrace_alloc() | 47 | * ktrace_alloc() |
48 | * | 48 | * |
49 | * Allocate a ktrace header and enough buffering for the given | 49 | * Allocate a ktrace header and enough buffering for the given |
50 | * number of entries. | 50 | * number of entries. Round the number of entries up to a |
51 | * power of 2 so we can do fast masking to get the index from | ||
52 | * the atomic index counter. | ||
51 | */ | 53 | */ |
52 | ktrace_t * | 54 | ktrace_t * |
53 | ktrace_alloc(int nentries, unsigned int __nocast sleep) | 55 | ktrace_alloc(int nentries, unsigned int __nocast sleep) |
54 | { | 56 | { |
55 | ktrace_t *ktp; | 57 | ktrace_t *ktp; |
56 | ktrace_entry_t *ktep; | 58 | ktrace_entry_t *ktep; |
59 | int entries; | ||
57 | 60 | ||
58 | ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep); | 61 | ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep); |
59 | 62 | ||
@@ -70,11 +73,12 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep) | |||
70 | /* | 73 | /* |
71 | * Special treatment for buffers with the ktrace_zentries entries | 74 | * Special treatment for buffers with the ktrace_zentries entries |
72 | */ | 75 | */ |
73 | if (nentries == ktrace_zentries) { | 76 | entries = roundup_pow_of_two(nentries); |
77 | if (entries == ktrace_zentries) { | ||
74 | ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone, | 78 | ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone, |
75 | sleep); | 79 | sleep); |
76 | } else { | 80 | } else { |
77 | ktep = (ktrace_entry_t*)kmem_zalloc((nentries * sizeof(*ktep)), | 81 | ktep = (ktrace_entry_t*)kmem_zalloc((entries * sizeof(*ktep)), |
78 | sleep | KM_LARGE); | 82 | sleep | KM_LARGE); |
79 | } | 83 | } |
80 | 84 | ||
@@ -91,8 +95,10 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep) | |||
91 | } | 95 | } |
92 | 96 | ||
93 | ktp->kt_entries = ktep; | 97 | ktp->kt_entries = ktep; |
94 | ktp->kt_nentries = nentries; | 98 | ktp->kt_nentries = entries; |
95 | ktp->kt_index = 0; | 99 | ASSERT(is_power_of_2(entries)); |
100 | ktp->kt_index_mask = entries - 1; | ||
101 | atomic_set(&ktp->kt_index, 0); | ||
96 | ktp->kt_rollover = 0; | 102 | ktp->kt_rollover = 0; |
97 | return ktp; | 103 | return ktp; |
98 | } | 104 | } |
@@ -151,8 +157,6 @@ ktrace_enter( | |||
151 | void *val14, | 157 | void *val14, |
152 | void *val15) | 158 | void *val15) |
153 | { | 159 | { |
154 | static DEFINE_SPINLOCK(wrap_lock); | ||
155 | unsigned long flags; | ||
156 | int index; | 160 | int index; |
157 | ktrace_entry_t *ktep; | 161 | ktrace_entry_t *ktep; |
158 | 162 | ||
@@ -161,12 +165,8 @@ ktrace_enter( | |||
161 | /* | 165 | /* |
162 | * Grab an entry by pushing the index up to the next one. | 166 | * Grab an entry by pushing the index up to the next one. |
163 | */ | 167 | */ |
164 | spin_lock_irqsave(&wrap_lock, flags); | 168 | index = atomic_add_return(1, &ktp->kt_index); |
165 | index = ktp->kt_index; | 169 | index = (index - 1) & ktp->kt_index_mask; |
166 | if (++ktp->kt_index == ktp->kt_nentries) | ||
167 | ktp->kt_index = 0; | ||
168 | spin_unlock_irqrestore(&wrap_lock, flags); | ||
169 | |||
170 | if (!ktp->kt_rollover && index == ktp->kt_nentries - 1) | 170 | if (!ktp->kt_rollover && index == ktp->kt_nentries - 1) |
171 | ktp->kt_rollover = 1; | 171 | ktp->kt_rollover = 1; |
172 | 172 | ||
@@ -199,11 +199,12 @@ int | |||
199 | ktrace_nentries( | 199 | ktrace_nentries( |
200 | ktrace_t *ktp) | 200 | ktrace_t *ktp) |
201 | { | 201 | { |
202 | if (ktp == NULL) { | 202 | int index; |
203 | if (ktp == NULL) | ||
203 | return 0; | 204 | return 0; |
204 | } | ||
205 | 205 | ||
206 | return (ktp->kt_rollover ? ktp->kt_nentries : ktp->kt_index); | 206 | index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask; |
207 | return (ktp->kt_rollover ? ktp->kt_nentries : index); | ||
207 | } | 208 | } |
208 | 209 | ||
209 | /* | 210 | /* |
@@ -228,7 +229,7 @@ ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp) | |||
228 | int nentries; | 229 | int nentries; |
229 | 230 | ||
230 | if (ktp->kt_rollover) | 231 | if (ktp->kt_rollover) |
231 | index = ktp->kt_index; | 232 | index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask; |
232 | else | 233 | else |
233 | index = 0; | 234 | index = 0; |
234 | 235 | ||
diff --git a/fs/xfs/support/ktrace.h b/fs/xfs/support/ktrace.h index 56e72b40a859..741d6947ca60 100644 --- a/fs/xfs/support/ktrace.h +++ b/fs/xfs/support/ktrace.h | |||
@@ -30,7 +30,8 @@ typedef struct ktrace_entry { | |||
30 | */ | 30 | */ |
31 | typedef struct ktrace { | 31 | typedef struct ktrace { |
32 | int kt_nentries; /* number of entries in trace buf */ | 32 | int kt_nentries; /* number of entries in trace buf */ |
33 | int kt_index; /* current index in entries */ | 33 | atomic_t kt_index; /* current index in entries */ |
34 | unsigned int kt_index_mask; | ||
34 | int kt_rollover; | 35 | int kt_rollover; |
35 | ktrace_entry_t *kt_entries; /* buffer of entries */ | 36 | ktrace_entry_t *kt_entries; /* buffer of entries */ |
36 | } ktrace_t; | 37 | } ktrace_t; |
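The ktrace changes above drop the wrap_lock spinlock: the entry index becomes an atomic counter, and ktrace_alloc() rounds the buffer up to a power of two so a slot is just the counter masked with (entries - 1), exactly as the new comment explains. A self-contained userspace sketch of that indexing scheme, using C11 atomics in place of the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

#define ENTRIES 8                       /* must be a power of two */

static atomic_uint index_counter;       /* monotonically increasing */
static int ring[ENTRIES];

/* Claim the next slot without a lock: bump the counter, then mask. */
static unsigned int claim_slot(void)
{
        unsigned int idx = atomic_fetch_add(&index_counter, 1);
        return idx & (ENTRIES - 1);
}

int main(void)
{
        for (int i = 0; i < 20; i++)
                ring[claim_slot()] = i; /* older entries simply get overwritten */

        for (int i = 0; i < ENTRIES; i++)
                printf("%d ", ring[i]);
        printf("\n");
        return 0;
}

The kernel version uses atomic_add_return(1, ...) and subtracts one, which is the same thing as a fetch-add returning the old value; every caller still claims a distinct counter value, so no lock is needed around the wrap.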
diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h index 540e4c989825..765aaf65e2d3 100644 --- a/fs/xfs/xfs.h +++ b/fs/xfs/xfs.h | |||
@@ -22,7 +22,7 @@ | |||
22 | #define STATIC | 22 | #define STATIC |
23 | #define DEBUG 1 | 23 | #define DEBUG 1 |
24 | #define XFS_BUF_LOCK_TRACKING 1 | 24 | #define XFS_BUF_LOCK_TRACKING 1 |
25 | /* #define QUOTADEBUG 1 */ | 25 | #define QUOTADEBUG 1 |
26 | #endif | 26 | #endif |
27 | 27 | ||
28 | #ifdef CONFIG_XFS_TRACE | 28 | #ifdef CONFIG_XFS_TRACE |
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c index 7272fe39a92d..8e130b9720ae 100644 --- a/fs/xfs/xfs_acl.c +++ b/fs/xfs/xfs_acl.c | |||
@@ -307,12 +307,13 @@ xfs_acl_vset( | |||
307 | 307 | ||
308 | VN_HOLD(vp); | 308 | VN_HOLD(vp); |
309 | error = xfs_acl_allow_set(vp, kind); | 309 | error = xfs_acl_allow_set(vp, kind); |
310 | if (error) | ||
311 | goto out; | ||
312 | 310 | ||
313 | /* Incoming ACL exists, set file mode based on its value */ | 311 | /* Incoming ACL exists, set file mode based on its value */ |
314 | if (kind == _ACL_TYPE_ACCESS) | 312 | if (!error && kind == _ACL_TYPE_ACCESS) |
315 | xfs_acl_setmode(vp, xfs_acl, &basicperms); | 313 | error = xfs_acl_setmode(vp, xfs_acl, &basicperms); |
314 | |||
315 | if (error) | ||
316 | goto out; | ||
316 | 317 | ||
317 | /* | 318 | /* |
318 | * If we have more than std unix permissions, set up the actual attr. | 319 | * If we have more than std unix permissions, set up the actual attr. |
@@ -323,7 +324,7 @@ xfs_acl_vset( | |||
323 | if (!basicperms) { | 324 | if (!basicperms) { |
324 | xfs_acl_set_attr(vp, xfs_acl, kind, &error); | 325 | xfs_acl_set_attr(vp, xfs_acl, kind, &error); |
325 | } else { | 326 | } else { |
326 | xfs_acl_vremove(vp, _ACL_TYPE_ACCESS); | 327 | error = -xfs_acl_vremove(vp, _ACL_TYPE_ACCESS); |
327 | } | 328 | } |
328 | 329 | ||
329 | out: | 330 | out: |
@@ -707,7 +708,9 @@ xfs_acl_inherit( | |||
707 | 708 | ||
708 | memcpy(cacl, pdaclp, sizeof(xfs_acl_t)); | 709 | memcpy(cacl, pdaclp, sizeof(xfs_acl_t)); |
709 | xfs_acl_filter_mode(mode, cacl); | 710 | xfs_acl_filter_mode(mode, cacl); |
710 | xfs_acl_setmode(vp, cacl, &basicperms); | 711 | error = xfs_acl_setmode(vp, cacl, &basicperms); |
712 | if (error) | ||
713 | goto out_error; | ||
711 | 714 | ||
712 | /* | 715 | /* |
713 | * Set the Default and Access ACL on the file. The mode is already | 716 | * Set the Default and Access ACL on the file. The mode is already |
@@ -720,6 +723,7 @@ xfs_acl_inherit( | |||
720 | xfs_acl_set_attr(vp, pdaclp, _ACL_TYPE_DEFAULT, &error); | 723 | xfs_acl_set_attr(vp, pdaclp, _ACL_TYPE_DEFAULT, &error); |
721 | if (!error && !basicperms) | 724 | if (!error && !basicperms) |
722 | xfs_acl_set_attr(vp, cacl, _ACL_TYPE_ACCESS, &error); | 725 | xfs_acl_set_attr(vp, cacl, _ACL_TYPE_ACCESS, &error); |
726 | out_error: | ||
723 | _ACL_FREE(cacl); | 727 | _ACL_FREE(cacl); |
724 | return error; | 728 | return error; |
725 | } | 729 | } |
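The xfs_acl.c hunks above stop ignoring the return value of xfs_acl_setmode(): in xfs_acl_inherit() a failure now jumps to a label that still frees the child ACL buffer. A tiny, self-contained model of that allocate / fail / single-exit-cleanup shape is below; every name in it is a toy stand-in and the helpers merely simulate success or failure.

    #include <errno.h>
    #include <stdlib.h>

    struct acl { int mode; };

    /* Pretend ACL-to-mode helper: returns 0 or a positive errno, XFS-style. */
    static int setmode(struct acl *acl, int simulate_error)
    {
        (void)acl;
        return simulate_error ? EINVAL : 0;
    }

    static int inherit(int simulate_error)
    {
        struct acl *cacl = malloc(sizeof(*cacl));
        int error;

        if (!cacl)
            return ENOMEM;

        error = setmode(cacl, simulate_error);
        if (error)
            goto out_free;          /* previously this error was dropped */

        /* ... set the default and access attributes here ... */

    out_free:
        free(cacl);                 /* freed on success and failure alike */
        return error;
    }

    int main(void)
    {
        inherit(0);
        inherit(1);
        return 0;
    }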
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index bdbfbbee4959..1956f83489f1 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c | |||
@@ -45,7 +45,7 @@ | |||
45 | #define XFSA_FIXUP_BNO_OK 1 | 45 | #define XFSA_FIXUP_BNO_OK 1 |
46 | #define XFSA_FIXUP_CNT_OK 2 | 46 | #define XFSA_FIXUP_CNT_OK 2 |
47 | 47 | ||
48 | STATIC int | 48 | STATIC void |
49 | xfs_alloc_search_busy(xfs_trans_t *tp, | 49 | xfs_alloc_search_busy(xfs_trans_t *tp, |
50 | xfs_agnumber_t agno, | 50 | xfs_agnumber_t agno, |
51 | xfs_agblock_t bno, | 51 | xfs_agblock_t bno, |
@@ -55,24 +55,24 @@ xfs_alloc_search_busy(xfs_trans_t *tp, | |||
55 | ktrace_t *xfs_alloc_trace_buf; | 55 | ktrace_t *xfs_alloc_trace_buf; |
56 | 56 | ||
57 | #define TRACE_ALLOC(s,a) \ | 57 | #define TRACE_ALLOC(s,a) \ |
58 | xfs_alloc_trace_alloc(__FUNCTION__, s, a, __LINE__) | 58 | xfs_alloc_trace_alloc(__func__, s, a, __LINE__) |
59 | #define TRACE_FREE(s,a,b,x,f) \ | 59 | #define TRACE_FREE(s,a,b,x,f) \ |
60 | xfs_alloc_trace_free(__FUNCTION__, s, mp, a, b, x, f, __LINE__) | 60 | xfs_alloc_trace_free(__func__, s, mp, a, b, x, f, __LINE__) |
61 | #define TRACE_MODAGF(s,a,f) \ | 61 | #define TRACE_MODAGF(s,a,f) \ |
62 | xfs_alloc_trace_modagf(__FUNCTION__, s, mp, a, f, __LINE__) | 62 | xfs_alloc_trace_modagf(__func__, s, mp, a, f, __LINE__) |
63 | #define TRACE_BUSY(__FUNCTION__,s,ag,agb,l,sl,tp) \ | 63 | #define TRACE_BUSY(__func__,s,ag,agb,l,sl,tp) \ |
64 | xfs_alloc_trace_busy(__FUNCTION__, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSY, __LINE__) | 64 | xfs_alloc_trace_busy(__func__, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSY, __LINE__) |
65 | #define TRACE_UNBUSY(__FUNCTION__,s,ag,sl,tp) \ | 65 | #define TRACE_UNBUSY(__func__,s,ag,sl,tp) \ |
66 | xfs_alloc_trace_busy(__FUNCTION__, s, mp, ag, -1, -1, sl, tp, XFS_ALLOC_KTRACE_UNBUSY, __LINE__) | 66 | xfs_alloc_trace_busy(__func__, s, mp, ag, -1, -1, sl, tp, XFS_ALLOC_KTRACE_UNBUSY, __LINE__) |
67 | #define TRACE_BUSYSEARCH(__FUNCTION__,s,ag,agb,l,sl,tp) \ | 67 | #define TRACE_BUSYSEARCH(__func__,s,ag,agb,l,tp) \ |
68 | xfs_alloc_trace_busy(__FUNCTION__, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSYSEARCH, __LINE__) | 68 | xfs_alloc_trace_busy(__func__, s, mp, ag, agb, l, 0, tp, XFS_ALLOC_KTRACE_BUSYSEARCH, __LINE__) |
69 | #else | 69 | #else |
70 | #define TRACE_ALLOC(s,a) | 70 | #define TRACE_ALLOC(s,a) |
71 | #define TRACE_FREE(s,a,b,x,f) | 71 | #define TRACE_FREE(s,a,b,x,f) |
72 | #define TRACE_MODAGF(s,a,f) | 72 | #define TRACE_MODAGF(s,a,f) |
73 | #define TRACE_BUSY(s,a,ag,agb,l,sl,tp) | 73 | #define TRACE_BUSY(s,a,ag,agb,l,sl,tp) |
74 | #define TRACE_UNBUSY(fname,s,ag,sl,tp) | 74 | #define TRACE_UNBUSY(fname,s,ag,sl,tp) |
75 | #define TRACE_BUSYSEARCH(fname,s,ag,agb,l,sl,tp) | 75 | #define TRACE_BUSYSEARCH(fname,s,ag,agb,l,tp) |
76 | #endif /* XFS_ALLOC_TRACE */ | 76 | #endif /* XFS_ALLOC_TRACE */ |
77 | 77 | ||
78 | /* | 78 | /* |
@@ -93,7 +93,7 @@ STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *, | |||
93 | * Compute aligned version of the found extent. | 93 | * Compute aligned version of the found extent. |
94 | * Takes alignment and min length into account. | 94 | * Takes alignment and min length into account. |
95 | */ | 95 | */ |
96 | STATIC int /* success (>= minlen) */ | 96 | STATIC void |
97 | xfs_alloc_compute_aligned( | 97 | xfs_alloc_compute_aligned( |
98 | xfs_agblock_t foundbno, /* starting block in found extent */ | 98 | xfs_agblock_t foundbno, /* starting block in found extent */ |
99 | xfs_extlen_t foundlen, /* length in found extent */ | 99 | xfs_extlen_t foundlen, /* length in found extent */ |
@@ -116,7 +116,6 @@ xfs_alloc_compute_aligned( | |||
116 | } | 116 | } |
117 | *resbno = bno; | 117 | *resbno = bno; |
118 | *reslen = len; | 118 | *reslen = len; |
119 | return len >= minlen; | ||
120 | } | 119 | } |
121 | 120 | ||
122 | /* | 121 | /* |
@@ -837,9 +836,9 @@ xfs_alloc_ag_vextent_near( | |||
837 | if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i))) | 836 | if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i))) |
838 | goto error0; | 837 | goto error0; |
839 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); | 838 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); |
840 | if (!xfs_alloc_compute_aligned(ltbno, ltlen, | 839 | xfs_alloc_compute_aligned(ltbno, ltlen, args->alignment, |
841 | args->alignment, args->minlen, | 840 | args->minlen, &ltbnoa, &ltlena); |
842 | &ltbnoa, &ltlena)) | 841 | if (ltlena < args->minlen) |
843 | continue; | 842 | continue; |
844 | args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); | 843 | args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); |
845 | xfs_alloc_fix_len(args); | 844 | xfs_alloc_fix_len(args); |
@@ -958,9 +957,9 @@ xfs_alloc_ag_vextent_near( | |||
958 | if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i))) | 957 | if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i))) |
959 | goto error0; | 958 | goto error0; |
960 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); | 959 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); |
961 | if (xfs_alloc_compute_aligned(ltbno, ltlen, | 960 | xfs_alloc_compute_aligned(ltbno, ltlen, args->alignment, |
962 | args->alignment, args->minlen, | 961 | args->minlen, &ltbnoa, &ltlena); |
963 | &ltbnoa, &ltlena)) | 962 | if (ltlena >= args->minlen) |
964 | break; | 963 | break; |
965 | if ((error = xfs_alloc_decrement(bno_cur_lt, 0, &i))) | 964 | if ((error = xfs_alloc_decrement(bno_cur_lt, 0, &i))) |
966 | goto error0; | 965 | goto error0; |
@@ -974,9 +973,9 @@ xfs_alloc_ag_vextent_near( | |||
974 | if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i))) | 973 | if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i))) |
975 | goto error0; | 974 | goto error0; |
976 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); | 975 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); |
977 | if (xfs_alloc_compute_aligned(gtbno, gtlen, | 976 | xfs_alloc_compute_aligned(gtbno, gtlen, args->alignment, |
978 | args->alignment, args->minlen, | 977 | args->minlen, &gtbnoa, &gtlena); |
979 | &gtbnoa, &gtlena)) | 978 | if (gtlena >= args->minlen) |
980 | break; | 979 | break; |
981 | if ((error = xfs_alloc_increment(bno_cur_gt, 0, &i))) | 980 | if ((error = xfs_alloc_increment(bno_cur_gt, 0, &i))) |
982 | goto error0; | 981 | goto error0; |
@@ -2562,9 +2561,10 @@ xfs_alloc_clear_busy(xfs_trans_t *tp, | |||
2562 | 2561 | ||
2563 | 2562 | ||
2564 | /* | 2563 | /* |
2565 | * returns non-zero if any of (agno,bno):len is in a busy list | 2564 | * If we find the extent in the busy list, force the log out to get the |
2565 | * extent out of the busy list so the caller can use it straight away. | ||
2566 | */ | 2566 | */ |
2567 | STATIC int | 2567 | STATIC void |
2568 | xfs_alloc_search_busy(xfs_trans_t *tp, | 2568 | xfs_alloc_search_busy(xfs_trans_t *tp, |
2569 | xfs_agnumber_t agno, | 2569 | xfs_agnumber_t agno, |
2570 | xfs_agblock_t bno, | 2570 | xfs_agblock_t bno, |
@@ -2572,7 +2572,6 @@ xfs_alloc_search_busy(xfs_trans_t *tp, | |||
2572 | { | 2572 | { |
2573 | xfs_mount_t *mp; | 2573 | xfs_mount_t *mp; |
2574 | xfs_perag_busy_t *bsy; | 2574 | xfs_perag_busy_t *bsy; |
2575 | int n; | ||
2576 | xfs_agblock_t uend, bend; | 2575 | xfs_agblock_t uend, bend; |
2577 | xfs_lsn_t lsn; | 2576 | xfs_lsn_t lsn; |
2578 | int cnt; | 2577 | int cnt; |
@@ -2585,21 +2584,18 @@ xfs_alloc_search_busy(xfs_trans_t *tp, | |||
2585 | uend = bno + len - 1; | 2584 | uend = bno + len - 1; |
2586 | 2585 | ||
2587 | /* search pagb_list for this slot, skipping open slots */ | 2586 | /* search pagb_list for this slot, skipping open slots */ |
2588 | for (bsy = mp->m_perag[agno].pagb_list, n = 0; | 2587 | for (bsy = mp->m_perag[agno].pagb_list; cnt; bsy++) { |
2589 | cnt; bsy++, n++) { | ||
2590 | 2588 | ||
2591 | /* | 2589 | /* |
2592 | * (start1,length1) within (start2, length2) | 2590 | * (start1,length1) within (start2, length2) |
2593 | */ | 2591 | */ |
2594 | if (bsy->busy_tp != NULL) { | 2592 | if (bsy->busy_tp != NULL) { |
2595 | bend = bsy->busy_start + bsy->busy_length - 1; | 2593 | bend = bsy->busy_start + bsy->busy_length - 1; |
2596 | if ((bno > bend) || | 2594 | if ((bno > bend) || (uend < bsy->busy_start)) { |
2597 | (uend < bsy->busy_start)) { | ||
2598 | cnt--; | 2595 | cnt--; |
2599 | } else { | 2596 | } else { |
2600 | TRACE_BUSYSEARCH("xfs_alloc_search_busy", | 2597 | TRACE_BUSYSEARCH("xfs_alloc_search_busy", |
2601 | "found1", agno, bno, len, n, | 2598 | "found1", agno, bno, len, tp); |
2602 | tp); | ||
2603 | break; | 2599 | break; |
2604 | } | 2600 | } |
2605 | } | 2601 | } |
@@ -2610,15 +2606,12 @@ xfs_alloc_search_busy(xfs_trans_t *tp, | |||
2610 | * transaction that freed the block | 2606 | * transaction that freed the block |
2611 | */ | 2607 | */ |
2612 | if (cnt) { | 2608 | if (cnt) { |
2613 | TRACE_BUSYSEARCH("xfs_alloc_search_busy", "found", agno, bno, len, n, tp); | 2609 | TRACE_BUSYSEARCH("xfs_alloc_search_busy", "found", agno, bno, len, tp); |
2614 | lsn = bsy->busy_tp->t_commit_lsn; | 2610 | lsn = bsy->busy_tp->t_commit_lsn; |
2615 | spin_unlock(&mp->m_perag[agno].pagb_lock); | 2611 | spin_unlock(&mp->m_perag[agno].pagb_lock); |
2616 | xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC); | 2612 | xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC); |
2617 | } else { | 2613 | } else { |
2618 | TRACE_BUSYSEARCH("xfs_alloc_search_busy", "not-found", agno, bno, len, n, tp); | 2614 | TRACE_BUSYSEARCH("xfs_alloc_search_busy", "not-found", agno, bno, len, tp); |
2619 | n = -1; | ||
2620 | spin_unlock(&mp->m_perag[agno].pagb_lock); | 2615 | spin_unlock(&mp->m_perag[agno].pagb_lock); |
2621 | } | 2616 | } |
2622 | |||
2623 | return n; | ||
2624 | } | 2617 | } |
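Per the new comment, xfs_alloc_search_busy() no longer reports anything to its caller: its whole job is the side effect of forcing the log when the requested range overlaps a recently freed, not-yet-committed ("busy") extent, after which the blocks are safe to reuse. The userspace model below shows that lock / scan / unlock / force shape under toy types; the list, its size, the pthread mutex and force_log() are stand-ins for the per-AG pagb_list, pagb_lock and xfs_log_force(), not XFS code.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct busy_slot {
        bool          in_use;
        unsigned long start, len;
        unsigned long commit_lsn;   /* stand-in for busy_tp->t_commit_lsn */
    };

    static pthread_mutex_t pagb_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct busy_slot busy_list[8];

    static void force_log(unsigned long lsn)
    {
        printf("forcing log up to lsn %lu\n", lsn);
    }

    /* If [bno, bno+len) overlaps a busy extent, force the log past its commit LSN. */
    static void search_busy(unsigned long bno, unsigned long len)
    {
        unsigned long uend = bno + len - 1, lsn = 0;
        bool found = false;

        pthread_mutex_lock(&pagb_lock);
        for (int i = 0; i < 8; i++) {
            struct busy_slot *bsy = &busy_list[i];
            unsigned long bend;

            if (!bsy->in_use)
                continue;
            bend = bsy->start + bsy->len - 1;
            if (bno <= bend && uend >= bsy->start) {
                lsn = bsy->commit_lsn;
                found = true;
                break;
            }
        }
        pthread_mutex_unlock(&pagb_lock);

        if (found)
            force_log(lsn);         /* synchronous force happens outside the lock */
    }

    int main(void)
    {
        busy_list[0] = (struct busy_slot){ true, 100, 16, 42 };
        search_busy(96, 8);         /* overlaps -> forces the log */
        search_busy(200, 8);        /* no overlap -> nothing to do */
        return 0;
    }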
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index e58f321fdae9..36d781ee5fcc 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c | |||
@@ -2647,14 +2647,6 @@ attr_trusted_capable( | |||
2647 | } | 2647 | } |
2648 | 2648 | ||
2649 | STATIC int | 2649 | STATIC int |
2650 | attr_secure_capable( | ||
2651 | bhv_vnode_t *vp, | ||
2652 | cred_t *cred) | ||
2653 | { | ||
2654 | return -ENOSECURITY; | ||
2655 | } | ||
2656 | |||
2657 | STATIC int | ||
2658 | attr_system_set( | 2650 | attr_system_set( |
2659 | bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags) | 2651 | bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags) |
2660 | { | 2652 | { |
@@ -2724,7 +2716,7 @@ struct attrnames attr_secure = { | |||
2724 | .attr_get = attr_generic_get, | 2716 | .attr_get = attr_generic_get, |
2725 | .attr_set = attr_generic_set, | 2717 | .attr_set = attr_generic_set, |
2726 | .attr_remove = attr_generic_remove, | 2718 | .attr_remove = attr_generic_remove, |
2727 | .attr_capable = attr_secure_capable, | 2719 | .attr_capable = (attrcapable_t)fs_noerr, |
2728 | }; | 2720 | }; |
2729 | 2721 | ||
2730 | struct attrnames attr_user = { | 2722 | struct attrnames attr_user = { |
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c index 96ba6aa4ed8c..303d41e4217b 100644 --- a/fs/xfs/xfs_attr_leaf.c +++ b/fs/xfs/xfs_attr_leaf.c | |||
@@ -166,7 +166,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes) | |||
166 | 166 | ||
167 | if (!(mp->m_flags & XFS_MOUNT_ATTR2)) { | 167 | if (!(mp->m_flags & XFS_MOUNT_ATTR2)) { |
168 | if (bytes <= XFS_IFORK_ASIZE(dp)) | 168 | if (bytes <= XFS_IFORK_ASIZE(dp)) |
169 | return mp->m_attroffset >> 3; | 169 | return dp->i_d.di_forkoff; |
170 | return 0; | 170 | return 0; |
171 | } | 171 | } |
172 | 172 | ||
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 2def273855a2..eb198c01c35d 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c | |||
@@ -323,13 +323,13 @@ xfs_bmap_trace_pre_update( | |||
323 | int whichfork); /* data or attr fork */ | 323 | int whichfork); /* data or attr fork */ |
324 | 324 | ||
325 | #define XFS_BMAP_TRACE_DELETE(d,ip,i,c,w) \ | 325 | #define XFS_BMAP_TRACE_DELETE(d,ip,i,c,w) \ |
326 | xfs_bmap_trace_delete(__FUNCTION__,d,ip,i,c,w) | 326 | xfs_bmap_trace_delete(__func__,d,ip,i,c,w) |
327 | #define XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w) \ | 327 | #define XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w) \ |
328 | xfs_bmap_trace_insert(__FUNCTION__,d,ip,i,c,r1,r2,w) | 328 | xfs_bmap_trace_insert(__func__,d,ip,i,c,r1,r2,w) |
329 | #define XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w) \ | 329 | #define XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w) \ |
330 | xfs_bmap_trace_post_update(__FUNCTION__,d,ip,i,w) | 330 | xfs_bmap_trace_post_update(__func__,d,ip,i,w) |
331 | #define XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w) \ | 331 | #define XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w) \ |
332 | xfs_bmap_trace_pre_update(__FUNCTION__,d,ip,i,w) | 332 | xfs_bmap_trace_pre_update(__func__,d,ip,i,w) |
333 | #else | 333 | #else |
334 | #define XFS_BMAP_TRACE_DELETE(d,ip,i,c,w) | 334 | #define XFS_BMAP_TRACE_DELETE(d,ip,i,c,w) |
335 | #define XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w) | 335 | #define XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w) |
@@ -2402,7 +2402,7 @@ xfs_bmap_extsize_align( | |||
2402 | 2402 | ||
2403 | #define XFS_ALLOC_GAP_UNITS 4 | 2403 | #define XFS_ALLOC_GAP_UNITS 4 |
2404 | 2404 | ||
2405 | STATIC int | 2405 | STATIC void |
2406 | xfs_bmap_adjacent( | 2406 | xfs_bmap_adjacent( |
2407 | xfs_bmalloca_t *ap) /* bmap alloc argument struct */ | 2407 | xfs_bmalloca_t *ap) /* bmap alloc argument struct */ |
2408 | { | 2408 | { |
@@ -2548,7 +2548,6 @@ xfs_bmap_adjacent( | |||
2548 | ap->rval = gotbno; | 2548 | ap->rval = gotbno; |
2549 | } | 2549 | } |
2550 | #undef ISVALID | 2550 | #undef ISVALID |
2551 | return 0; | ||
2552 | } | 2551 | } |
2553 | 2552 | ||
2554 | STATIC int | 2553 | STATIC int |
@@ -4154,16 +4153,21 @@ xfs_bmap_compute_maxlevels( | |||
4154 | * number of leaf entries, is controlled by the type of di_nextents | 4153 | * number of leaf entries, is controlled by the type of di_nextents |
4155 | * (a signed 32-bit number, xfs_extnum_t), or by di_anextents | 4154 | * (a signed 32-bit number, xfs_extnum_t), or by di_anextents |
4156 | * (a signed 16-bit number, xfs_aextnum_t). | 4155 | * (a signed 16-bit number, xfs_aextnum_t). |
4156 | * | ||
4157 | * Note that we can no longer assume that if we are in ATTR1 that | ||
4158 | * the fork offset of all the inodes will be (m_attroffset >> 3) | ||
4159 | * because we could have mounted with ATTR2 and then mounted back | ||
4160 | * with ATTR1, keeping the di_forkoff's fixed but probably at | ||
4161 | * various positions. Therefore, for both ATTR1 and ATTR2 | ||
4162 | * we have to assume the worst case scenario of a minimum size | ||
4163 | * available. | ||
4157 | */ | 4164 | */ |
4158 | if (whichfork == XFS_DATA_FORK) { | 4165 | if (whichfork == XFS_DATA_FORK) { |
4159 | maxleafents = MAXEXTNUM; | 4166 | maxleafents = MAXEXTNUM; |
4160 | sz = (mp->m_flags & XFS_MOUNT_ATTR2) ? | 4167 | sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS); |
4161 | XFS_BMDR_SPACE_CALC(MINDBTPTRS) : mp->m_attroffset; | ||
4162 | } else { | 4168 | } else { |
4163 | maxleafents = MAXAEXTNUM; | 4169 | maxleafents = MAXAEXTNUM; |
4164 | sz = (mp->m_flags & XFS_MOUNT_ATTR2) ? | 4170 | sz = XFS_BMDR_SPACE_CALC(MINABTPTRS); |
4165 | XFS_BMDR_SPACE_CALC(MINABTPTRS) : | ||
4166 | mp->m_sb.sb_inodesize - mp->m_attroffset; | ||
4167 | } | 4171 | } |
4168 | maxrootrecs = (int)XFS_BTREE_BLOCK_MAXRECS(sz, xfs_bmdr, 0); | 4172 | maxrootrecs = (int)XFS_BTREE_BLOCK_MAXRECS(sz, xfs_bmdr, 0); |
4169 | minleafrecs = mp->m_bmap_dmnr[0]; | 4173 | minleafrecs = mp->m_bmap_dmnr[0]; |
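The comment added above explains why both ATTR1 and ATTR2 filesystems now size the bmap btree for the worst case: once an ATTR2 mount has moved di_forkoff around, remounting with ATTR1 cannot assume every inode keeps its fork at m_attroffset, so only the minimum guaranteed fork space is safe to plan for. As a hedged illustration of what that sizing feeds into, here is a self-contained approximation of a worst-case height calculation: start from the most leaf blocks the maximum extent count could need, then divide by the minimum node fan-out until the remainder fits in a minimally sized root. The fan-out numbers in main() are invented, and the function is a model of the idea rather than the kernel routine.

    #include <stdio.h>

    static int compute_maxlevels(long long maxleafents, int minleafrecs,
                                 int minnoderecs, int maxrootrecs)
    {
        long long maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
        int level;

        for (level = 1; maxblocks > 1; level++) {
            if (maxblocks <= maxrootrecs)
                maxblocks = 1;      /* the in-inode root absorbs the rest */
            else
                maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
        }
        return level;
    }

    int main(void)
    {
        /* e.g. ~2^31 extents, >=62 records per leaf, >=124 per node,
         * and a worst-case root that holds only 9 records */
        printf("max levels = %d\n",
               compute_maxlevels(1LL << 31, 62, 124, 9));
        return 0;
    }

The point the comment makes survives in the model: shrinking the assumed root space (maxrootrecs) can only raise the computed height, which is why assuming the minimum fork space is the safe direction.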
@@ -5772,7 +5776,6 @@ xfs_getbmap( | |||
5772 | int error; /* return value */ | 5776 | int error; /* return value */ |
5773 | __int64_t fixlen; /* length for -1 case */ | 5777 | __int64_t fixlen; /* length for -1 case */ |
5774 | int i; /* extent number */ | 5778 | int i; /* extent number */ |
5775 | bhv_vnode_t *vp; /* corresponding vnode */ | ||
5776 | int lock; /* lock state */ | 5779 | int lock; /* lock state */ |
5777 | xfs_bmbt_irec_t *map; /* buffer for user's data */ | 5780 | xfs_bmbt_irec_t *map; /* buffer for user's data */ |
5778 | xfs_mount_t *mp; /* file system mount point */ | 5781 | xfs_mount_t *mp; /* file system mount point */ |
@@ -5789,7 +5792,6 @@ xfs_getbmap( | |||
5789 | int bmapi_flags; /* flags for xfs_bmapi */ | 5792 | int bmapi_flags; /* flags for xfs_bmapi */ |
5790 | __int32_t oflags; /* getbmapx bmv_oflags field */ | 5793 | __int32_t oflags; /* getbmapx bmv_oflags field */ |
5791 | 5794 | ||
5792 | vp = XFS_ITOV(ip); | ||
5793 | mp = ip->i_mount; | 5795 | mp = ip->i_mount; |
5794 | 5796 | ||
5795 | whichfork = interface & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK; | 5797 | whichfork = interface & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK; |
@@ -5811,7 +5813,7 @@ xfs_getbmap( | |||
5811 | if ((interface & BMV_IF_NO_DMAPI_READ) == 0 && | 5813 | if ((interface & BMV_IF_NO_DMAPI_READ) == 0 && |
5812 | DM_EVENT_ENABLED(ip, DM_EVENT_READ) && | 5814 | DM_EVENT_ENABLED(ip, DM_EVENT_READ) && |
5813 | whichfork == XFS_DATA_FORK) { | 5815 | whichfork == XFS_DATA_FORK) { |
5814 | error = XFS_SEND_DATA(mp, DM_EVENT_READ, vp, 0, 0, 0, NULL); | 5816 | error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, 0, 0, 0, NULL); |
5815 | if (error) | 5817 | if (error) |
5816 | return XFS_ERROR(error); | 5818 | return XFS_ERROR(error); |
5817 | } | 5819 | } |
@@ -5869,6 +5871,10 @@ xfs_getbmap( | |||
5869 | /* xfs_fsize_t last_byte = xfs_file_last_byte(ip); */ | 5871 | /* xfs_fsize_t last_byte = xfs_file_last_byte(ip); */ |
5870 | error = xfs_flush_pages(ip, (xfs_off_t)0, | 5872 | error = xfs_flush_pages(ip, (xfs_off_t)0, |
5871 | -1, 0, FI_REMAPF); | 5873 | -1, 0, FI_REMAPF); |
5874 | if (error) { | ||
5875 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); | ||
5876 | return error; | ||
5877 | } | ||
5872 | } | 5878 | } |
5873 | 5879 | ||
5874 | ASSERT(whichfork == XFS_ATTR_FORK || ip->i_delayed_blks == 0); | 5880 | ASSERT(whichfork == XFS_ATTR_FORK || ip->i_delayed_blks == 0); |
@@ -6162,10 +6168,10 @@ xfs_check_block( | |||
6162 | } | 6168 | } |
6163 | if (*thispa == *pp) { | 6169 | if (*thispa == *pp) { |
6164 | cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld", | 6170 | cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld", |
6165 | __FUNCTION__, j, i, | 6171 | __func__, j, i, |
6166 | (unsigned long long)be64_to_cpu(*thispa)); | 6172 | (unsigned long long)be64_to_cpu(*thispa)); |
6167 | panic("%s: ptrs are equal in node\n", | 6173 | panic("%s: ptrs are equal in node\n", |
6168 | __FUNCTION__); | 6174 | __func__); |
6169 | } | 6175 | } |
6170 | } | 6176 | } |
6171 | } | 6177 | } |
@@ -6192,7 +6198,7 @@ xfs_bmap_check_leaf_extents( | |||
6192 | xfs_mount_t *mp; /* file system mount structure */ | 6198 | xfs_mount_t *mp; /* file system mount structure */ |
6193 | __be64 *pp; /* pointer to block address */ | 6199 | __be64 *pp; /* pointer to block address */ |
6194 | xfs_bmbt_rec_t *ep; /* pointer to current extent */ | 6200 | xfs_bmbt_rec_t *ep; /* pointer to current extent */ |
6195 | xfs_bmbt_rec_t *lastp; /* pointer to previous extent */ | 6201 | xfs_bmbt_rec_t last = {0, 0}; /* last extent in prev block */ |
6196 | xfs_bmbt_rec_t *nextp; /* pointer to next extent */ | 6202 | xfs_bmbt_rec_t *nextp; /* pointer to next extent */ |
6197 | int bp_release = 0; | 6203 | int bp_release = 0; |
6198 | 6204 | ||
@@ -6262,7 +6268,6 @@ xfs_bmap_check_leaf_extents( | |||
6262 | /* | 6268 | /* |
6263 | * Loop over all leaf nodes checking that all extents are in the right order. | 6269 | * Loop over all leaf nodes checking that all extents are in the right order. |
6264 | */ | 6270 | */ |
6265 | lastp = NULL; | ||
6266 | for (;;) { | 6271 | for (;;) { |
6267 | xfs_fsblock_t nextbno; | 6272 | xfs_fsblock_t nextbno; |
6268 | xfs_extnum_t num_recs; | 6273 | xfs_extnum_t num_recs; |
@@ -6283,18 +6288,16 @@ xfs_bmap_check_leaf_extents( | |||
6283 | */ | 6288 | */ |
6284 | 6289 | ||
6285 | ep = XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1); | 6290 | ep = XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1); |
6291 | if (i) { | ||
6292 | xfs_btree_check_rec(XFS_BTNUM_BMAP, &last, ep); | ||
6293 | } | ||
6286 | for (j = 1; j < num_recs; j++) { | 6294 | for (j = 1; j < num_recs; j++) { |
6287 | nextp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, j + 1); | 6295 | nextp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, j + 1); |
6288 | if (lastp) { | 6296 | xfs_btree_check_rec(XFS_BTNUM_BMAP, ep, nextp); |
6289 | xfs_btree_check_rec(XFS_BTNUM_BMAP, | ||
6290 | (void *)lastp, (void *)ep); | ||
6291 | } | ||
6292 | xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep, | ||
6293 | (void *)(nextp)); | ||
6294 | lastp = ep; | ||
6295 | ep = nextp; | 6297 | ep = nextp; |
6296 | } | 6298 | } |
6297 | 6299 | ||
6300 | last = *ep; | ||
6298 | i += num_recs; | 6301 | i += num_recs; |
6299 | if (bp_release) { | 6302 | if (bp_release) { |
6300 | bp_release = 0; | 6303 | bp_release = 0; |
@@ -6325,13 +6328,13 @@ xfs_bmap_check_leaf_extents( | |||
6325 | return; | 6328 | return; |
6326 | 6329 | ||
6327 | error0: | 6330 | error0: |
6328 | cmn_err(CE_WARN, "%s: at error0", __FUNCTION__); | 6331 | cmn_err(CE_WARN, "%s: at error0", __func__); |
6329 | if (bp_release) | 6332 | if (bp_release) |
6330 | xfs_trans_brelse(NULL, bp); | 6333 | xfs_trans_brelse(NULL, bp); |
6331 | error_norelse: | 6334 | error_norelse: |
6332 | cmn_err(CE_WARN, "%s: BAD after btree leaves for %d extents", | 6335 | cmn_err(CE_WARN, "%s: BAD after btree leaves for %d extents", |
6333 | __FUNCTION__, i); | 6336 | __func__, i); |
6334 | panic("%s: CORRUPTED BTREE OR SOMETHING", __FUNCTION__); | 6337 | panic("%s: CORRUPTED BTREE OR SOMETHING", __func__); |
6335 | return; | 6338 | return; |
6336 | } | 6339 | } |
6337 | #endif | 6340 | #endif |
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h index 87224b7d7984..6ff70cda451c 100644 --- a/fs/xfs/xfs_bmap.h +++ b/fs/xfs/xfs_bmap.h | |||
@@ -151,7 +151,7 @@ xfs_bmap_trace_exlist( | |||
151 | xfs_extnum_t cnt, /* count of entries in list */ | 151 | xfs_extnum_t cnt, /* count of entries in list */ |
152 | int whichfork); /* data or attr fork */ | 152 | int whichfork); /* data or attr fork */ |
153 | #define XFS_BMAP_TRACE_EXLIST(ip,c,w) \ | 153 | #define XFS_BMAP_TRACE_EXLIST(ip,c,w) \ |
154 | xfs_bmap_trace_exlist(__FUNCTION__,ip,c,w) | 154 | xfs_bmap_trace_exlist(__func__,ip,c,w) |
155 | #else | 155 | #else |
156 | #define XFS_BMAP_TRACE_EXLIST(ip,c,w) | 156 | #define XFS_BMAP_TRACE_EXLIST(ip,c,w) |
157 | #endif | 157 | #endif |
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index bd18987326a3..4f0e849d973e 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c | |||
@@ -275,21 +275,21 @@ xfs_bmbt_trace_cursor( | |||
275 | } | 275 | } |
276 | 276 | ||
277 | #define XFS_BMBT_TRACE_ARGBI(c,b,i) \ | 277 | #define XFS_BMBT_TRACE_ARGBI(c,b,i) \ |
278 | xfs_bmbt_trace_argbi(__FUNCTION__, c, b, i, __LINE__) | 278 | xfs_bmbt_trace_argbi(__func__, c, b, i, __LINE__) |
279 | #define XFS_BMBT_TRACE_ARGBII(c,b,i,j) \ | 279 | #define XFS_BMBT_TRACE_ARGBII(c,b,i,j) \ |
280 | xfs_bmbt_trace_argbii(__FUNCTION__, c, b, i, j, __LINE__) | 280 | xfs_bmbt_trace_argbii(__func__, c, b, i, j, __LINE__) |
281 | #define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j) \ | 281 | #define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j) \ |
282 | xfs_bmbt_trace_argfffi(__FUNCTION__, c, o, b, i, j, __LINE__) | 282 | xfs_bmbt_trace_argfffi(__func__, c, o, b, i, j, __LINE__) |
283 | #define XFS_BMBT_TRACE_ARGI(c,i) \ | 283 | #define XFS_BMBT_TRACE_ARGI(c,i) \ |
284 | xfs_bmbt_trace_argi(__FUNCTION__, c, i, __LINE__) | 284 | xfs_bmbt_trace_argi(__func__, c, i, __LINE__) |
285 | #define XFS_BMBT_TRACE_ARGIFK(c,i,f,s) \ | 285 | #define XFS_BMBT_TRACE_ARGIFK(c,i,f,s) \ |
286 | xfs_bmbt_trace_argifk(__FUNCTION__, c, i, f, s, __LINE__) | 286 | xfs_bmbt_trace_argifk(__func__, c, i, f, s, __LINE__) |
287 | #define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) \ | 287 | #define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) \ |
288 | xfs_bmbt_trace_argifr(__FUNCTION__, c, i, f, r, __LINE__) | 288 | xfs_bmbt_trace_argifr(__func__, c, i, f, r, __LINE__) |
289 | #define XFS_BMBT_TRACE_ARGIK(c,i,k) \ | 289 | #define XFS_BMBT_TRACE_ARGIK(c,i,k) \ |
290 | xfs_bmbt_trace_argik(__FUNCTION__, c, i, k, __LINE__) | 290 | xfs_bmbt_trace_argik(__func__, c, i, k, __LINE__) |
291 | #define XFS_BMBT_TRACE_CURSOR(c,s) \ | 291 | #define XFS_BMBT_TRACE_CURSOR(c,s) \ |
292 | xfs_bmbt_trace_cursor(__FUNCTION__, c, s, __LINE__) | 292 | xfs_bmbt_trace_cursor(__func__, c, s, __LINE__) |
293 | #else | 293 | #else |
294 | #define XFS_BMBT_TRACE_ARGBI(c,b,i) | 294 | #define XFS_BMBT_TRACE_ARGBI(c,b,i) |
295 | #define XFS_BMBT_TRACE_ARGBII(c,b,i,j) | 295 | #define XFS_BMBT_TRACE_ARGBII(c,b,i,j) |
@@ -2027,6 +2027,24 @@ xfs_bmbt_increment( | |||
2027 | 2027 | ||
2028 | /* | 2028 | /* |
2029 | * Insert the current record at the point referenced by cur. | 2029 | * Insert the current record at the point referenced by cur. |
2030 | * | ||
2031 | * A multi-level split of the tree on insert will invalidate the original | ||
2032 | * cursor. It appears, however, that some callers assume that the cursor is | ||
2033 | * always valid. Hence if we do a multi-level split we need to revalidate the | ||
2034 | * cursor. | ||
2035 | * | ||
2036 | * When a split occurs, we will see a new cursor returned. Use that as a | ||
2037 | * trigger to determine if we need to revalidate the original cursor. If we get | ||
2038 | * a split, then use the original irec to look up the path of the record we | ||
2039 | * just inserted. | ||
2040 | * | ||
2041 | * Note that the fact that the btree root is in the inode means that we can | ||
2042 | * have the level of the tree change without a "split" occurring at the root | ||
2043 | * level. What happens is that the root is migrated to an allocated block and | ||
2044 | * the inode root is pointed to it. This means a single split can change the | ||
2045 | * level of the tree (level 2 -> level 3) and invalidate the old cursor. Hence | ||
2046 | * the level change should be accounted as a split so as to correctly trigger a | ||
2047 | * revalidation of the old cursor. | ||
2030 | */ | 2048 | */ |
2031 | int /* error */ | 2049 | int /* error */ |
2032 | xfs_bmbt_insert( | 2050 | xfs_bmbt_insert( |
@@ -2039,11 +2057,14 @@ xfs_bmbt_insert( | |||
2039 | xfs_fsblock_t nbno; | 2057 | xfs_fsblock_t nbno; |
2040 | xfs_btree_cur_t *ncur; | 2058 | xfs_btree_cur_t *ncur; |
2041 | xfs_bmbt_rec_t nrec; | 2059 | xfs_bmbt_rec_t nrec; |
2060 | xfs_bmbt_irec_t oirec; /* original irec */ | ||
2042 | xfs_btree_cur_t *pcur; | 2061 | xfs_btree_cur_t *pcur; |
2062 | int splits = 0; | ||
2043 | 2063 | ||
2044 | XFS_BMBT_TRACE_CURSOR(cur, ENTRY); | 2064 | XFS_BMBT_TRACE_CURSOR(cur, ENTRY); |
2045 | level = 0; | 2065 | level = 0; |
2046 | nbno = NULLFSBLOCK; | 2066 | nbno = NULLFSBLOCK; |
2067 | oirec = cur->bc_rec.b; | ||
2047 | xfs_bmbt_disk_set_all(&nrec, &cur->bc_rec.b); | 2068 | xfs_bmbt_disk_set_all(&nrec, &cur->bc_rec.b); |
2048 | ncur = NULL; | 2069 | ncur = NULL; |
2049 | pcur = cur; | 2070 | pcur = cur; |
@@ -2052,11 +2073,13 @@ xfs_bmbt_insert( | |||
2052 | &i))) { | 2073 | &i))) { |
2053 | if (pcur != cur) | 2074 | if (pcur != cur) |
2054 | xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR); | 2075 | xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR); |
2055 | XFS_BMBT_TRACE_CURSOR(cur, ERROR); | 2076 | goto error0; |
2056 | return error; | ||
2057 | } | 2077 | } |
2058 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); | 2078 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); |
2059 | if (pcur != cur && (ncur || nbno == NULLFSBLOCK)) { | 2079 | if (pcur != cur && (ncur || nbno == NULLFSBLOCK)) { |
2080 | /* allocating a new root is effectively a split */ | ||
2081 | if (cur->bc_nlevels != pcur->bc_nlevels) | ||
2082 | splits++; | ||
2060 | cur->bc_nlevels = pcur->bc_nlevels; | 2083 | cur->bc_nlevels = pcur->bc_nlevels; |
2061 | cur->bc_private.b.allocated += | 2084 | cur->bc_private.b.allocated += |
2062 | pcur->bc_private.b.allocated; | 2085 | pcur->bc_private.b.allocated; |
@@ -2070,10 +2093,21 @@ xfs_bmbt_insert( | |||
2070 | xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR); | 2093 | xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR); |
2071 | } | 2094 | } |
2072 | if (ncur) { | 2095 | if (ncur) { |
2096 | splits++; | ||
2073 | pcur = ncur; | 2097 | pcur = ncur; |
2074 | ncur = NULL; | 2098 | ncur = NULL; |
2075 | } | 2099 | } |
2076 | } while (nbno != NULLFSBLOCK); | 2100 | } while (nbno != NULLFSBLOCK); |
2101 | |||
2102 | if (splits > 1) { | ||
2103 | /* revalidate the old cursor as we had a multi-level split */ | ||
2104 | error = xfs_bmbt_lookup_eq(cur, oirec.br_startoff, | ||
2105 | oirec.br_startblock, oirec.br_blockcount, &i); | ||
2106 | if (error) | ||
2107 | goto error0; | ||
2108 | ASSERT(i == 1); | ||
2109 | } | ||
2110 | |||
2077 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); | 2111 | XFS_BMBT_TRACE_CURSOR(cur, EXIT); |
2078 | *stat = i; | 2112 | *stat = i; |
2079 | return 0; | 2113 | return 0; |
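The block comment added to xfs_bmbt_insert() describes the rule the new code enforces: remember the record being inserted, count how many splits the insert loop performs (treating a root migration as a split), and if more than one level changed, run a fresh lookup so the caller's cursor points at the inserted record again. The sketch below models only that bookkeeping; the btree itself is faked, and every type and helper name is invented for illustration.

    #include <stdio.h>

    struct rec { long startoff; };
    struct cur { struct rec rec; int path_valid; };

    /* Fake insert: pretend the tree split 'splits' times and report it. */
    static int fake_insert(struct cur *cur, int splits)
    {
        if (splits > 1)
            cur->path_valid = 0;    /* a multi-level split stales the path */
        return splits;
    }

    /* Fake lookup-equal: rebuild the path to the saved record. */
    static void lookup_eq(struct cur *cur, struct rec orig)
    {
        cur->rec = orig;
        cur->path_valid = 1;
    }

    static void insert(struct cur *cur, int simulated_splits)
    {
        struct rec orig = cur->rec;             /* save before the shape can change */
        int splits = fake_insert(cur, simulated_splits);

        if (splits > 1)                         /* same trigger as the patch */
            lookup_eq(cur, orig);
        printf("splits=%d path_valid=%d\n", splits, cur->path_valid);
    }

    int main(void)
    {
        struct cur c = { { 42 }, 1 };

        insert(&c, 0);      /* plain insert: cursor stays usable */
        insert(&c, 2);      /* multi-level split: revalidated via lookup */
        return 0;
    }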
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 63debd147eb5..53a71c62025d 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c | |||
@@ -645,7 +645,12 @@ xfs_buf_item_push( | |||
645 | bp = bip->bli_buf; | 645 | bp = bip->bli_buf; |
646 | 646 | ||
647 | if (XFS_BUF_ISDELAYWRITE(bp)) { | 647 | if (XFS_BUF_ISDELAYWRITE(bp)) { |
648 | xfs_bawrite(bip->bli_item.li_mountp, bp); | 648 | int error; |
649 | error = xfs_bawrite(bip->bli_item.li_mountp, bp); | ||
650 | if (error) | ||
651 | xfs_fs_cmn_err(CE_WARN, bip->bli_item.li_mountp, | ||
652 | "xfs_buf_item_push: pushbuf error %d on bip %p, bp %p", | ||
653 | error, bip, bp); | ||
649 | } else { | 654 | } else { |
650 | xfs_buf_relse(bp); | 655 | xfs_buf_relse(bp); |
651 | } | 656 | } |
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c index e92e73f0e6af..7cb26529766b 100644 --- a/fs/xfs/xfs_dir2.c +++ b/fs/xfs/xfs_dir2.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include "xfs_error.h" | 44 | #include "xfs_error.h" |
45 | #include "xfs_vnodeops.h" | 45 | #include "xfs_vnodeops.h" |
46 | 46 | ||
47 | struct xfs_name xfs_name_dotdot = {"..", 2}; | ||
47 | 48 | ||
48 | void | 49 | void |
49 | xfs_dir_mount( | 50 | xfs_dir_mount( |
@@ -146,8 +147,7 @@ int | |||
146 | xfs_dir_createname( | 147 | xfs_dir_createname( |
147 | xfs_trans_t *tp, | 148 | xfs_trans_t *tp, |
148 | xfs_inode_t *dp, | 149 | xfs_inode_t *dp, |
149 | char *name, | 150 | struct xfs_name *name, |
150 | int namelen, | ||
151 | xfs_ino_t inum, /* new entry inode number */ | 151 | xfs_ino_t inum, /* new entry inode number */ |
152 | xfs_fsblock_t *first, /* bmap's firstblock */ | 152 | xfs_fsblock_t *first, /* bmap's firstblock */ |
153 | xfs_bmap_free_t *flist, /* bmap's freeblock list */ | 153 | xfs_bmap_free_t *flist, /* bmap's freeblock list */ |
@@ -162,9 +162,9 @@ xfs_dir_createname( | |||
162 | return rval; | 162 | return rval; |
163 | XFS_STATS_INC(xs_dir_create); | 163 | XFS_STATS_INC(xs_dir_create); |
164 | 164 | ||
165 | args.name = name; | 165 | args.name = name->name; |
166 | args.namelen = namelen; | 166 | args.namelen = name->len; |
167 | args.hashval = xfs_da_hashname(name, namelen); | 167 | args.hashval = xfs_da_hashname(name->name, name->len); |
168 | args.inumber = inum; | 168 | args.inumber = inum; |
169 | args.dp = dp; | 169 | args.dp = dp; |
170 | args.firstblock = first; | 170 | args.firstblock = first; |
@@ -197,8 +197,7 @@ int | |||
197 | xfs_dir_lookup( | 197 | xfs_dir_lookup( |
198 | xfs_trans_t *tp, | 198 | xfs_trans_t *tp, |
199 | xfs_inode_t *dp, | 199 | xfs_inode_t *dp, |
200 | char *name, | 200 | struct xfs_name *name, |
201 | int namelen, | ||
202 | xfs_ino_t *inum) /* out: inode number */ | 201 | xfs_ino_t *inum) /* out: inode number */ |
203 | { | 202 | { |
204 | xfs_da_args_t args; | 203 | xfs_da_args_t args; |
@@ -207,18 +206,14 @@ xfs_dir_lookup( | |||
207 | 206 | ||
208 | ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR); | 207 | ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR); |
209 | XFS_STATS_INC(xs_dir_lookup); | 208 | XFS_STATS_INC(xs_dir_lookup); |
209 | memset(&args, 0, sizeof(xfs_da_args_t)); | ||
210 | 210 | ||
211 | args.name = name; | 211 | args.name = name->name; |
212 | args.namelen = namelen; | 212 | args.namelen = name->len; |
213 | args.hashval = xfs_da_hashname(name, namelen); | 213 | args.hashval = xfs_da_hashname(name->name, name->len); |
214 | args.inumber = 0; | ||
215 | args.dp = dp; | 214 | args.dp = dp; |
216 | args.firstblock = NULL; | ||
217 | args.flist = NULL; | ||
218 | args.total = 0; | ||
219 | args.whichfork = XFS_DATA_FORK; | 215 | args.whichfork = XFS_DATA_FORK; |
220 | args.trans = tp; | 216 | args.trans = tp; |
221 | args.justcheck = args.addname = 0; | ||
222 | args.oknoent = 1; | 217 | args.oknoent = 1; |
223 | 218 | ||
224 | if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) | 219 | if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) |
@@ -247,8 +242,7 @@ int | |||
247 | xfs_dir_removename( | 242 | xfs_dir_removename( |
248 | xfs_trans_t *tp, | 243 | xfs_trans_t *tp, |
249 | xfs_inode_t *dp, | 244 | xfs_inode_t *dp, |
250 | char *name, | 245 | struct xfs_name *name, |
251 | int namelen, | ||
252 | xfs_ino_t ino, | 246 | xfs_ino_t ino, |
253 | xfs_fsblock_t *first, /* bmap's firstblock */ | 247 | xfs_fsblock_t *first, /* bmap's firstblock */ |
254 | xfs_bmap_free_t *flist, /* bmap's freeblock list */ | 248 | xfs_bmap_free_t *flist, /* bmap's freeblock list */ |
@@ -261,9 +255,9 @@ xfs_dir_removename( | |||
261 | ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR); | 255 | ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR); |
262 | XFS_STATS_INC(xs_dir_remove); | 256 | XFS_STATS_INC(xs_dir_remove); |
263 | 257 | ||
264 | args.name = name; | 258 | args.name = name->name; |
265 | args.namelen = namelen; | 259 | args.namelen = name->len; |
266 | args.hashval = xfs_da_hashname(name, namelen); | 260 | args.hashval = xfs_da_hashname(name->name, name->len); |
267 | args.inumber = ino; | 261 | args.inumber = ino; |
268 | args.dp = dp; | 262 | args.dp = dp; |
269 | args.firstblock = first; | 263 | args.firstblock = first; |
@@ -329,8 +323,7 @@ int | |||
329 | xfs_dir_replace( | 323 | xfs_dir_replace( |
330 | xfs_trans_t *tp, | 324 | xfs_trans_t *tp, |
331 | xfs_inode_t *dp, | 325 | xfs_inode_t *dp, |
332 | char *name, /* name of entry to replace */ | 326 | struct xfs_name *name, /* name of entry to replace */ |
333 | int namelen, | ||
334 | xfs_ino_t inum, /* new inode number */ | 327 | xfs_ino_t inum, /* new inode number */ |
335 | xfs_fsblock_t *first, /* bmap's firstblock */ | 328 | xfs_fsblock_t *first, /* bmap's firstblock */ |
336 | xfs_bmap_free_t *flist, /* bmap's freeblock list */ | 329 | xfs_bmap_free_t *flist, /* bmap's freeblock list */ |
@@ -345,9 +338,9 @@ xfs_dir_replace( | |||
345 | if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum))) | 338 | if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum))) |
346 | return rval; | 339 | return rval; |
347 | 340 | ||
348 | args.name = name; | 341 | args.name = name->name; |
349 | args.namelen = namelen; | 342 | args.namelen = name->len; |
350 | args.hashval = xfs_da_hashname(name, namelen); | 343 | args.hashval = xfs_da_hashname(name->name, name->len); |
351 | args.inumber = inum; | 344 | args.inumber = inum; |
352 | args.dp = dp; | 345 | args.dp = dp; |
353 | args.firstblock = first; | 346 | args.firstblock = first; |
@@ -374,28 +367,29 @@ xfs_dir_replace( | |||
374 | 367 | ||
375 | /* | 368 | /* |
376 | * See if this entry can be added to the directory without allocating space. | 369 | * See if this entry can be added to the directory without allocating space. |
370 | * First checks that the caller couldn't reserve enough space (resblks = 0). | ||
377 | */ | 371 | */ |
378 | int | 372 | int |
379 | xfs_dir_canenter( | 373 | xfs_dir_canenter( |
380 | xfs_trans_t *tp, | 374 | xfs_trans_t *tp, |
381 | xfs_inode_t *dp, | 375 | xfs_inode_t *dp, |
382 | char *name, /* name of entry to add */ | 376 | struct xfs_name *name, /* name of entry to add */ |
383 | int namelen) | 377 | uint resblks) |
384 | { | 378 | { |
385 | xfs_da_args_t args; | 379 | xfs_da_args_t args; |
386 | int rval; | 380 | int rval; |
387 | int v; /* type-checking value */ | 381 | int v; /* type-checking value */ |
388 | 382 | ||
383 | if (resblks) | ||
384 | return 0; | ||
385 | |||
389 | ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR); | 386 | ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR); |
387 | memset(&args, 0, sizeof(xfs_da_args_t)); | ||
390 | 388 | ||
391 | args.name = name; | 389 | args.name = name->name; |
392 | args.namelen = namelen; | 390 | args.namelen = name->len; |
393 | args.hashval = xfs_da_hashname(name, namelen); | 391 | args.hashval = xfs_da_hashname(name->name, name->len); |
394 | args.inumber = 0; | ||
395 | args.dp = dp; | 392 | args.dp = dp; |
396 | args.firstblock = NULL; | ||
397 | args.flist = NULL; | ||
398 | args.total = 0; | ||
399 | args.whichfork = XFS_DATA_FORK; | 393 | args.whichfork = XFS_DATA_FORK; |
400 | args.trans = tp; | 394 | args.trans = tp; |
401 | args.justcheck = args.addname = args.oknoent = 1; | 395 | args.justcheck = args.addname = args.oknoent = 1; |
diff --git a/fs/xfs/xfs_dir2.h b/fs/xfs/xfs_dir2.h index b265197e74cf..6392f939029f 100644 --- a/fs/xfs/xfs_dir2.h +++ b/fs/xfs/xfs_dir2.h | |||
@@ -59,6 +59,8 @@ typedef __uint32_t xfs_dir2_db_t; | |||
59 | */ | 59 | */ |
60 | typedef xfs_off_t xfs_dir2_off_t; | 60 | typedef xfs_off_t xfs_dir2_off_t; |
61 | 61 | ||
62 | extern struct xfs_name xfs_name_dotdot; | ||
63 | |||
62 | /* | 64 | /* |
63 | * Generic directory interface routines | 65 | * Generic directory interface routines |
64 | */ | 66 | */ |
@@ -68,21 +70,21 @@ extern int xfs_dir_isempty(struct xfs_inode *dp); | |||
68 | extern int xfs_dir_init(struct xfs_trans *tp, struct xfs_inode *dp, | 70 | extern int xfs_dir_init(struct xfs_trans *tp, struct xfs_inode *dp, |
69 | struct xfs_inode *pdp); | 71 | struct xfs_inode *pdp); |
70 | extern int xfs_dir_createname(struct xfs_trans *tp, struct xfs_inode *dp, | 72 | extern int xfs_dir_createname(struct xfs_trans *tp, struct xfs_inode *dp, |
71 | char *name, int namelen, xfs_ino_t inum, | 73 | struct xfs_name *name, xfs_ino_t inum, |
72 | xfs_fsblock_t *first, | 74 | xfs_fsblock_t *first, |
73 | struct xfs_bmap_free *flist, xfs_extlen_t tot); | 75 | struct xfs_bmap_free *flist, xfs_extlen_t tot); |
74 | extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp, | 76 | extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp, |
75 | char *name, int namelen, xfs_ino_t *inum); | 77 | struct xfs_name *name, xfs_ino_t *inum); |
76 | extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp, | 78 | extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp, |
77 | char *name, int namelen, xfs_ino_t ino, | 79 | struct xfs_name *name, xfs_ino_t ino, |
78 | xfs_fsblock_t *first, | 80 | xfs_fsblock_t *first, |
79 | struct xfs_bmap_free *flist, xfs_extlen_t tot); | 81 | struct xfs_bmap_free *flist, xfs_extlen_t tot); |
80 | extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp, | 82 | extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp, |
81 | char *name, int namelen, xfs_ino_t inum, | 83 | struct xfs_name *name, xfs_ino_t inum, |
82 | xfs_fsblock_t *first, | 84 | xfs_fsblock_t *first, |
83 | struct xfs_bmap_free *flist, xfs_extlen_t tot); | 85 | struct xfs_bmap_free *flist, xfs_extlen_t tot); |
84 | extern int xfs_dir_canenter(struct xfs_trans *tp, struct xfs_inode *dp, | 86 | extern int xfs_dir_canenter(struct xfs_trans *tp, struct xfs_inode *dp, |
85 | char *name, int namelen); | 87 | struct xfs_name *name, uint resblks); |
86 | extern int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino); | 88 | extern int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino); |
87 | 89 | ||
88 | /* | 90 | /* |
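The directory API changes above collapse every (char *name, int namelen) pair into a single struct xfs_name argument, with xfs_name_dotdot provided as a ready-made ".." instance. A userspace analogue of that bundling is sketched below; the real struct xfs_name lives in an XFS header that is not part of this hunk, so the field names here are assumptions chosen to match the usage visible in the diff.

    #include <stdio.h>
    #include <string.h>

    struct xfs_name_like {
        const char *name;
        int         len;
    };

    static struct xfs_name_like make_name(const char *s)
    {
        struct xfs_name_like n = { s, (int)strlen(s) };
        return n;
    }

    /* One argument instead of the old (char *name, int namelen) pair. */
    static void lookup(const struct xfs_name_like *name)
    {
        printf("looking up %.*s (len %d)\n", name->len, name->name, name->len);
    }

    int main(void)
    {
        struct xfs_name_like dotdot = { "..", 2 };  /* like xfs_name_dotdot */
        struct xfs_name_like file   = make_name("lost+found");

        lookup(&dotdot);
        lookup(&file);
        return 0;
    }

A side effect visible in the same hunks is that xfs_dir_canenter() now also takes the caller's block reservation and returns success immediately when resblks is non-zero, since a caller that reserved space does not need the free-space probe at all.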
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c index eb03eab5ca52..3f3785b10804 100644 --- a/fs/xfs/xfs_filestream.c +++ b/fs/xfs/xfs_filestream.c | |||
@@ -73,7 +73,7 @@ xfs_filestreams_trace( | |||
73 | #define TRACE4(mp,t,a0,a1,a2,a3) TRACE6(mp,t,a0,a1,a2,a3,0,0) | 73 | #define TRACE4(mp,t,a0,a1,a2,a3) TRACE6(mp,t,a0,a1,a2,a3,0,0) |
74 | #define TRACE5(mp,t,a0,a1,a2,a3,a4) TRACE6(mp,t,a0,a1,a2,a3,a4,0) | 74 | #define TRACE5(mp,t,a0,a1,a2,a3,a4) TRACE6(mp,t,a0,a1,a2,a3,a4,0) |
75 | #define TRACE6(mp,t,a0,a1,a2,a3,a4,a5) \ | 75 | #define TRACE6(mp,t,a0,a1,a2,a3,a4,a5) \ |
76 | xfs_filestreams_trace(mp, t, __FUNCTION__, __LINE__, \ | 76 | xfs_filestreams_trace(mp, t, __func__, __LINE__, \ |
77 | (__psunsigned_t)a0, (__psunsigned_t)a1, \ | 77 | (__psunsigned_t)a0, (__psunsigned_t)a1, \ |
78 | (__psunsigned_t)a2, (__psunsigned_t)a3, \ | 78 | (__psunsigned_t)a2, (__psunsigned_t)a3, \ |
79 | (__psunsigned_t)a4, (__psunsigned_t)a5) | 79 | (__psunsigned_t)a4, (__psunsigned_t)a5) |
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 5a146cb22980..a64dfbd565a5 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c | |||
@@ -107,6 +107,16 @@ xfs_ialloc_log_di( | |||
107 | /* | 107 | /* |
108 | * Allocation group level functions. | 108 | * Allocation group level functions. |
109 | */ | 109 | */ |
110 | static inline int | ||
111 | xfs_ialloc_cluster_alignment( | ||
112 | xfs_alloc_arg_t *args) | ||
113 | { | ||
114 | if (xfs_sb_version_hasalign(&args->mp->m_sb) && | ||
115 | args->mp->m_sb.sb_inoalignmt >= | ||
116 | XFS_B_TO_FSBT(args->mp, XFS_INODE_CLUSTER_SIZE(args->mp))) | ||
117 | return args->mp->m_sb.sb_inoalignmt; | ||
118 | return 1; | ||
119 | } | ||
110 | 120 | ||
111 | /* | 121 | /* |
112 | * Allocate new inodes in the allocation group specified by agbp. | 122 | * Allocate new inodes in the allocation group specified by agbp. |
@@ -167,10 +177,24 @@ xfs_ialloc_ag_alloc( | |||
167 | args.mod = args.total = args.wasdel = args.isfl = | 177 | args.mod = args.total = args.wasdel = args.isfl = |
168 | args.userdata = args.minalignslop = 0; | 178 | args.userdata = args.minalignslop = 0; |
169 | args.prod = 1; | 179 | args.prod = 1; |
170 | args.alignment = 1; | 180 | |
171 | /* | 181 | /* |
172 | * Allow space for the inode btree to split. | 182 | * We need to take into account alignment here to ensure that |
183 | * we don't modify the free list if we fail to have an exact | ||
184 | * block. If we don't have an exact match, and every other | ||
185 | * allocation attempt fails, we'll end up cancelling | ||
186 | * a dirty transaction and shutting down. | ||
187 | * | ||
188 | * For an exact allocation, alignment must be 1, | ||
189 | * however we need to take cluster alignment into account when | ||
190 | * fixing up the freelist. Use the minalignslop field to | ||
191 | * indicate that extra blocks might be required for alignment, | ||
192 | * but not to use them in the actual exact allocation. | ||
173 | */ | 193 | */ |
194 | args.alignment = 1; | ||
195 | args.minalignslop = xfs_ialloc_cluster_alignment(&args) - 1; | ||
196 | |||
197 | /* Allow space for the inode btree to split. */ | ||
174 | args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1; | 198 | args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1; |
175 | if ((error = xfs_alloc_vextent(&args))) | 199 | if ((error = xfs_alloc_vextent(&args))) |
176 | return error; | 200 | return error; |
@@ -191,13 +215,8 @@ xfs_ialloc_ag_alloc( | |||
191 | ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN)); | 215 | ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN)); |
192 | args.alignment = args.mp->m_dalign; | 216 | args.alignment = args.mp->m_dalign; |
193 | isaligned = 1; | 217 | isaligned = 1; |
194 | } else if (xfs_sb_version_hasalign(&args.mp->m_sb) && | 218 | } else |
195 | args.mp->m_sb.sb_inoalignmt >= | 219 | args.alignment = xfs_ialloc_cluster_alignment(&args); |
196 | XFS_B_TO_FSBT(args.mp, | ||
197 | XFS_INODE_CLUSTER_SIZE(args.mp))) | ||
198 | args.alignment = args.mp->m_sb.sb_inoalignmt; | ||
199 | else | ||
200 | args.alignment = 1; | ||
201 | /* | 220 | /* |
202 | * Need to figure out where to allocate the inode blocks. | 221 | * Need to figure out where to allocate the inode blocks. |
203 | * Ideally they should be spaced out through the a.g. | 222 | * Ideally they should be spaced out through the a.g. |
@@ -230,12 +249,7 @@ xfs_ialloc_ag_alloc( | |||
230 | args.agbno = be32_to_cpu(agi->agi_root); | 249 | args.agbno = be32_to_cpu(agi->agi_root); |
231 | args.fsbno = XFS_AGB_TO_FSB(args.mp, | 250 | args.fsbno = XFS_AGB_TO_FSB(args.mp, |
232 | be32_to_cpu(agi->agi_seqno), args.agbno); | 251 | be32_to_cpu(agi->agi_seqno), args.agbno); |
233 | if (xfs_sb_version_hasalign(&args.mp->m_sb) && | 252 | args.alignment = xfs_ialloc_cluster_alignment(&args); |
234 | args.mp->m_sb.sb_inoalignmt >= | ||
235 | XFS_B_TO_FSBT(args.mp, XFS_INODE_CLUSTER_SIZE(args.mp))) | ||
236 | args.alignment = args.mp->m_sb.sb_inoalignmt; | ||
237 | else | ||
238 | args.alignment = 1; | ||
239 | if ((error = xfs_alloc_vextent(&args))) | 253 | if ((error = xfs_alloc_vextent(&args))) |
240 | return error; | 254 | return error; |
241 | } | 255 | } |
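The xfs_ialloc.c changes above factor the inode-cluster alignment decision into xfs_ialloc_cluster_alignment() and, per the new comment, keep the first allocation attempt exact (alignment 1) while advertising the possible alignment padding through minalignslop so the free list is prepared for the worst case. A standalone restatement of that decision is below; the parameter names are simplified stand-ins for the superblock fields used in the helper.

    #include <stdbool.h>
    #include <stdio.h>

    /* Use the superblock inode alignment only when the feature is present and
     * the alignment covers at least one inode cluster; otherwise no alignment. */
    static int cluster_alignment(bool has_align_feature,
                                 unsigned int sb_inoalignmt,
                                 unsigned int cluster_size_fsb)
    {
        if (has_align_feature && sb_inoalignmt >= cluster_size_fsb)
            return sb_inoalignmt;
        return 1;
    }

    int main(void)
    {
        int align = cluster_alignment(true, 8, 8);  /* e.g. 8-block clusters */

        /* The exact allocation still asks for alignment 1, but the extra blocks
         * that alignment might need are declared via minalignslop. */
        int exact_alignment = 1;
        int minalignslop    = align - 1;

        printf("align=%d exact_alignment=%d minalignslop=%d\n",
               align, exact_alignment, minalignslop);
        return 0;
    }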
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 8e09b71f4104..e657c5128460 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c | |||
@@ -78,7 +78,6 @@ xfs_iget_core( | |||
78 | xfs_inode_t *ip; | 78 | xfs_inode_t *ip; |
79 | xfs_inode_t *iq; | 79 | xfs_inode_t *iq; |
80 | int error; | 80 | int error; |
81 | xfs_icluster_t *icl, *new_icl = NULL; | ||
82 | unsigned long first_index, mask; | 81 | unsigned long first_index, mask; |
83 | xfs_perag_t *pag; | 82 | xfs_perag_t *pag; |
84 | xfs_agino_t agino; | 83 | xfs_agino_t agino; |
@@ -229,11 +228,9 @@ finish_inode: | |||
229 | } | 228 | } |
230 | 229 | ||
231 | /* | 230 | /* |
232 | * This is a bit messy - we preallocate everything we _might_ | 231 | * Preload the radix tree so we can insert safely under the |
233 | * need before we pick up the ici lock. That way we don't have to | 232 | * write spinlock. |
234 | * juggle locks and go all the way back to the start. | ||
235 | */ | 233 | */ |
236 | new_icl = kmem_zone_alloc(xfs_icluster_zone, KM_SLEEP); | ||
237 | if (radix_tree_preload(GFP_KERNEL)) { | 234 | if (radix_tree_preload(GFP_KERNEL)) { |
238 | xfs_idestroy(ip); | 235 | xfs_idestroy(ip); |
239 | delay(1); | 236 | delay(1); |
@@ -242,17 +239,6 @@ finish_inode: | |||
242 | mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1); | 239 | mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1); |
243 | first_index = agino & mask; | 240 | first_index = agino & mask; |
244 | write_lock(&pag->pag_ici_lock); | 241 | write_lock(&pag->pag_ici_lock); |
245 | |||
246 | /* | ||
247 | * Find the cluster if it exists | ||
248 | */ | ||
249 | icl = NULL; | ||
250 | if (radix_tree_gang_lookup(&pag->pag_ici_root, (void**)&iq, | ||
251 | first_index, 1)) { | ||
252 | if ((XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) == first_index) | ||
253 | icl = iq->i_cluster; | ||
254 | } | ||
255 | |||
256 | /* | 242 | /* |
257 | * insert the new inode | 243 | * insert the new inode |
258 | */ | 244 | */ |
@@ -267,30 +253,13 @@ finish_inode: | |||
267 | } | 253 | } |
268 | 254 | ||
269 | /* | 255 | /* |
270 | * These values _must_ be set before releasing ihlock! | 256 | * These values _must_ be set before releasing the radix tree lock! |
271 | */ | 257 | */ |
272 | ip->i_udquot = ip->i_gdquot = NULL; | 258 | ip->i_udquot = ip->i_gdquot = NULL; |
273 | xfs_iflags_set(ip, XFS_INEW); | 259 | xfs_iflags_set(ip, XFS_INEW); |
274 | 260 | ||
275 | ASSERT(ip->i_cluster == NULL); | ||
276 | |||
277 | if (!icl) { | ||
278 | spin_lock_init(&new_icl->icl_lock); | ||
279 | INIT_HLIST_HEAD(&new_icl->icl_inodes); | ||
280 | icl = new_icl; | ||
281 | new_icl = NULL; | ||
282 | } else { | ||
283 | ASSERT(!hlist_empty(&icl->icl_inodes)); | ||
284 | } | ||
285 | spin_lock(&icl->icl_lock); | ||
286 | hlist_add_head(&ip->i_cnode, &icl->icl_inodes); | ||
287 | ip->i_cluster = icl; | ||
288 | spin_unlock(&icl->icl_lock); | ||
289 | |||
290 | write_unlock(&pag->pag_ici_lock); | 261 | write_unlock(&pag->pag_ici_lock); |
291 | radix_tree_preload_end(); | 262 | radix_tree_preload_end(); |
292 | if (new_icl) | ||
293 | kmem_zone_free(xfs_icluster_zone, new_icl); | ||
294 | 263 | ||
295 | /* | 264 | /* |
296 | * Link ip to its mount and thread it on the mount's inode list. | 265 | * Link ip to its mount and thread it on the mount's inode list. |
@@ -529,18 +498,6 @@ xfs_iextract( | |||
529 | xfs_put_perag(mp, pag); | 498 | xfs_put_perag(mp, pag); |
530 | 499 | ||
531 | /* | 500 | /* |
532 | * Remove from cluster list | ||
533 | */ | ||
534 | mp = ip->i_mount; | ||
535 | spin_lock(&ip->i_cluster->icl_lock); | ||
536 | hlist_del(&ip->i_cnode); | ||
537 | spin_unlock(&ip->i_cluster->icl_lock); | ||
538 | |||
539 | /* was last inode in cluster? */ | ||
540 | if (hlist_empty(&ip->i_cluster->icl_inodes)) | ||
541 | kmem_zone_free(xfs_icluster_zone, ip->i_cluster); | ||
542 | |||
543 | /* | ||
544 | * Remove from mount's inode list. | 501 | * Remove from mount's inode list. |
545 | */ | 502 | */ |
546 | XFS_MOUNT_ILOCK(mp); | 503 | XFS_MOUNT_ILOCK(mp); |
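With the per-inode cluster tracking gone, the comment in xfs_iget_core() now states the remaining pattern plainly: do the allocation that might sleep or fail (the radix tree preload) before taking the write lock, so nothing inside the critical section can fail on memory. The sketch below models that preload-then-insert shape with a plain list and a pthread rwlock; the types and the list are invented stand-ins, not the kernel's radix tree or pag_ici_lock.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        unsigned long ino;
        struct node  *next;
    };

    static pthread_rwlock_t ici_lock = PTHREAD_RWLOCK_INITIALIZER;
    static struct node *head;

    static int insert_inode(unsigned long ino)
    {
        struct node *n = malloc(sizeof(*n));    /* "preload" outside the lock */

        if (!n)
            return -1;

        pthread_rwlock_wrlock(&ici_lock);
        n->ino  = ino;                          /* nothing in here can fail */
        n->next = head;
        head    = n;
        pthread_rwlock_unlock(&ici_lock);
        return 0;
    }

    int main(void)
    {
        insert_inode(128);
        insert_inode(129);
        for (struct node *n = head; n; n = n->next)
            printf("inode %lu\n", n->ino);
        return 0;
    }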
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index f43a6e01d68f..ca12acb90394 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -55,7 +55,6 @@ | |||
55 | 55 | ||
56 | kmem_zone_t *xfs_ifork_zone; | 56 | kmem_zone_t *xfs_ifork_zone; |
57 | kmem_zone_t *xfs_inode_zone; | 57 | kmem_zone_t *xfs_inode_zone; |
58 | kmem_zone_t *xfs_icluster_zone; | ||
59 | 58 | ||
60 | /* | 59 | /* |
61 | * Used in xfs_itruncate(). This is the maximum number of extents | 60 | * Used in xfs_itruncate(). This is the maximum number of extents |
@@ -126,6 +125,90 @@ xfs_inobp_check( | |||
126 | #endif | 125 | #endif |
127 | 126 | ||
128 | /* | 127 | /* |
128 | * Find the buffer associated with the given inode map | ||
129 | * We do basic validation checks on the buffer once it has been | ||
130 | * retrieved from disk. | ||
131 | */ | ||
132 | STATIC int | ||
133 | xfs_imap_to_bp( | ||
134 | xfs_mount_t *mp, | ||
135 | xfs_trans_t *tp, | ||
136 | xfs_imap_t *imap, | ||
137 | xfs_buf_t **bpp, | ||
138 | uint buf_flags, | ||
139 | uint imap_flags) | ||
140 | { | ||
141 | int error; | ||
142 | int i; | ||
143 | int ni; | ||
144 | xfs_buf_t *bp; | ||
145 | |||
146 | error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno, | ||
147 | (int)imap->im_len, buf_flags, &bp); | ||
148 | if (error) { | ||
149 | if (error != EAGAIN) { | ||
150 | cmn_err(CE_WARN, | ||
151 | "xfs_imap_to_bp: xfs_trans_read_buf()returned " | ||
152 | "an error %d on %s. Returning error.", | ||
153 | error, mp->m_fsname); | ||
154 | } else { | ||
155 | ASSERT(buf_flags & XFS_BUF_TRYLOCK); | ||
156 | } | ||
157 | return error; | ||
158 | } | ||
159 | |||
160 | /* | ||
161 | * Validate the magic number and version of every inode in the buffer | ||
162 | * (if DEBUG kernel) or the first inode in the buffer, otherwise. | ||
163 | */ | ||
164 | #ifdef DEBUG | ||
165 | ni = BBTOB(imap->im_len) >> mp->m_sb.sb_inodelog; | ||
166 | #else /* usual case */ | ||
167 | ni = 1; | ||
168 | #endif | ||
169 | |||
170 | for (i = 0; i < ni; i++) { | ||
171 | int di_ok; | ||
172 | xfs_dinode_t *dip; | ||
173 | |||
174 | dip = (xfs_dinode_t *)xfs_buf_offset(bp, | ||
175 | (i << mp->m_sb.sb_inodelog)); | ||
176 | di_ok = be16_to_cpu(dip->di_core.di_magic) == XFS_DINODE_MAGIC && | ||
177 | XFS_DINODE_GOOD_VERSION(dip->di_core.di_version); | ||
178 | if (unlikely(XFS_TEST_ERROR(!di_ok, mp, | ||
179 | XFS_ERRTAG_ITOBP_INOTOBP, | ||
180 | XFS_RANDOM_ITOBP_INOTOBP))) { | ||
181 | if (imap_flags & XFS_IMAP_BULKSTAT) { | ||
182 | xfs_trans_brelse(tp, bp); | ||
183 | return XFS_ERROR(EINVAL); | ||
184 | } | ||
185 | XFS_CORRUPTION_ERROR("xfs_imap_to_bp", | ||
186 | XFS_ERRLEVEL_HIGH, mp, dip); | ||
187 | #ifdef DEBUG | ||
188 | cmn_err(CE_PANIC, | ||
189 | "Device %s - bad inode magic/vsn " | ||
190 | "daddr %lld #%d (magic=%x)", | ||
191 | XFS_BUFTARG_NAME(mp->m_ddev_targp), | ||
192 | (unsigned long long)imap->im_blkno, i, | ||
193 | be16_to_cpu(dip->di_core.di_magic)); | ||
194 | #endif | ||
195 | xfs_trans_brelse(tp, bp); | ||
196 | return XFS_ERROR(EFSCORRUPTED); | ||
197 | } | ||
198 | } | ||
199 | |||
200 | xfs_inobp_check(mp, bp); | ||
201 | |||
202 | /* | ||
203 | * Mark the buffer as an inode buffer now that it looks good | ||
204 | */ | ||
205 | XFS_BUF_SET_VTYPE(bp, B_FS_INO); | ||
206 | |||
207 | *bpp = bp; | ||
208 | return 0; | ||
209 | } | ||
210 | |||
211 | /* | ||
129 | * This routine is called to map an inode number within a file | 212 | * This routine is called to map an inode number within a file |
130 | * system to the buffer containing the on-disk version of the | 213 | * system to the buffer containing the on-disk version of the |
131 | * inode. It returns a pointer to the buffer containing the | 214 | * inode. It returns a pointer to the buffer containing the |
@@ -147,72 +230,19 @@ xfs_inotobp( | |||
147 | xfs_buf_t **bpp, | 230 | xfs_buf_t **bpp, |
148 | int *offset) | 231 | int *offset) |
149 | { | 232 | { |
150 | int di_ok; | ||
151 | xfs_imap_t imap; | 233 | xfs_imap_t imap; |
152 | xfs_buf_t *bp; | 234 | xfs_buf_t *bp; |
153 | int error; | 235 | int error; |
154 | xfs_dinode_t *dip; | ||
155 | 236 | ||
156 | /* | ||
157 | * Call the space management code to find the location of the | ||
158 | * inode on disk. | ||
159 | */ | ||
160 | imap.im_blkno = 0; | 237 | imap.im_blkno = 0; |
161 | error = xfs_imap(mp, tp, ino, &imap, XFS_IMAP_LOOKUP); | 238 | error = xfs_imap(mp, tp, ino, &imap, XFS_IMAP_LOOKUP); |
162 | if (error != 0) { | 239 | if (error) |
163 | cmn_err(CE_WARN, | ||
164 | "xfs_inotobp: xfs_imap() returned an " | ||
165 | "error %d on %s. Returning error.", error, mp->m_fsname); | ||
166 | return error; | 240 | return error; |
167 | } | ||
168 | 241 | ||
169 | /* | 242 | error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, 0); |
170 | * If the inode number maps to a block outside the bounds of the | 243 | if (error) |
171 | * file system then return NULL rather than calling read_buf | ||
172 | * and panicing when we get an error from the driver. | ||
173 | */ | ||
174 | if ((imap.im_blkno + imap.im_len) > | ||
175 | XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) { | ||
176 | cmn_err(CE_WARN, | ||
177 | "xfs_inotobp: inode number (%llu + %d) maps to a block outside the bounds " | ||
178 | "of the file system %s. Returning EINVAL.", | ||
179 | (unsigned long long)imap.im_blkno, | ||
180 | imap.im_len, mp->m_fsname); | ||
181 | return XFS_ERROR(EINVAL); | ||
182 | } | ||
183 | |||
184 | /* | ||
185 | * Read in the buffer. If tp is NULL, xfs_trans_read_buf() will | ||
186 | * default to just a read_buf() call. | ||
187 | */ | ||
188 | error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno, | ||
189 | (int)imap.im_len, XFS_BUF_LOCK, &bp); | ||
190 | |||
191 | if (error) { | ||
192 | cmn_err(CE_WARN, | ||
193 | "xfs_inotobp: xfs_trans_read_buf() returned an " | ||
194 | "error %d on %s. Returning error.", error, mp->m_fsname); | ||
195 | return error; | 244 | return error; |
196 | } | ||
197 | dip = (xfs_dinode_t *)xfs_buf_offset(bp, 0); | ||
198 | di_ok = | ||
199 | be16_to_cpu(dip->di_core.di_magic) == XFS_DINODE_MAGIC && | ||
200 | XFS_DINODE_GOOD_VERSION(dip->di_core.di_version); | ||
201 | if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP, | ||
202 | XFS_RANDOM_ITOBP_INOTOBP))) { | ||
203 | XFS_CORRUPTION_ERROR("xfs_inotobp", XFS_ERRLEVEL_LOW, mp, dip); | ||
204 | xfs_trans_brelse(tp, bp); | ||
205 | cmn_err(CE_WARN, | ||
206 | "xfs_inotobp: XFS_TEST_ERROR() returned an " | ||
207 | "error on %s. Returning EFSCORRUPTED.", mp->m_fsname); | ||
208 | return XFS_ERROR(EFSCORRUPTED); | ||
209 | } | ||
210 | 245 | ||
211 | xfs_inobp_check(mp, bp); | ||
212 | |||
213 | /* | ||
214 | * Set *dipp to point to the on-disk inode in the buffer. | ||
215 | */ | ||
216 | *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset); | 246 | *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset); |
217 | *bpp = bp; | 247 | *bpp = bp; |
218 | *offset = imap.im_boffset; | 248 | *offset = imap.im_boffset; |
@@ -248,46 +278,21 @@ xfs_itobp( | |||
248 | xfs_dinode_t **dipp, | 278 | xfs_dinode_t **dipp, |
249 | xfs_buf_t **bpp, | 279 | xfs_buf_t **bpp, |
250 | xfs_daddr_t bno, | 280 | xfs_daddr_t bno, |
251 | uint imap_flags) | 281 | uint imap_flags, |
282 | uint buf_flags) | ||
252 | { | 283 | { |
253 | xfs_imap_t imap; | 284 | xfs_imap_t imap; |
254 | xfs_buf_t *bp; | 285 | xfs_buf_t *bp; |
255 | int error; | 286 | int error; |
256 | int i; | ||
257 | int ni; | ||
258 | 287 | ||
259 | if (ip->i_blkno == (xfs_daddr_t)0) { | 288 | if (ip->i_blkno == (xfs_daddr_t)0) { |
260 | /* | ||
261 | * Call the space management code to find the location of the | ||
262 | * inode on disk. | ||
263 | */ | ||
264 | imap.im_blkno = bno; | 289 | imap.im_blkno = bno; |
265 | if ((error = xfs_imap(mp, tp, ip->i_ino, &imap, | 290 | error = xfs_imap(mp, tp, ip->i_ino, &imap, |
266 | XFS_IMAP_LOOKUP | imap_flags))) | 291 | XFS_IMAP_LOOKUP | imap_flags); |
292 | if (error) | ||
267 | return error; | 293 | return error; |
268 | 294 | ||
269 | /* | 295 | /* |
270 | * If the inode number maps to a block outside the bounds | ||
271 | * of the file system then return NULL rather than calling | ||
272 | * read_buf and panicing when we get an error from the | ||
273 | * driver. | ||
274 | */ | ||
275 | if ((imap.im_blkno + imap.im_len) > | ||
276 | XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) { | ||
277 | #ifdef DEBUG | ||
278 | xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: " | ||
279 | "(imap.im_blkno (0x%llx) " | ||
280 | "+ imap.im_len (0x%llx)) > " | ||
281 | " XFS_FSB_TO_BB(mp, " | ||
282 | "mp->m_sb.sb_dblocks) (0x%llx)", | ||
283 | (unsigned long long) imap.im_blkno, | ||
284 | (unsigned long long) imap.im_len, | ||
285 | XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)); | ||
286 | #endif /* DEBUG */ | ||
287 | return XFS_ERROR(EINVAL); | ||
288 | } | ||
289 | |||
290 | /* | ||
291 | * Fill in the fields in the inode that will be used to | 296 | * Fill in the fields in the inode that will be used to |
292 | * map the inode to its buffer from now on. | 297 | * map the inode to its buffer from now on. |
293 | */ | 298 | */ |
@@ -305,76 +310,17 @@ xfs_itobp( | |||
305 | } | 310 | } |
306 | ASSERT(bno == 0 || bno == imap.im_blkno); | 311 | ASSERT(bno == 0 || bno == imap.im_blkno); |
307 | 312 | ||
308 | /* | 313 | error = xfs_imap_to_bp(mp, tp, &imap, &bp, buf_flags, imap_flags); |
309 | * Read in the buffer. If tp is NULL, xfs_trans_read_buf() will | 314 | if (error) |
310 | * default to just a read_buf() call. | ||
311 | */ | ||
312 | error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno, | ||
313 | (int)imap.im_len, XFS_BUF_LOCK, &bp); | ||
314 | if (error) { | ||
315 | #ifdef DEBUG | ||
316 | xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: " | ||
317 | "xfs_trans_read_buf() returned error %d, " | ||
318 | "imap.im_blkno 0x%llx, imap.im_len 0x%llx", | ||
319 | error, (unsigned long long) imap.im_blkno, | ||
320 | (unsigned long long) imap.im_len); | ||
321 | #endif /* DEBUG */ | ||
322 | return error; | 315 | return error; |
323 | } | ||
324 | |||
325 | /* | ||
326 | * Validate the magic number and version of every inode in the buffer | ||
327 | * (if DEBUG kernel) or the first inode in the buffer, otherwise. | ||
328 | * No validation is done here in userspace (xfs_repair). | ||
329 | */ | ||
330 | #if !defined(__KERNEL__) | ||
331 | ni = 0; | ||
332 | #elif defined(DEBUG) | ||
333 | ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog; | ||
334 | #else /* usual case */ | ||
335 | ni = 1; | ||
336 | #endif | ||
337 | |||
338 | for (i = 0; i < ni; i++) { | ||
339 | int di_ok; | ||
340 | xfs_dinode_t *dip; | ||
341 | 316 | ||
342 | dip = (xfs_dinode_t *)xfs_buf_offset(bp, | 317 | if (!bp) { |
343 | (i << mp->m_sb.sb_inodelog)); | 318 | ASSERT(buf_flags & XFS_BUF_TRYLOCK); |
344 | di_ok = be16_to_cpu(dip->di_core.di_magic) == XFS_DINODE_MAGIC && | 319 | ASSERT(tp == NULL); |
345 | XFS_DINODE_GOOD_VERSION(dip->di_core.di_version); | 320 | *bpp = NULL; |
346 | if (unlikely(XFS_TEST_ERROR(!di_ok, mp, | 321 | return EAGAIN; |
347 | XFS_ERRTAG_ITOBP_INOTOBP, | ||
348 | XFS_RANDOM_ITOBP_INOTOBP))) { | ||
349 | if (imap_flags & XFS_IMAP_BULKSTAT) { | ||
350 | xfs_trans_brelse(tp, bp); | ||
351 | return XFS_ERROR(EINVAL); | ||
352 | } | ||
353 | #ifdef DEBUG | ||
354 | cmn_err(CE_ALERT, | ||
355 | "Device %s - bad inode magic/vsn " | ||
356 | "daddr %lld #%d (magic=%x)", | ||
357 | XFS_BUFTARG_NAME(mp->m_ddev_targp), | ||
358 | (unsigned long long)imap.im_blkno, i, | ||
359 | be16_to_cpu(dip->di_core.di_magic)); | ||
360 | #endif | ||
361 | XFS_CORRUPTION_ERROR("xfs_itobp", XFS_ERRLEVEL_HIGH, | ||
362 | mp, dip); | ||
363 | xfs_trans_brelse(tp, bp); | ||
364 | return XFS_ERROR(EFSCORRUPTED); | ||
365 | } | ||
366 | } | 322 | } |
367 | 323 | ||
368 | xfs_inobp_check(mp, bp); | ||
369 | |||
370 | /* | ||
371 | * Mark the buffer as an inode buffer now that it looks good | ||
372 | */ | ||
373 | XFS_BUF_SET_VTYPE(bp, B_FS_INO); | ||
374 | |||
375 | /* | ||
376 | * Set *dipp to point to the on-disk inode in the buffer. | ||
377 | */ | ||
378 | *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset); | 324 | *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset); |
379 | *bpp = bp; | 325 | *bpp = bp; |
380 | return 0; | 326 | return 0; |
@@ -878,7 +824,7 @@ xfs_iread( | |||
878 | * return NULL as well. Set i_blkno to 0 so that xfs_itobp() will | 824 | * return NULL as well. Set i_blkno to 0 so that xfs_itobp() will |
879 | * know that this is a new incore inode. | 825 | * know that this is a new incore inode. |
880 | */ | 826 | */ |
881 | error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags); | 827 | error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags, XFS_BUF_LOCK); |
882 | if (error) { | 828 | if (error) { |
883 | kmem_zone_free(xfs_inode_zone, ip); | 829 | kmem_zone_free(xfs_inode_zone, ip); |
884 | return error; | 830 | return error; |
@@ -1518,51 +1464,50 @@ xfs_itruncate_start( | |||
1518 | } | 1464 | } |
1519 | 1465 | ||
1520 | /* | 1466 | /* |
1521 | * Shrink the file to the given new_size. The new | 1467 | * Shrink the file to the given new_size. The new size must be smaller than |
1522 | * size must be smaller than the current size. | 1468 | * the current size. This will free up the underlying blocks in the removed |
1523 | * This will free up the underlying blocks | 1469 | * range after a call to xfs_itruncate_start() or xfs_atruncate_start(). |
1524 | * in the removed range after a call to xfs_itruncate_start() | ||
1525 | * or xfs_atruncate_start(). | ||
1526 | * | 1470 | * |
1527 | * The transaction passed to this routine must have made | 1471 | * The transaction passed to this routine must have made a permanent log |
1528 | * a permanent log reservation of at least XFS_ITRUNCATE_LOG_RES. | 1472 | * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the |
1529 | * This routine may commit the given transaction and | 1473 | * given transaction and start new ones, so make sure everything involved in |
1530 | * start new ones, so make sure everything involved in | 1474 | * the transaction is tidy before calling here. Some transaction will be |
1531 | * the transaction is tidy before calling here. | 1475 | * returned to the caller to be committed. The incoming transaction must |
1532 | * Some transaction will be returned to the caller to be | 1476 | * already include the inode, and both inode locks must be held exclusively. |
1533 | * committed. The incoming transaction must already include | 1477 | * The inode must also be "held" within the transaction. On return the inode |
1534 | * the inode, and both inode locks must be held exclusively. | 1478 | * will be "held" within the returned transaction. This routine does NOT |
1535 | * The inode must also be "held" within the transaction. On | 1479 | * require any disk space to be reserved for it within the transaction. |
1536 | * return the inode will be "held" within the returned transaction. | ||
1537 | * This routine does NOT require any disk space to be reserved | ||
1538 | * for it within the transaction. | ||
1539 | * | 1480 | * |
1540 | * The fork parameter must be either xfs_attr_fork or xfs_data_fork, | 1481 | * The fork parameter must be either xfs_attr_fork or xfs_data_fork, and it |
1541 | * and it indicates the fork which is to be truncated. For the | 1482 | * indicates the fork which is to be truncated. For the attribute fork we only |
1542 | * attribute fork we only support truncation to size 0. | 1483 | * support truncation to size 0. |
1543 | * | 1484 | * |
1544 | * We use the sync parameter to indicate whether or not the first | 1485 | * We use the sync parameter to indicate whether or not the first transaction |
1545 | * transaction we perform might have to be synchronous. For the attr fork, | 1486 | * we perform might have to be synchronous. For the attr fork, it needs to be |
1546 | * it needs to be so if the unlink of the inode is not yet known to be | 1487 | * so if the unlink of the inode is not yet known to be permanent in the log. |
1547 | * permanent in the log. This keeps us from freeing and reusing the | 1488 | * This keeps us from freeing and reusing the blocks of the attribute fork |
1548 | * blocks of the attribute fork before the unlink of the inode becomes | 1489 | * before the unlink of the inode becomes permanent. |
1549 | * permanent. | ||
1550 | * | 1490 | * |
1551 | * For the data fork, we normally have to run synchronously if we're | 1491 | * For the data fork, we normally have to run synchronously if we're being |
1552 | * being called out of the inactive path or we're being called | 1492 | * called out of the inactive path or we're being called out of the create path |
1553 | * out of the create path where we're truncating an existing file. | 1493 | * where we're truncating an existing file. Either way, the truncate needs to |
1554 | * Either way, the truncate needs to be sync so blocks don't reappear | 1494 | * be sync so blocks don't reappear in the file with altered data in case of a |
1555 | * in the file with altered data in case of a crash. wsync filesystems | 1495 | * crash. wsync filesystems can run the first case async because anything that |
1556 | * can run the first case async because anything that shrinks the inode | 1496 | * shrinks the inode has to run sync so by the time we're called here from |
1557 | * has to run sync so by the time we're called here from inactive, the | 1497 | * inactive, the inode size is permanently set to 0. |
1558 | * inode size is permanently set to 0. | ||
1559 | * | 1498 | * |
1560 | * Calls from the truncate path always need to be sync unless we're | 1499 | * Calls from the truncate path always need to be sync unless we're in a wsync |
1561 | * in a wsync filesystem and the file has already been unlinked. | 1500 | * filesystem and the file has already been unlinked. |
1562 | * | 1501 | * |
1563 | * The caller is responsible for correctly setting the sync parameter. | 1502 | * The caller is responsible for correctly setting the sync parameter. It gets |
1564 | * It gets too hard for us to guess here which path we're being called | 1503 | * too hard for us to guess here which path we're being called out of just |
1565 | * out of just based on inode state. | 1504 | * based on inode state. |
1505 | * | ||
1506 | * If we get an error, we must return with the inode locked and linked into the | ||
1507 | * current transaction. This keeps things simple for the higher level code, | ||
1508 | * because it always knows that the inode is locked and held in the transaction | ||
1509 | * that returns to it whether errors occur or not. We don't mark the inode | ||
1510 | * dirty on error so that transactions can be easily aborted if possible. | ||
1566 | */ | 1511 | */ |
1567 | int | 1512 | int |
1568 | xfs_itruncate_finish( | 1513 | xfs_itruncate_finish( |
@@ -1741,65 +1686,51 @@ xfs_itruncate_finish( | |||
1741 | */ | 1686 | */ |
1742 | error = xfs_bmap_finish(tp, &free_list, &committed); | 1687 | error = xfs_bmap_finish(tp, &free_list, &committed); |
1743 | ntp = *tp; | 1688 | ntp = *tp; |
1689 | if (committed) { | ||
1690 | /* link the inode into the next xact in the chain */ | ||
1691 | xfs_trans_ijoin(ntp, ip, | ||
1692 | XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | ||
1693 | xfs_trans_ihold(ntp, ip); | ||
1694 | } | ||
1695 | |||
1744 | if (error) { | 1696 | if (error) { |
1745 | /* | 1697 | /* |
1746 | * If the bmap finish call encounters an error, | 1698 | * If the bmap finish call encounters an error, return |
1747 | * return to the caller where the transaction | 1699 | * to the caller where the transaction can be properly |
1748 | * can be properly aborted. We just need to | 1700 | * aborted. We just need to make sure we're not |
1749 | * make sure we're not holding any resources | 1701 | * holding any resources that we were not when we came |
1750 | * that we were not when we came in. | 1702 | * in. |
1751 | * | 1703 | * |
1752 | * Aborting from this point might lose some | 1704 | * Aborting from this point might lose some blocks in |
1753 | * blocks in the file system, but oh well. | 1705 | * the file system, but oh well. |
1754 | */ | 1706 | */ |
1755 | xfs_bmap_cancel(&free_list); | 1707 | xfs_bmap_cancel(&free_list); |
1756 | if (committed) { | ||
1757 | /* | ||
1758 | * If the passed in transaction committed | ||
1759 | * in xfs_bmap_finish(), then we want to | ||
1760 | * add the inode to this one before returning. | ||
1761 | * This keeps things simple for the higher | ||
1762 | * level code, because it always knows that | ||
1763 | * the inode is locked and held in the | ||
1764 | * transaction that returns to it whether | ||
1765 | * errors occur or not. We don't mark the | ||
1766 | * inode dirty so that this transaction can | ||
1767 | * be easily aborted if possible. | ||
1768 | */ | ||
1769 | xfs_trans_ijoin(ntp, ip, | ||
1770 | XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | ||
1771 | xfs_trans_ihold(ntp, ip); | ||
1772 | } | ||
1773 | return error; | 1708 | return error; |
1774 | } | 1709 | } |
1775 | 1710 | ||
1776 | if (committed) { | 1711 | if (committed) { |
1777 | /* | 1712 | /* |
1778 | * The first xact was committed, | 1713 | * Mark the inode dirty so it will be logged and |
1779 | * so add the inode to the new one. | 1714 | * moved forward in the log as part of every commit. |
1780 | * Mark it dirty so it will be logged | ||
1781 | * and moved forward in the log as | ||
1782 | * part of every commit. | ||
1783 | */ | 1715 | */ |
1784 | xfs_trans_ijoin(ntp, ip, | ||
1785 | XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | ||
1786 | xfs_trans_ihold(ntp, ip); | ||
1787 | xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); | 1716 | xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); |
1788 | } | 1717 | } |
1718 | |||
1789 | ntp = xfs_trans_dup(ntp); | 1719 | ntp = xfs_trans_dup(ntp); |
1790 | (void) xfs_trans_commit(*tp, 0); | 1720 | error = xfs_trans_commit(*tp, 0); |
1791 | *tp = ntp; | 1721 | *tp = ntp; |
1792 | error = xfs_trans_reserve(ntp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, | 1722 | |
1793 | XFS_TRANS_PERM_LOG_RES, | 1723 | /* link the inode into the next transaction in the chain */ |
1794 | XFS_ITRUNCATE_LOG_COUNT); | ||
1795 | /* | ||
1796 | * Add the inode being truncated to the next chained | ||
1797 | * transaction. | ||
1798 | */ | ||
1799 | xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | 1724 | xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); |
1800 | xfs_trans_ihold(ntp, ip); | 1725 | xfs_trans_ihold(ntp, ip); |
1726 | |||
1727 | if (!error) | ||
1728 | error = xfs_trans_reserve(ntp, 0, | ||
1729 | XFS_ITRUNCATE_LOG_RES(mp), 0, | ||
1730 | XFS_TRANS_PERM_LOG_RES, | ||
1731 | XFS_ITRUNCATE_LOG_COUNT); | ||
1801 | if (error) | 1732 | if (error) |
1802 | return (error); | 1733 | return error; |
1803 | } | 1734 | } |
1804 | /* | 1735 | /* |
1805 | * Only update the size in the case of the data fork, but | 1736 | * Only update the size in the case of the data fork, but |
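The hunk above reduces the old error and commit branches to a single roll-the-transaction sequence: rejoin the inode as soon as xfs_bmap_finish() may have committed, then dup, commit, and re-reserve. A minimal sketch of that pattern in isolation (xfs_example_roll_itruncate_trans() is an invented name; the calls mirror the ones in the hunk and error handling is trimmed to the essentials):

static int
xfs_example_roll_itruncate_trans(
	xfs_mount_t	*mp,
	xfs_trans_t	**tpp,	/* current transaction, replaced on return */
	xfs_inode_t	*ip)
{
	xfs_trans_t	*ntp;
	int		error;

	/* duplicate first so the chain survives the commit of *tpp */
	ntp = xfs_trans_dup(*tpp);
	error = xfs_trans_commit(*tpp, 0);
	*tpp = ntp;

	/* link the inode into the next transaction in the chain */
	xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_trans_ihold(ntp, ip);

	/* only reserve more log space if the commit itself succeeded */
	if (!error)
		error = xfs_trans_reserve(ntp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
					  XFS_TRANS_PERM_LOG_RES,
					  XFS_ITRUNCATE_LOG_COUNT);
	return error;
}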
@@ -1967,7 +1898,7 @@ xfs_iunlink( | |||
1967 | * Here we put the head pointer into our next pointer, | 1898 | * Here we put the head pointer into our next pointer, |
1968 | * and then we fall through to point the head at us. | 1899 | * and then we fall through to point the head at us. |
1969 | */ | 1900 | */ |
1970 | error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); | 1901 | error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK); |
1971 | if (error) | 1902 | if (error) |
1972 | return error; | 1903 | return error; |
1973 | 1904 | ||
@@ -2075,7 +2006,7 @@ xfs_iunlink_remove( | |||
2075 | * of dealing with the buffer when there is no need to | 2006 | * of dealing with the buffer when there is no need to |
2076 | * change it. | 2007 | * change it. |
2077 | */ | 2008 | */ |
2078 | error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); | 2009 | error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK); |
2079 | if (error) { | 2010 | if (error) { |
2080 | cmn_err(CE_WARN, | 2011 | cmn_err(CE_WARN, |
2081 | "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", | 2012 | "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", |
@@ -2137,7 +2068,7 @@ xfs_iunlink_remove( | |||
2137 | * Now last_ibp points to the buffer previous to us on | 2068 | * Now last_ibp points to the buffer previous to us on |
2138 | * the unlinked list. Pull us from the list. | 2069 | * the unlinked list. Pull us from the list. |
2139 | */ | 2070 | */ |
2140 | error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); | 2071 | error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK); |
2141 | if (error) { | 2072 | if (error) { |
2142 | cmn_err(CE_WARN, | 2073 | cmn_err(CE_WARN, |
2143 | "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", | 2074 | "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", |
@@ -2172,13 +2103,6 @@ xfs_iunlink_remove( | |||
2172 | return 0; | 2103 | return 0; |
2173 | } | 2104 | } |
2174 | 2105 | ||
2175 | STATIC_INLINE int xfs_inode_clean(xfs_inode_t *ip) | ||
2176 | { | ||
2177 | return (((ip->i_itemp == NULL) || | ||
2178 | !(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL)) && | ||
2179 | (ip->i_update_core == 0)); | ||
2180 | } | ||
2181 | |||
2182 | STATIC void | 2106 | STATIC void |
2183 | xfs_ifree_cluster( | 2107 | xfs_ifree_cluster( |
2184 | xfs_inode_t *free_ip, | 2108 | xfs_inode_t *free_ip, |
@@ -2400,7 +2324,7 @@ xfs_ifree( | |||
2400 | 2324 | ||
2401 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 2325 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
2402 | 2326 | ||
2403 | error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0, 0); | 2327 | error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK); |
2404 | if (error) | 2328 | if (error) |
2405 | return error; | 2329 | return error; |
2406 | 2330 | ||
@@ -2678,14 +2602,31 @@ xfs_imap( | |||
2678 | fsbno = imap->im_blkno ? | 2602 | fsbno = imap->im_blkno ? |
2679 | XFS_DADDR_TO_FSB(mp, imap->im_blkno) : NULLFSBLOCK; | 2603 | XFS_DADDR_TO_FSB(mp, imap->im_blkno) : NULLFSBLOCK; |
2680 | error = xfs_dilocate(mp, tp, ino, &fsbno, &len, &off, flags); | 2604 | error = xfs_dilocate(mp, tp, ino, &fsbno, &len, &off, flags); |
2681 | if (error != 0) { | 2605 | if (error) |
2682 | return error; | 2606 | return error; |
2683 | } | 2607 | |
2684 | imap->im_blkno = XFS_FSB_TO_DADDR(mp, fsbno); | 2608 | imap->im_blkno = XFS_FSB_TO_DADDR(mp, fsbno); |
2685 | imap->im_len = XFS_FSB_TO_BB(mp, len); | 2609 | imap->im_len = XFS_FSB_TO_BB(mp, len); |
2686 | imap->im_agblkno = XFS_FSB_TO_AGBNO(mp, fsbno); | 2610 | imap->im_agblkno = XFS_FSB_TO_AGBNO(mp, fsbno); |
2687 | imap->im_ioffset = (ushort)off; | 2611 | imap->im_ioffset = (ushort)off; |
2688 | imap->im_boffset = (ushort)(off << mp->m_sb.sb_inodelog); | 2612 | imap->im_boffset = (ushort)(off << mp->m_sb.sb_inodelog); |
2613 | |||
2614 | /* | ||
2615 | * If the inode number maps to a block outside the bounds | ||
2616 | * of the file system then return NULL rather than calling | ||
2617 | * read_buf and panicing when we get an error from the | ||
2618 | * driver. | ||
2619 | */ | ||
2620 | if ((imap->im_blkno + imap->im_len) > | ||
2621 | XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) { | ||
2622 | xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " | ||
2623 | "(imap->im_blkno (0x%llx) + imap->im_len (0x%llx)) > " | ||
2624 | " XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) (0x%llx)", | ||
2625 | (unsigned long long) imap->im_blkno, | ||
2626 | (unsigned long long) imap->im_len, | ||
2627 | XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)); | ||
2628 | return EINVAL; | ||
2629 | } | ||
2689 | return 0; | 2630 | return 0; |
2690 | } | 2631 | } |
2691 | 2632 | ||
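The bounds check that moves into xfs_imap() is plain block arithmetic: the mapped basic-block range has to end inside the data device. The same test written as a standalone predicate (xfs_example_imap_in_bounds() is a made-up helper name, not part of the patch):

/* non-zero when the mapped inode buffer lies wholly inside the fs */
static inline int
xfs_example_imap_in_bounds(
	xfs_mount_t	*mp,
	xfs_imap_t	*imap)
{
	/* sb_dblocks is in fs blocks; convert to 512-byte basic blocks */
	xfs_daddr_t	fs_size_bb = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);

	return (imap->im_blkno + imap->im_len) <= fs_size_bb;
}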
@@ -2826,38 +2767,41 @@ xfs_iunpin( | |||
2826 | } | 2767 | } |
2827 | 2768 | ||
2828 | /* | 2769 | /* |
2829 | * This is called to wait for the given inode to be unpinned. | 2770 | * This is called to unpin an inode. It can be directed to wait or to return |
2830 | * It will sleep until this happens. The caller must have the | 2771 | * immediately without waiting for the inode to be unpinned. The caller must |
2831 | * inode locked in at least shared mode so that the buffer cannot | 2772 | * have the inode locked in at least shared mode so that the buffer cannot be |
2832 | * be subsequently pinned once someone is waiting for it to be | 2773 | * subsequently pinned once someone is waiting for it to be unpinned. |
2833 | * unpinned. | ||
2834 | */ | 2774 | */ |
2835 | STATIC void | 2775 | STATIC void |
2836 | xfs_iunpin_wait( | 2776 | __xfs_iunpin_wait( |
2837 | xfs_inode_t *ip) | 2777 | xfs_inode_t *ip, |
2778 | int wait) | ||
2838 | { | 2779 | { |
2839 | xfs_inode_log_item_t *iip; | 2780 | xfs_inode_log_item_t *iip = ip->i_itemp; |
2840 | xfs_lsn_t lsn; | ||
2841 | 2781 | ||
2842 | ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS)); | 2782 | ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS)); |
2843 | 2783 | if (atomic_read(&ip->i_pincount) == 0) | |
2844 | if (atomic_read(&ip->i_pincount) == 0) { | ||
2845 | return; | 2784 | return; |
2846 | } | ||
2847 | 2785 | ||
2848 | iip = ip->i_itemp; | 2786 | /* Give the log a push to start the unpinning I/O */ |
2849 | if (iip && iip->ili_last_lsn) { | 2787 | xfs_log_force(ip->i_mount, (iip && iip->ili_last_lsn) ? |
2850 | lsn = iip->ili_last_lsn; | 2788 | iip->ili_last_lsn : 0, XFS_LOG_FORCE); |
2851 | } else { | 2789 | if (wait) |
2852 | lsn = (xfs_lsn_t)0; | 2790 | wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0)); |
2853 | } | 2791 | } |
2854 | 2792 | ||
2855 | /* | 2793 | static inline void |
2856 | * Give the log a push so we don't wait here too long. | 2794 | xfs_iunpin_wait( |
2857 | */ | 2795 | xfs_inode_t *ip) |
2858 | xfs_log_force(ip->i_mount, lsn, XFS_LOG_FORCE); | 2796 | { |
2797 | __xfs_iunpin_wait(ip, 1); | ||
2798 | } | ||
2859 | 2799 | ||
2860 | wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0)); | 2800 | static inline void |
2801 | xfs_iunpin_nowait( | ||
2802 | xfs_inode_t *ip) | ||
2803 | { | ||
2804 | __xfs_iunpin_wait(ip, 0); | ||
2861 | } | 2805 | } |
2862 | 2806 | ||
2863 | 2807 | ||
@@ -2932,7 +2876,7 @@ xfs_iextents_copy( | |||
2932 | * format indicates the current state of the fork. | 2876 | * format indicates the current state of the fork. |
2933 | */ | 2877 | */ |
2934 | /*ARGSUSED*/ | 2878 | /*ARGSUSED*/ |
2935 | STATIC int | 2879 | STATIC void |
2936 | xfs_iflush_fork( | 2880 | xfs_iflush_fork( |
2937 | xfs_inode_t *ip, | 2881 | xfs_inode_t *ip, |
2938 | xfs_dinode_t *dip, | 2882 | xfs_dinode_t *dip, |
@@ -2953,16 +2897,16 @@ xfs_iflush_fork( | |||
2953 | static const short extflag[2] = | 2897 | static const short extflag[2] = |
2954 | { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; | 2898 | { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; |
2955 | 2899 | ||
2956 | if (iip == NULL) | 2900 | if (!iip) |
2957 | return 0; | 2901 | return; |
2958 | ifp = XFS_IFORK_PTR(ip, whichfork); | 2902 | ifp = XFS_IFORK_PTR(ip, whichfork); |
2959 | /* | 2903 | /* |
2960 | * This can happen if we gave up in iformat in an error path, | 2904 | * This can happen if we gave up in iformat in an error path, |
2961 | * for the attribute fork. | 2905 | * for the attribute fork. |
2962 | */ | 2906 | */ |
2963 | if (ifp == NULL) { | 2907 | if (!ifp) { |
2964 | ASSERT(whichfork == XFS_ATTR_FORK); | 2908 | ASSERT(whichfork == XFS_ATTR_FORK); |
2965 | return 0; | 2909 | return; |
2966 | } | 2910 | } |
2967 | cp = XFS_DFORK_PTR(dip, whichfork); | 2911 | cp = XFS_DFORK_PTR(dip, whichfork); |
2968 | mp = ip->i_mount; | 2912 | mp = ip->i_mount; |
@@ -3023,8 +2967,145 @@ xfs_iflush_fork( | |||
3023 | ASSERT(0); | 2967 | ASSERT(0); |
3024 | break; | 2968 | break; |
3025 | } | 2969 | } |
2970 | } | ||
2971 | |||
2972 | STATIC int | ||
2973 | xfs_iflush_cluster( | ||
2974 | xfs_inode_t *ip, | ||
2975 | xfs_buf_t *bp) | ||
2976 | { | ||
2977 | xfs_mount_t *mp = ip->i_mount; | ||
2978 | xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino); | ||
2979 | unsigned long first_index, mask; | ||
2980 | int ilist_size; | ||
2981 | xfs_inode_t **ilist; | ||
2982 | xfs_inode_t *iq; | ||
2983 | int nr_found; | ||
2984 | int clcount = 0; | ||
2985 | int bufwasdelwri; | ||
2986 | int i; | ||
2987 | |||
2988 | ASSERT(pag->pagi_inodeok); | ||
2989 | ASSERT(pag->pag_ici_init); | ||
2990 | |||
2991 | ilist_size = XFS_INODE_CLUSTER_SIZE(mp) * sizeof(xfs_inode_t *); | ||
2992 | ilist = kmem_alloc(ilist_size, KM_MAYFAIL); | ||
2993 | if (!ilist) | ||
2994 | return 0; | ||
2995 | |||
2996 | mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1); | ||
2997 | first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask; | ||
2998 | read_lock(&pag->pag_ici_lock); | ||
2999 | /* really need a gang lookup range call here */ | ||
3000 | nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist, | ||
3001 | first_index, | ||
3002 | XFS_INODE_CLUSTER_SIZE(mp)); | ||
3003 | if (nr_found == 0) | ||
3004 | goto out_free; | ||
3005 | |||
3006 | for (i = 0; i < nr_found; i++) { | ||
3007 | iq = ilist[i]; | ||
3008 | if (iq == ip) | ||
3009 | continue; | ||
3010 | /* if the inode lies outside this cluster, we're done. */ | ||
3011 | if ((XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) | ||
3012 | break; | ||
3013 | /* | ||
3014 | * Do an unprotected check to see if the inode is dirty and | ||
3015 | * is a candidate for flushing. These checks will be repeated | ||
3016 | * later after the appropriate locks are acquired. | ||
3017 | */ | ||
3018 | if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0) | ||
3019 | continue; | ||
3020 | |||
3021 | /* | ||
3022 | * Try to get locks. If any are unavailable or it is pinned, | ||
3023 | * then this inode cannot be flushed and is skipped. | ||
3024 | */ | ||
3025 | |||
3026 | if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED)) | ||
3027 | continue; | ||
3028 | if (!xfs_iflock_nowait(iq)) { | ||
3029 | xfs_iunlock(iq, XFS_ILOCK_SHARED); | ||
3030 | continue; | ||
3031 | } | ||
3032 | if (xfs_ipincount(iq)) { | ||
3033 | xfs_ifunlock(iq); | ||
3034 | xfs_iunlock(iq, XFS_ILOCK_SHARED); | ||
3035 | continue; | ||
3036 | } | ||
3037 | |||
3038 | /* | ||
3039 | * arriving here means that this inode can be flushed. First | ||
3040 | * re-check that it's dirty before flushing. | ||
3041 | */ | ||
3042 | if (!xfs_inode_clean(iq)) { | ||
3043 | int error; | ||
3044 | error = xfs_iflush_int(iq, bp); | ||
3045 | if (error) { | ||
3046 | xfs_iunlock(iq, XFS_ILOCK_SHARED); | ||
3047 | goto cluster_corrupt_out; | ||
3048 | } | ||
3049 | clcount++; | ||
3050 | } else { | ||
3051 | xfs_ifunlock(iq); | ||
3052 | } | ||
3053 | xfs_iunlock(iq, XFS_ILOCK_SHARED); | ||
3054 | } | ||
3055 | |||
3056 | if (clcount) { | ||
3057 | XFS_STATS_INC(xs_icluster_flushcnt); | ||
3058 | XFS_STATS_ADD(xs_icluster_flushinode, clcount); | ||
3059 | } | ||
3026 | 3060 | ||
3061 | out_free: | ||
3062 | read_unlock(&pag->pag_ici_lock); | ||
3063 | kmem_free(ilist, ilist_size); | ||
3027 | return 0; | 3064 | return 0; |
3065 | |||
3066 | |||
3067 | cluster_corrupt_out: | ||
3068 | /* | ||
3069 | * Corruption detected in the clustering loop. Invalidate the | ||
3070 | * inode buffer and shut down the filesystem. | ||
3071 | */ | ||
3072 | read_unlock(&pag->pag_ici_lock); | ||
3073 | /* | ||
3074 | * Clean up the buffer. If it was B_DELWRI, just release it -- | ||
3075 | * brelse can handle it with no problems. If not, shut down the | ||
3076 | * filesystem before releasing the buffer. | ||
3077 | */ | ||
3078 | bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp); | ||
3079 | if (bufwasdelwri) | ||
3080 | xfs_buf_relse(bp); | ||
3081 | |||
3082 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); | ||
3083 | |||
3084 | if (!bufwasdelwri) { | ||
3085 | /* | ||
3086 | * Just like incore_relse: if we have b_iodone functions, | ||
3087 | * mark the buffer as an error and call them. Otherwise | ||
3088 | * mark it as stale and brelse. | ||
3089 | */ | ||
3090 | if (XFS_BUF_IODONE_FUNC(bp)) { | ||
3091 | XFS_BUF_CLR_BDSTRAT_FUNC(bp); | ||
3092 | XFS_BUF_UNDONE(bp); | ||
3093 | XFS_BUF_STALE(bp); | ||
3094 | XFS_BUF_SHUT(bp); | ||
3095 | XFS_BUF_ERROR(bp,EIO); | ||
3096 | xfs_biodone(bp); | ||
3097 | } else { | ||
3098 | XFS_BUF_STALE(bp); | ||
3099 | xfs_buf_relse(bp); | ||
3100 | } | ||
3101 | } | ||
3102 | |||
3103 | /* | ||
3104 | * Unlocks the flush lock | ||
3105 | */ | ||
3106 | xfs_iflush_abort(iq); | ||
3107 | kmem_free(ilist, ilist_size); | ||
3108 | return XFS_ERROR(EFSCORRUPTED); | ||
3028 | } | 3109 | } |
3029 | 3110 | ||
3030 | /* | 3111 | /* |
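The radix-tree walk in the new xfs_iflush_cluster() keys off the first inode of the cluster: mask clears the low bits of the agino, so first_index is the first inode in the same cluster as ip. With example numbers only (assuming an 8 KiB inode cluster and 256-byte inodes, i.e. sb_inodelog == 8):

	unsigned long	cluster_bytes = 8192;		/* XFS_INODE_CLUSTER_SIZE(mp), example value */
	unsigned long	inodelog = 8;			/* mp->m_sb.sb_inodelog for 256-byte inodes */
	unsigned long	inodes_per_cluster = cluster_bytes >> inodelog;	/* 32 */
	unsigned long	mask = ~(inodes_per_cluster - 1);	/* ~31UL */
	unsigned long	agino = 1234;			/* arbitrary example inode number */
	unsigned long	first_index = agino & mask;	/* 1216, start of the cluster */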
@@ -3046,11 +3127,7 @@ xfs_iflush( | |||
3046 | xfs_dinode_t *dip; | 3127 | xfs_dinode_t *dip; |
3047 | xfs_mount_t *mp; | 3128 | xfs_mount_t *mp; |
3048 | int error; | 3129 | int error; |
3049 | /* REFERENCED */ | 3130 | int noblock = (flags == XFS_IFLUSH_ASYNC_NOBLOCK); |
3050 | xfs_inode_t *iq; | ||
3051 | int clcount; /* count of inodes clustered */ | ||
3052 | int bufwasdelwri; | ||
3053 | struct hlist_node *entry; | ||
3054 | enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) }; | 3131 | enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) }; |
3055 | 3132 | ||
3056 | XFS_STATS_INC(xs_iflush_count); | 3133 | XFS_STATS_INC(xs_iflush_count); |
@@ -3067,8 +3144,7 @@ xfs_iflush( | |||
3067 | * If the inode isn't dirty, then just release the inode | 3144 | * If the inode isn't dirty, then just release the inode |
3068 | * flush lock and do nothing. | 3145 | * flush lock and do nothing. |
3069 | */ | 3146 | */ |
3070 | if ((ip->i_update_core == 0) && | 3147 | if (xfs_inode_clean(ip)) { |
3071 | ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) { | ||
3072 | ASSERT((iip != NULL) ? | 3148 | ASSERT((iip != NULL) ? |
3073 | !(iip->ili_item.li_flags & XFS_LI_IN_AIL) : 1); | 3149 | !(iip->ili_item.li_flags & XFS_LI_IN_AIL) : 1); |
3074 | xfs_ifunlock(ip); | 3150 | xfs_ifunlock(ip); |
@@ -3076,11 +3152,21 @@ xfs_iflush( | |||
3076 | } | 3152 | } |
3077 | 3153 | ||
3078 | /* | 3154 | /* |
3079 | * We can't flush the inode until it is unpinned, so | 3155 | * We can't flush the inode until it is unpinned, so wait for it if we |
3080 | * wait for it. We know no one new can pin it, because | 3156 | * are allowed to block. We know no one new can pin it, because we are
3081 | * we are holding the inode lock shared and you need | 3157 | * holding the inode lock shared and you need to hold it exclusively to |
3082 | * to hold it exclusively to pin the inode. | 3158 | * pin the inode. |
3159 | * | ||
3160 | * If we are not allowed to block, force the log out asynchronously so | ||
3161 | * that when we come back the inode will be unpinned. If other inodes | ||
3162 | * in the same cluster are dirty, they will probably write the inode | ||
3163 | * out for us if they occur after the log force completes. | ||
3083 | */ | 3164 | */ |
3165 | if (noblock && xfs_ipincount(ip)) { | ||
3166 | xfs_iunpin_nowait(ip); | ||
3167 | xfs_ifunlock(ip); | ||
3168 | return EAGAIN; | ||
3169 | } | ||
3084 | xfs_iunpin_wait(ip); | 3170 | xfs_iunpin_wait(ip); |
3085 | 3171 | ||
3086 | /* | 3172 | /* |
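With the new XFS_IFLUSH_ASYNC_NOBLOCK mode a pinned inode is not waited on: the log gets an asynchronous push and xfs_iflush() returns EAGAIN so the caller can come back to the inode later. A hedged example of how a non-blocking caller might treat that return (the snippet is illustrative, not taken from this patch):

	/* non-blocking caller: skip busy inodes and retry on a later pass */
	error = xfs_iflush(ip, XFS_IFLUSH_ASYNC_NOBLOCK);
	if (error == EAGAIN)
		error = 0;	/* inode pinned or buffer locked; not an error here */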
@@ -3097,15 +3183,6 @@ xfs_iflush( | |||
3097 | } | 3183 | } |
3098 | 3184 | ||
3099 | /* | 3185 | /* |
3100 | * Get the buffer containing the on-disk inode. | ||
3101 | */ | ||
3102 | error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0); | ||
3103 | if (error) { | ||
3104 | xfs_ifunlock(ip); | ||
3105 | return error; | ||
3106 | } | ||
3107 | |||
3108 | /* | ||
3109 | * Decide how buffer will be flushed out. This is done before | 3186 | * Decide how buffer will be flushed out. This is done before |
3110 | * the call to xfs_iflush_int because this field is zeroed by it. | 3187 | * the call to xfs_iflush_int because this field is zeroed by it. |
3111 | */ | 3188 | */ |
@@ -3121,6 +3198,7 @@ xfs_iflush( | |||
3121 | case XFS_IFLUSH_DELWRI_ELSE_SYNC: | 3198 | case XFS_IFLUSH_DELWRI_ELSE_SYNC: |
3122 | flags = 0; | 3199 | flags = 0; |
3123 | break; | 3200 | break; |
3201 | case XFS_IFLUSH_ASYNC_NOBLOCK: | ||
3124 | case XFS_IFLUSH_ASYNC: | 3202 | case XFS_IFLUSH_ASYNC: |
3125 | case XFS_IFLUSH_DELWRI_ELSE_ASYNC: | 3203 | case XFS_IFLUSH_DELWRI_ELSE_ASYNC: |
3126 | flags = INT_ASYNC; | 3204 | flags = INT_ASYNC; |
@@ -3140,6 +3218,7 @@ xfs_iflush( | |||
3140 | case XFS_IFLUSH_DELWRI: | 3218 | case XFS_IFLUSH_DELWRI: |
3141 | flags = INT_DELWRI; | 3219 | flags = INT_DELWRI; |
3142 | break; | 3220 | break; |
3221 | case XFS_IFLUSH_ASYNC_NOBLOCK: | ||
3143 | case XFS_IFLUSH_ASYNC: | 3222 | case XFS_IFLUSH_ASYNC: |
3144 | flags = INT_ASYNC; | 3223 | flags = INT_ASYNC; |
3145 | break; | 3224 | break; |
@@ -3154,94 +3233,41 @@ xfs_iflush( | |||
3154 | } | 3233 | } |
3155 | 3234 | ||
3156 | /* | 3235 | /* |
3157 | * First flush out the inode that xfs_iflush was called with. | 3236 | * Get the buffer containing the on-disk inode. |
3158 | */ | 3237 | */ |
3159 | error = xfs_iflush_int(ip, bp); | 3238 | error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0, |
3160 | if (error) { | 3239 | noblock ? XFS_BUF_TRYLOCK : XFS_BUF_LOCK); |
3161 | goto corrupt_out; | 3240 | if (error || !bp) { |
3241 | xfs_ifunlock(ip); | ||
3242 | return error; | ||
3162 | } | 3243 | } |
3163 | 3244 | ||
3164 | /* | 3245 | /* |
3165 | * inode clustering: | 3246 | * First flush out the inode that xfs_iflush was called with. |
3166 | * see if other inodes can be gathered into this write | ||
3167 | */ | 3247 | */ |
3168 | spin_lock(&ip->i_cluster->icl_lock); | 3248 | error = xfs_iflush_int(ip, bp); |
3169 | ip->i_cluster->icl_buf = bp; | 3249 | if (error) |
3170 | 3250 | goto corrupt_out; | |
3171 | clcount = 0; | ||
3172 | hlist_for_each_entry(iq, entry, &ip->i_cluster->icl_inodes, i_cnode) { | ||
3173 | if (iq == ip) | ||
3174 | continue; | ||
3175 | |||
3176 | /* | ||
3177 | * Do an un-protected check to see if the inode is dirty and | ||
3178 | * is a candidate for flushing. These checks will be repeated | ||
3179 | * later after the appropriate locks are acquired. | ||
3180 | */ | ||
3181 | iip = iq->i_itemp; | ||
3182 | if ((iq->i_update_core == 0) && | ||
3183 | ((iip == NULL) || | ||
3184 | !(iip->ili_format.ilf_fields & XFS_ILOG_ALL)) && | ||
3185 | xfs_ipincount(iq) == 0) { | ||
3186 | continue; | ||
3187 | } | ||
3188 | |||
3189 | /* | ||
3190 | * Try to get locks. If any are unavailable, | ||
3191 | * then this inode cannot be flushed and is skipped. | ||
3192 | */ | ||
3193 | |||
3194 | /* get inode locks (just i_lock) */ | ||
3195 | if (xfs_ilock_nowait(iq, XFS_ILOCK_SHARED)) { | ||
3196 | /* get inode flush lock */ | ||
3197 | if (xfs_iflock_nowait(iq)) { | ||
3198 | /* check if pinned */ | ||
3199 | if (xfs_ipincount(iq) == 0) { | ||
3200 | /* arriving here means that | ||
3201 | * this inode can be flushed. | ||
3202 | * first re-check that it's | ||
3203 | * dirty | ||
3204 | */ | ||
3205 | iip = iq->i_itemp; | ||
3206 | if ((iq->i_update_core != 0)|| | ||
3207 | ((iip != NULL) && | ||
3208 | (iip->ili_format.ilf_fields & XFS_ILOG_ALL))) { | ||
3209 | clcount++; | ||
3210 | error = xfs_iflush_int(iq, bp); | ||
3211 | if (error) { | ||
3212 | xfs_iunlock(iq, | ||
3213 | XFS_ILOCK_SHARED); | ||
3214 | goto cluster_corrupt_out; | ||
3215 | } | ||
3216 | } else { | ||
3217 | xfs_ifunlock(iq); | ||
3218 | } | ||
3219 | } else { | ||
3220 | xfs_ifunlock(iq); | ||
3221 | } | ||
3222 | } | ||
3223 | xfs_iunlock(iq, XFS_ILOCK_SHARED); | ||
3224 | } | ||
3225 | } | ||
3226 | spin_unlock(&ip->i_cluster->icl_lock); | ||
3227 | |||
3228 | if (clcount) { | ||
3229 | XFS_STATS_INC(xs_icluster_flushcnt); | ||
3230 | XFS_STATS_ADD(xs_icluster_flushinode, clcount); | ||
3231 | } | ||
3232 | 3251 | ||
3233 | /* | 3252 | /* |
3234 | * If the buffer is pinned then push on the log so we won't | 3253 | * If the buffer is pinned then push on the log now so we won't |
3235 | * get stuck waiting in the write for too long. | 3254 | * get stuck waiting in the write for too long. |
3236 | */ | 3255 | */ |
3237 | if (XFS_BUF_ISPINNED(bp)){ | 3256 | if (XFS_BUF_ISPINNED(bp)) |
3238 | xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); | 3257 | xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); |
3239 | } | 3258 | |
3259 | /* | ||
3260 | * inode clustering: | ||
3261 | * see if other inodes can be gathered into this write | ||
3262 | */ | ||
3263 | error = xfs_iflush_cluster(ip, bp); | ||
3264 | if (error) | ||
3265 | goto cluster_corrupt_out; | ||
3240 | 3266 | ||
3241 | if (flags & INT_DELWRI) { | 3267 | if (flags & INT_DELWRI) { |
3242 | xfs_bdwrite(mp, bp); | 3268 | xfs_bdwrite(mp, bp); |
3243 | } else if (flags & INT_ASYNC) { | 3269 | } else if (flags & INT_ASYNC) { |
3244 | xfs_bawrite(mp, bp); | 3270 | error = xfs_bawrite(mp, bp); |
3245 | } else { | 3271 | } else { |
3246 | error = xfs_bwrite(mp, bp); | 3272 | error = xfs_bwrite(mp, bp); |
3247 | } | 3273 | } |
@@ -3250,52 +3276,11 @@ xfs_iflush( | |||
3250 | corrupt_out: | 3276 | corrupt_out: |
3251 | xfs_buf_relse(bp); | 3277 | xfs_buf_relse(bp); |
3252 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); | 3278 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); |
3253 | xfs_iflush_abort(ip); | ||
3254 | /* | ||
3255 | * Unlocks the flush lock | ||
3256 | */ | ||
3257 | return XFS_ERROR(EFSCORRUPTED); | ||
3258 | |||
3259 | cluster_corrupt_out: | 3279 | cluster_corrupt_out: |
3260 | /* Corruption detected in the clustering loop. Invalidate the | ||
3261 | * inode buffer and shut down the filesystem. | ||
3262 | */ | ||
3263 | spin_unlock(&ip->i_cluster->icl_lock); | ||
3264 | |||
3265 | /* | ||
3266 | * Clean up the buffer. If it was B_DELWRI, just release it -- | ||
3267 | * brelse can handle it with no problems. If not, shut down the | ||
3268 | * filesystem before releasing the buffer. | ||
3269 | */ | ||
3270 | if ((bufwasdelwri= XFS_BUF_ISDELAYWRITE(bp))) { | ||
3271 | xfs_buf_relse(bp); | ||
3272 | } | ||
3273 | |||
3274 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); | ||
3275 | |||
3276 | if(!bufwasdelwri) { | ||
3277 | /* | ||
3278 | * Just like incore_relse: if we have b_iodone functions, | ||
3279 | * mark the buffer as an error and call them. Otherwise | ||
3280 | * mark it as stale and brelse. | ||
3281 | */ | ||
3282 | if (XFS_BUF_IODONE_FUNC(bp)) { | ||
3283 | XFS_BUF_CLR_BDSTRAT_FUNC(bp); | ||
3284 | XFS_BUF_UNDONE(bp); | ||
3285 | XFS_BUF_STALE(bp); | ||
3286 | XFS_BUF_SHUT(bp); | ||
3287 | XFS_BUF_ERROR(bp,EIO); | ||
3288 | xfs_biodone(bp); | ||
3289 | } else { | ||
3290 | XFS_BUF_STALE(bp); | ||
3291 | xfs_buf_relse(bp); | ||
3292 | } | ||
3293 | } | ||
3294 | |||
3295 | xfs_iflush_abort(iq); | ||
3296 | /* | 3280 | /* |
3297 | * Unlocks the flush lock | 3281 | * Unlocks the flush lock |
3298 | */ | 3282 | */ |
3283 | xfs_iflush_abort(ip); | ||
3299 | return XFS_ERROR(EFSCORRUPTED); | 3284 | return XFS_ERROR(EFSCORRUPTED); |
3300 | } | 3285 | } |
3301 | 3286 | ||
@@ -3325,8 +3310,7 @@ xfs_iflush_int( | |||
3325 | * If the inode isn't dirty, then just release the inode | 3310 | * If the inode isn't dirty, then just release the inode |
3326 | * flush lock and do nothing. | 3311 | * flush lock and do nothing. |
3327 | */ | 3312 | */ |
3328 | if ((ip->i_update_core == 0) && | 3313 | if (xfs_inode_clean(ip)) { |
3329 | ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) { | ||
3330 | xfs_ifunlock(ip); | 3314 | xfs_ifunlock(ip); |
3331 | return 0; | 3315 | return 0; |
3332 | } | 3316 | } |
@@ -3459,16 +3443,9 @@ xfs_iflush_int( | |||
3459 | } | 3443 | } |
3460 | } | 3444 | } |
3461 | 3445 | ||
3462 | if (xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp) == EFSCORRUPTED) { | 3446 | xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp); |
3463 | goto corrupt_out; | 3447 | if (XFS_IFORK_Q(ip)) |
3464 | } | 3448 | xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp); |
3465 | |||
3466 | if (XFS_IFORK_Q(ip)) { | ||
3467 | /* | ||
3468 | * The only error from xfs_iflush_fork is on the data fork. | ||
3469 | */ | ||
3470 | (void) xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp); | ||
3471 | } | ||
3472 | xfs_inobp_check(mp, bp); | 3449 | xfs_inobp_check(mp, bp); |
3473 | 3450 | ||
3474 | /* | 3451 | /* |
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index bfcd72cbaeea..93c37697a72c 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h | |||
@@ -133,19 +133,6 @@ typedef struct dm_attrs_s { | |||
133 | } dm_attrs_t; | 133 | } dm_attrs_t; |
134 | 134 | ||
135 | /* | 135 | /* |
136 | * This is the xfs inode cluster structure. This structure is used by | ||
137 | * xfs_iflush to find inodes that share a cluster and can be flushed to disk at | ||
138 | * the same time. | ||
139 | */ | ||
140 | typedef struct xfs_icluster { | ||
141 | struct hlist_head icl_inodes; /* list of inodes on cluster */ | ||
142 | xfs_daddr_t icl_blkno; /* starting block number of | ||
143 | * the cluster */ | ||
144 | struct xfs_buf *icl_buf; /* the inode buffer */ | ||
145 | spinlock_t icl_lock; /* inode list lock */ | ||
146 | } xfs_icluster_t; | ||
147 | |||
148 | /* | ||
149 | * This is the xfs in-core inode structure. | 136 | * This is the xfs in-core inode structure. |
150 | * Most of the on-disk inode is embedded in the i_d field. | 137 | * Most of the on-disk inode is embedded in the i_d field. |
151 | * | 138 | * |
@@ -240,10 +227,6 @@ typedef struct xfs_inode { | |||
240 | atomic_t i_pincount; /* inode pin count */ | 227 | atomic_t i_pincount; /* inode pin count */ |
241 | wait_queue_head_t i_ipin_wait; /* inode pinning wait queue */ | 228 | wait_queue_head_t i_ipin_wait; /* inode pinning wait queue */ |
242 | spinlock_t i_flags_lock; /* inode i_flags lock */ | 229 | spinlock_t i_flags_lock; /* inode i_flags lock */ |
243 | #ifdef HAVE_REFCACHE | ||
244 | struct xfs_inode **i_refcache; /* ptr to entry in ref cache */ | ||
245 | struct xfs_inode *i_release; /* inode to unref */ | ||
246 | #endif | ||
247 | /* Miscellaneous state. */ | 230 | /* Miscellaneous state. */ |
248 | unsigned short i_flags; /* see defined flags below */ | 231 | unsigned short i_flags; /* see defined flags below */ |
249 | unsigned char i_update_core; /* timestamps/size is dirty */ | 232 | unsigned char i_update_core; /* timestamps/size is dirty */ |
@@ -252,8 +235,6 @@ typedef struct xfs_inode { | |||
252 | unsigned int i_delayed_blks; /* count of delay alloc blks */ | 235 | unsigned int i_delayed_blks; /* count of delay alloc blks */ |
253 | 236 | ||
254 | xfs_icdinode_t i_d; /* most of ondisk inode */ | 237 | xfs_icdinode_t i_d; /* most of ondisk inode */ |
255 | xfs_icluster_t *i_cluster; /* cluster list header */ | ||
256 | struct hlist_node i_cnode; /* cluster link node */ | ||
257 | 238 | ||
258 | xfs_fsize_t i_size; /* in-memory size */ | 239 | xfs_fsize_t i_size; /* in-memory size */ |
259 | xfs_fsize_t i_new_size; /* size when write completes */ | 240 | xfs_fsize_t i_new_size; /* size when write completes */ |
@@ -461,6 +442,7 @@ xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags) | |||
461 | #define XFS_IFLUSH_SYNC 3 | 442 | #define XFS_IFLUSH_SYNC 3 |
462 | #define XFS_IFLUSH_ASYNC 4 | 443 | #define XFS_IFLUSH_ASYNC 4 |
463 | #define XFS_IFLUSH_DELWRI 5 | 444 | #define XFS_IFLUSH_DELWRI 5 |
445 | #define XFS_IFLUSH_ASYNC_NOBLOCK 6 | ||
464 | 446 | ||
465 | /* | 447 | /* |
466 | * Flags for xfs_itruncate_start(). | 448 | * Flags for xfs_itruncate_start(). |
@@ -515,7 +497,7 @@ int xfs_finish_reclaim_all(struct xfs_mount *, int); | |||
515 | */ | 497 | */ |
516 | int xfs_itobp(struct xfs_mount *, struct xfs_trans *, | 498 | int xfs_itobp(struct xfs_mount *, struct xfs_trans *, |
517 | xfs_inode_t *, struct xfs_dinode **, struct xfs_buf **, | 499 | xfs_inode_t *, struct xfs_dinode **, struct xfs_buf **, |
518 | xfs_daddr_t, uint); | 500 | xfs_daddr_t, uint, uint); |
519 | int xfs_iread(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, | 501 | int xfs_iread(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, |
520 | xfs_inode_t **, xfs_daddr_t, uint); | 502 | xfs_inode_t **, xfs_daddr_t, uint); |
521 | int xfs_iread_extents(struct xfs_trans *, xfs_inode_t *, int); | 503 | int xfs_iread_extents(struct xfs_trans *, xfs_inode_t *, int); |
@@ -597,7 +579,6 @@ void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *); | |||
597 | #define xfs_inobp_check(mp, bp) | 579 | #define xfs_inobp_check(mp, bp) |
598 | #endif /* DEBUG */ | 580 | #endif /* DEBUG */ |
599 | 581 | ||
600 | extern struct kmem_zone *xfs_icluster_zone; | ||
601 | extern struct kmem_zone *xfs_ifork_zone; | 582 | extern struct kmem_zone *xfs_ifork_zone; |
602 | extern struct kmem_zone *xfs_inode_zone; | 583 | extern struct kmem_zone *xfs_inode_zone; |
603 | extern struct kmem_zone *xfs_ili_zone; | 584 | extern struct kmem_zone *xfs_ili_zone; |
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 2c775b4ae9e6..93b5db453ea2 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include "xfs_btree.h" | 40 | #include "xfs_btree.h" |
41 | #include "xfs_ialloc.h" | 41 | #include "xfs_ialloc.h" |
42 | #include "xfs_rw.h" | 42 | #include "xfs_rw.h" |
43 | #include "xfs_error.h" | ||
43 | 44 | ||
44 | 45 | ||
45 | kmem_zone_t *xfs_ili_zone; /* inode log item zone */ | 46 | kmem_zone_t *xfs_ili_zone; /* inode log item zone */ |
@@ -813,7 +814,12 @@ xfs_inode_item_pushbuf( | |||
813 | XFS_LOG_FORCE); | 814 | XFS_LOG_FORCE); |
814 | } | 815 | } |
815 | if (dopush) { | 816 | if (dopush) { |
816 | xfs_bawrite(mp, bp); | 817 | int error; |
818 | error = xfs_bawrite(mp, bp); | ||
819 | if (error) | ||
820 | xfs_fs_cmn_err(CE_WARN, mp, | ||
821 | "xfs_inode_item_pushbuf: pushbuf error %d on iip %p, bp %p", | ||
822 | error, iip, bp); | ||
817 | } else { | 823 | } else { |
818 | xfs_buf_relse(bp); | 824 | xfs_buf_relse(bp); |
819 | } | 825 | } |
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h index bfe92ea17952..40513077ab36 100644 --- a/fs/xfs/xfs_inode_item.h +++ b/fs/xfs/xfs_inode_item.h | |||
@@ -168,6 +168,14 @@ static inline int xfs_ilog_fext(int w) | |||
168 | return (w == XFS_DATA_FORK ? XFS_ILOG_DEXT : XFS_ILOG_AEXT); | 168 | return (w == XFS_DATA_FORK ? XFS_ILOG_DEXT : XFS_ILOG_AEXT); |
169 | } | 169 | } |
170 | 170 | ||
171 | static inline int xfs_inode_clean(xfs_inode_t *ip) | ||
172 | { | ||
173 | return (!ip->i_itemp || | ||
174 | !(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL)) && | ||
175 | !ip->i_update_core; | ||
176 | } | ||
177 | |||
178 | |||
171 | #ifdef __KERNEL__ | 179 | #ifdef __KERNEL__ |
172 | 180 | ||
173 | extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *); | 181 | extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *); |
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index fde37f87d52f..fb3cf1191419 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c | |||
@@ -802,8 +802,11 @@ xfs_iomap_write_allocate( | |||
802 | */ | 802 | */ |
803 | nimaps = 1; | 803 | nimaps = 1; |
804 | end_fsb = XFS_B_TO_FSB(mp, ip->i_size); | 804 | end_fsb = XFS_B_TO_FSB(mp, ip->i_size); |
805 | xfs_bmap_last_offset(NULL, ip, &last_block, | 805 | error = xfs_bmap_last_offset(NULL, ip, &last_block, |
806 | XFS_DATA_FORK); | 806 | XFS_DATA_FORK); |
807 | if (error) | ||
808 | goto trans_cancel; | ||
809 | |||
807 | last_block = XFS_FILEOFF_MAX(last_block, end_fsb); | 810 | last_block = XFS_FILEOFF_MAX(last_block, end_fsb); |
808 | if ((map_start_fsb + count_fsb) > last_block) { | 811 | if ((map_start_fsb + count_fsb) > last_block) { |
809 | count_fsb = last_block - map_start_fsb; | 812 | count_fsb = last_block - map_start_fsb; |
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index f615e04364f4..eb85bdedad0c 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c | |||
@@ -129,7 +129,7 @@ xfs_bulkstat_one_iget( | |||
129 | return error; | 129 | return error; |
130 | } | 130 | } |
131 | 131 | ||
132 | STATIC int | 132 | STATIC void |
133 | xfs_bulkstat_one_dinode( | 133 | xfs_bulkstat_one_dinode( |
134 | xfs_mount_t *mp, /* mount point for filesystem */ | 134 | xfs_mount_t *mp, /* mount point for filesystem */ |
135 | xfs_ino_t ino, /* inode number to get data for */ | 135 | xfs_ino_t ino, /* inode number to get data for */ |
@@ -198,8 +198,6 @@ xfs_bulkstat_one_dinode( | |||
198 | buf->bs_blocks = be64_to_cpu(dic->di_nblocks); | 198 | buf->bs_blocks = be64_to_cpu(dic->di_nblocks); |
199 | break; | 199 | break; |
200 | } | 200 | } |
201 | |||
202 | return 0; | ||
203 | } | 201 | } |
204 | 202 | ||
205 | STATIC int | 203 | STATIC int |
@@ -614,7 +612,8 @@ xfs_bulkstat( | |||
614 | xfs_buf_relse(bp); | 612 | xfs_buf_relse(bp); |
615 | error = xfs_itobp(mp, NULL, ip, | 613 | error = xfs_itobp(mp, NULL, ip, |
616 | &dip, &bp, bno, | 614 | &dip, &bp, bno, |
617 | XFS_IMAP_BULKSTAT); | 615 | XFS_IMAP_BULKSTAT, |
616 | XFS_BUF_LOCK); | ||
618 | if (!error) | 617 | if (!error) |
619 | clustidx = ip->i_boffset / mp->m_sb.sb_inodesize; | 618 | clustidx = ip->i_boffset / mp->m_sb.sb_inodesize; |
620 | kmem_zone_free(xfs_inode_zone, ip); | 619 | kmem_zone_free(xfs_inode_zone, ip); |
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 31f2b04f2c97..afaee301b0ee 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include "xfs_inode.h" | 41 | #include "xfs_inode.h" |
42 | #include "xfs_rw.h" | 42 | #include "xfs_rw.h" |
43 | 43 | ||
44 | kmem_zone_t *xfs_log_ticket_zone; | ||
44 | 45 | ||
45 | #define xlog_write_adv_cnt(ptr, len, off, bytes) \ | 46 | #define xlog_write_adv_cnt(ptr, len, off, bytes) \ |
46 | { (ptr) += (bytes); \ | 47 | { (ptr) += (bytes); \ |
@@ -73,8 +74,6 @@ STATIC int xlog_state_get_iclog_space(xlog_t *log, | |||
73 | xlog_ticket_t *ticket, | 74 | xlog_ticket_t *ticket, |
74 | int *continued_write, | 75 | int *continued_write, |
75 | int *logoffsetp); | 76 | int *logoffsetp); |
76 | STATIC void xlog_state_put_ticket(xlog_t *log, | ||
77 | xlog_ticket_t *tic); | ||
78 | STATIC int xlog_state_release_iclog(xlog_t *log, | 77 | STATIC int xlog_state_release_iclog(xlog_t *log, |
79 | xlog_in_core_t *iclog); | 78 | xlog_in_core_t *iclog); |
80 | STATIC void xlog_state_switch_iclogs(xlog_t *log, | 79 | STATIC void xlog_state_switch_iclogs(xlog_t *log, |
@@ -101,7 +100,6 @@ STATIC void xlog_ungrant_log_space(xlog_t *log, | |||
101 | 100 | ||
102 | 101 | ||
103 | /* local ticket functions */ | 102 | /* local ticket functions */ |
104 | STATIC void xlog_state_ticket_alloc(xlog_t *log); | ||
105 | STATIC xlog_ticket_t *xlog_ticket_get(xlog_t *log, | 103 | STATIC xlog_ticket_t *xlog_ticket_get(xlog_t *log, |
106 | int unit_bytes, | 104 | int unit_bytes, |
107 | int count, | 105 | int count, |
@@ -330,7 +328,7 @@ xfs_log_done(xfs_mount_t *mp, | |||
330 | */ | 328 | */ |
331 | xlog_trace_loggrant(log, ticket, "xfs_log_done: (non-permanent)"); | 329 | xlog_trace_loggrant(log, ticket, "xfs_log_done: (non-permanent)"); |
332 | xlog_ungrant_log_space(log, ticket); | 330 | xlog_ungrant_log_space(log, ticket); |
333 | xlog_state_put_ticket(log, ticket); | 331 | xlog_ticket_put(log, ticket); |
334 | } else { | 332 | } else { |
335 | xlog_trace_loggrant(log, ticket, "xfs_log_done: (permanent)"); | 333 | xlog_trace_loggrant(log, ticket, "xfs_log_done: (permanent)"); |
336 | xlog_regrant_reserve_log_space(log, ticket); | 334 | xlog_regrant_reserve_log_space(log, ticket); |
@@ -384,7 +382,27 @@ _xfs_log_force( | |||
384 | return xlog_state_sync_all(log, flags, log_flushed); | 382 | return xlog_state_sync_all(log, flags, log_flushed); |
385 | else | 383 | else |
386 | return xlog_state_sync(log, lsn, flags, log_flushed); | 384 | return xlog_state_sync(log, lsn, flags, log_flushed); |
387 | } /* xfs_log_force */ | 385 | } /* _xfs_log_force */ |
386 | |||
387 | /* | ||
388 | * Wrapper for _xfs_log_force(), to be used when caller doesn't care | ||
389 | * about errors or whether the log was flushed or not. This is the normal | ||
390 | * interface to use when trying to unpin items or move the log forward. | ||
391 | */ | ||
392 | void | ||
393 | xfs_log_force( | ||
394 | xfs_mount_t *mp, | ||
395 | xfs_lsn_t lsn, | ||
396 | uint flags) | ||
397 | { | ||
398 | int error; | ||
399 | error = _xfs_log_force(mp, lsn, flags, NULL); | ||
400 | if (error) { | ||
401 | xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force: " | ||
402 | "error %d returned.", error); | ||
403 | } | ||
404 | } | ||
405 | |||
388 | 406 | ||
389 | /* | 407 | /* |
390 | * Attaches a new iclog I/O completion callback routine during | 408 | * Attaches a new iclog I/O completion callback routine during |
@@ -397,12 +415,10 @@ xfs_log_notify(xfs_mount_t *mp, /* mount of partition */ | |||
397 | void *iclog_hndl, /* iclog to hang callback off */ | 415 | void *iclog_hndl, /* iclog to hang callback off */ |
398 | xfs_log_callback_t *cb) | 416 | xfs_log_callback_t *cb) |
399 | { | 417 | { |
400 | xlog_t *log = mp->m_log; | ||
401 | xlog_in_core_t *iclog = (xlog_in_core_t *)iclog_hndl; | 418 | xlog_in_core_t *iclog = (xlog_in_core_t *)iclog_hndl; |
402 | int abortflg; | 419 | int abortflg; |
403 | 420 | ||
404 | cb->cb_next = NULL; | 421 | spin_lock(&iclog->ic_callback_lock); |
405 | spin_lock(&log->l_icloglock); | ||
406 | abortflg = (iclog->ic_state & XLOG_STATE_IOERROR); | 422 | abortflg = (iclog->ic_state & XLOG_STATE_IOERROR); |
407 | if (!abortflg) { | 423 | if (!abortflg) { |
408 | ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) || | 424 | ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) || |
@@ -411,7 +427,7 @@ xfs_log_notify(xfs_mount_t *mp, /* mount of partition */ | |||
411 | *(iclog->ic_callback_tail) = cb; | 427 | *(iclog->ic_callback_tail) = cb; |
412 | iclog->ic_callback_tail = &(cb->cb_next); | 428 | iclog->ic_callback_tail = &(cb->cb_next); |
413 | } | 429 | } |
414 | spin_unlock(&log->l_icloglock); | 430 | spin_unlock(&iclog->ic_callback_lock); |
415 | return abortflg; | 431 | return abortflg; |
416 | } /* xfs_log_notify */ | 432 | } /* xfs_log_notify */ |
417 | 433 | ||
@@ -471,6 +487,8 @@ xfs_log_reserve(xfs_mount_t *mp, | |||
471 | /* may sleep if need to allocate more tickets */ | 487 | /* may sleep if need to allocate more tickets */ |
472 | internal_ticket = xlog_ticket_get(log, unit_bytes, cnt, | 488 | internal_ticket = xlog_ticket_get(log, unit_bytes, cnt, |
473 | client, flags); | 489 | client, flags); |
490 | if (!internal_ticket) | ||
491 | return XFS_ERROR(ENOMEM); | ||
474 | internal_ticket->t_trans_type = t_type; | 492 | internal_ticket->t_trans_type = t_type; |
475 | *ticket = internal_ticket; | 493 | *ticket = internal_ticket; |
476 | xlog_trace_loggrant(log, internal_ticket, | 494 | xlog_trace_loggrant(log, internal_ticket, |
@@ -636,7 +654,8 @@ xfs_log_unmount_write(xfs_mount_t *mp) | |||
636 | if (mp->m_flags & XFS_MOUNT_RDONLY) | 654 | if (mp->m_flags & XFS_MOUNT_RDONLY) |
637 | return 0; | 655 | return 0; |
638 | 656 | ||
639 | xfs_log_force(mp, 0, XFS_LOG_FORCE|XFS_LOG_SYNC); | 657 | error = _xfs_log_force(mp, 0, XFS_LOG_FORCE|XFS_LOG_SYNC, NULL); |
658 | ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log))); | ||
640 | 659 | ||
641 | #ifdef DEBUG | 660 | #ifdef DEBUG |
642 | first_iclog = iclog = log->l_iclog; | 661 | first_iclog = iclog = log->l_iclog; |
@@ -675,10 +694,10 @@ xfs_log_unmount_write(xfs_mount_t *mp) | |||
675 | 694 | ||
676 | spin_lock(&log->l_icloglock); | 695 | spin_lock(&log->l_icloglock); |
677 | iclog = log->l_iclog; | 696 | iclog = log->l_iclog; |
678 | iclog->ic_refcnt++; | 697 | atomic_inc(&iclog->ic_refcnt); |
679 | spin_unlock(&log->l_icloglock); | 698 | spin_unlock(&log->l_icloglock); |
680 | xlog_state_want_sync(log, iclog); | 699 | xlog_state_want_sync(log, iclog); |
681 | (void) xlog_state_release_iclog(log, iclog); | 700 | error = xlog_state_release_iclog(log, iclog); |
682 | 701 | ||
683 | spin_lock(&log->l_icloglock); | 702 | spin_lock(&log->l_icloglock); |
684 | if (!(iclog->ic_state == XLOG_STATE_ACTIVE || | 703 | if (!(iclog->ic_state == XLOG_STATE_ACTIVE || |
@@ -695,7 +714,7 @@ xfs_log_unmount_write(xfs_mount_t *mp) | |||
695 | if (tic) { | 714 | if (tic) { |
696 | xlog_trace_loggrant(log, tic, "unmount rec"); | 715 | xlog_trace_loggrant(log, tic, "unmount rec"); |
697 | xlog_ungrant_log_space(log, tic); | 716 | xlog_ungrant_log_space(log, tic); |
698 | xlog_state_put_ticket(log, tic); | 717 | xlog_ticket_put(log, tic); |
699 | } | 718 | } |
700 | } else { | 719 | } else { |
701 | /* | 720 | /* |
@@ -713,11 +732,11 @@ xfs_log_unmount_write(xfs_mount_t *mp) | |||
713 | */ | 732 | */ |
714 | spin_lock(&log->l_icloglock); | 733 | spin_lock(&log->l_icloglock); |
715 | iclog = log->l_iclog; | 734 | iclog = log->l_iclog; |
716 | iclog->ic_refcnt++; | 735 | atomic_inc(&iclog->ic_refcnt); |
717 | spin_unlock(&log->l_icloglock); | 736 | spin_unlock(&log->l_icloglock); |
718 | 737 | ||
719 | xlog_state_want_sync(log, iclog); | 738 | xlog_state_want_sync(log, iclog); |
720 | (void) xlog_state_release_iclog(log, iclog); | 739 | error = xlog_state_release_iclog(log, iclog); |
721 | 740 | ||
722 | spin_lock(&log->l_icloglock); | 741 | spin_lock(&log->l_icloglock); |
723 | 742 | ||
@@ -732,7 +751,7 @@ xfs_log_unmount_write(xfs_mount_t *mp) | |||
732 | } | 751 | } |
733 | } | 752 | } |
734 | 753 | ||
735 | return 0; | 754 | return error; |
736 | } /* xfs_log_unmount_write */ | 755 | } /* xfs_log_unmount_write */ |
737 | 756 | ||
738 | /* | 757 | /* |
@@ -1210,7 +1229,6 @@ xlog_alloc_log(xfs_mount_t *mp, | |||
1210 | spin_lock_init(&log->l_icloglock); | 1229 | spin_lock_init(&log->l_icloglock); |
1211 | spin_lock_init(&log->l_grant_lock); | 1230 | spin_lock_init(&log->l_grant_lock); |
1212 | initnsema(&log->l_flushsema, 0, "ic-flush"); | 1231 | initnsema(&log->l_flushsema, 0, "ic-flush"); |
1213 | xlog_state_ticket_alloc(log); /* wait until after icloglock inited */ | ||
1214 | 1232 | ||
1215 | /* log record size must be multiple of BBSIZE; see xlog_rec_header_t */ | 1233 | /* log record size must be multiple of BBSIZE; see xlog_rec_header_t */ |
1216 | ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0); | 1234 | ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0); |
@@ -1240,9 +1258,9 @@ xlog_alloc_log(xfs_mount_t *mp, | |||
1240 | XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); | 1258 | XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); |
1241 | iclog->ic_bp = bp; | 1259 | iclog->ic_bp = bp; |
1242 | iclog->hic_data = bp->b_addr; | 1260 | iclog->hic_data = bp->b_addr; |
1243 | 1261 | #ifdef DEBUG | |
1244 | log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header); | 1262 | log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header); |
1245 | 1263 | #endif | |
1246 | head = &iclog->ic_header; | 1264 | head = &iclog->ic_header; |
1247 | memset(head, 0, sizeof(xlog_rec_header_t)); | 1265 | memset(head, 0, sizeof(xlog_rec_header_t)); |
1248 | head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM); | 1266 | head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM); |
@@ -1253,10 +1271,11 @@ xlog_alloc_log(xfs_mount_t *mp, | |||
1253 | head->h_fmt = cpu_to_be32(XLOG_FMT); | 1271 | head->h_fmt = cpu_to_be32(XLOG_FMT); |
1254 | memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t)); | 1272 | memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t)); |
1255 | 1273 | ||
1256 | |||
1257 | iclog->ic_size = XFS_BUF_SIZE(bp) - log->l_iclog_hsize; | 1274 | iclog->ic_size = XFS_BUF_SIZE(bp) - log->l_iclog_hsize; |
1258 | iclog->ic_state = XLOG_STATE_ACTIVE; | 1275 | iclog->ic_state = XLOG_STATE_ACTIVE; |
1259 | iclog->ic_log = log; | 1276 | iclog->ic_log = log; |
1277 | atomic_set(&iclog->ic_refcnt, 0); | ||
1278 | spin_lock_init(&iclog->ic_callback_lock); | ||
1260 | iclog->ic_callback_tail = &(iclog->ic_callback); | 1279 | iclog->ic_callback_tail = &(iclog->ic_callback); |
1261 | iclog->ic_datap = (char *)iclog->hic_data + log->l_iclog_hsize; | 1280 | iclog->ic_datap = (char *)iclog->hic_data + log->l_iclog_hsize; |
1262 | 1281 | ||
@@ -1405,7 +1424,7 @@ xlog_sync(xlog_t *log, | |||
1405 | int v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb); | 1424 | int v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb); |
1406 | 1425 | ||
1407 | XFS_STATS_INC(xs_log_writes); | 1426 | XFS_STATS_INC(xs_log_writes); |
1408 | ASSERT(iclog->ic_refcnt == 0); | 1427 | ASSERT(atomic_read(&iclog->ic_refcnt) == 0); |
1409 | 1428 | ||
1410 | /* Add for LR header */ | 1429 | /* Add for LR header */ |
1411 | count_init = log->l_iclog_hsize + iclog->ic_offset; | 1430 | count_init = log->l_iclog_hsize + iclog->ic_offset; |
@@ -1538,7 +1557,6 @@ STATIC void | |||
1538 | xlog_dealloc_log(xlog_t *log) | 1557 | xlog_dealloc_log(xlog_t *log) |
1539 | { | 1558 | { |
1540 | xlog_in_core_t *iclog, *next_iclog; | 1559 | xlog_in_core_t *iclog, *next_iclog; |
1541 | xlog_ticket_t *tic, *next_tic; | ||
1542 | int i; | 1560 | int i; |
1543 | 1561 | ||
1544 | iclog = log->l_iclog; | 1562 | iclog = log->l_iclog; |
@@ -1559,22 +1577,6 @@ xlog_dealloc_log(xlog_t *log) | |||
1559 | spinlock_destroy(&log->l_icloglock); | 1577 | spinlock_destroy(&log->l_icloglock); |
1560 | spinlock_destroy(&log->l_grant_lock); | 1578 | spinlock_destroy(&log->l_grant_lock); |
1561 | 1579 | ||
1562 | /* XXXsup take a look at this again. */ | ||
1563 | if ((log->l_ticket_cnt != log->l_ticket_tcnt) && | ||
1564 | !XLOG_FORCED_SHUTDOWN(log)) { | ||
1565 | xfs_fs_cmn_err(CE_WARN, log->l_mp, | ||
1566 | "xlog_dealloc_log: (cnt: %d, total: %d)", | ||
1567 | log->l_ticket_cnt, log->l_ticket_tcnt); | ||
1568 | /* ASSERT(log->l_ticket_cnt == log->l_ticket_tcnt); */ | ||
1569 | |||
1570 | } else { | ||
1571 | tic = log->l_unmount_free; | ||
1572 | while (tic) { | ||
1573 | next_tic = tic->t_next; | ||
1574 | kmem_free(tic, PAGE_SIZE); | ||
1575 | tic = next_tic; | ||
1576 | } | ||
1577 | } | ||
1578 | xfs_buf_free(log->l_xbuf); | 1580 | xfs_buf_free(log->l_xbuf); |
1579 | #ifdef XFS_LOG_TRACE | 1581 | #ifdef XFS_LOG_TRACE |
1580 | if (log->l_trace != NULL) { | 1582 | if (log->l_trace != NULL) { |
@@ -1987,7 +1989,7 @@ xlog_state_clean_log(xlog_t *log) | |||
1987 | if (iclog->ic_state == XLOG_STATE_DIRTY) { | 1989 | if (iclog->ic_state == XLOG_STATE_DIRTY) { |
1988 | iclog->ic_state = XLOG_STATE_ACTIVE; | 1990 | iclog->ic_state = XLOG_STATE_ACTIVE; |
1989 | iclog->ic_offset = 0; | 1991 | iclog->ic_offset = 0; |
1990 | iclog->ic_callback = NULL; /* don't need to free */ | 1992 | ASSERT(iclog->ic_callback == NULL); |
1991 | /* | 1993 | /* |
1992 | * If the number of ops in this iclog indicate it just | 1994 | * If the number of ops in this iclog indicate it just |
1993 | * contains the dummy transaction, we can | 1995 | * contains the dummy transaction, we can |
@@ -2190,37 +2192,40 @@ xlog_state_do_callback( | |||
2190 | be64_to_cpu(iclog->ic_header.h_lsn); | 2192 | be64_to_cpu(iclog->ic_header.h_lsn); |
2191 | spin_unlock(&log->l_grant_lock); | 2193 | spin_unlock(&log->l_grant_lock); |
2192 | 2194 | ||
2193 | /* | ||
2194 | * Keep processing entries in the callback list | ||
2195 | * until we come around and it is empty. We | ||
2196 | * need to atomically see that the list is | ||
2197 | * empty and change the state to DIRTY so that | ||
2198 | * we don't miss any more callbacks being added. | ||
2199 | */ | ||
2200 | spin_lock(&log->l_icloglock); | ||
2201 | } else { | 2195 | } else { |
2196 | spin_unlock(&log->l_icloglock); | ||
2202 | ioerrors++; | 2197 | ioerrors++; |
2203 | } | 2198 | } |
2204 | cb = iclog->ic_callback; | ||
2205 | 2199 | ||
2200 | /* | ||
2201 | * Keep processing entries in the callback list until | ||
2202 | * we come around and it is empty. We need to | ||
2203 | * atomically see that the list is empty and change the | ||
2204 | * state to DIRTY so that we don't miss any more | ||
2205 | * callbacks being added. | ||
2206 | */ | ||
2207 | spin_lock(&iclog->ic_callback_lock); | ||
2208 | cb = iclog->ic_callback; | ||
2206 | while (cb) { | 2209 | while (cb) { |
2207 | iclog->ic_callback_tail = &(iclog->ic_callback); | 2210 | iclog->ic_callback_tail = &(iclog->ic_callback); |
2208 | iclog->ic_callback = NULL; | 2211 | iclog->ic_callback = NULL; |
2209 | spin_unlock(&log->l_icloglock); | 2212 | spin_unlock(&iclog->ic_callback_lock); |
2210 | 2213 | ||
2211 | /* perform callbacks in the order given */ | 2214 | /* perform callbacks in the order given */ |
2212 | for (; cb; cb = cb_next) { | 2215 | for (; cb; cb = cb_next) { |
2213 | cb_next = cb->cb_next; | 2216 | cb_next = cb->cb_next; |
2214 | cb->cb_func(cb->cb_arg, aborted); | 2217 | cb->cb_func(cb->cb_arg, aborted); |
2215 | } | 2218 | } |
2216 | spin_lock(&log->l_icloglock); | 2219 | spin_lock(&iclog->ic_callback_lock); |
2217 | cb = iclog->ic_callback; | 2220 | cb = iclog->ic_callback; |
2218 | } | 2221 | } |
2219 | 2222 | ||
2220 | loopdidcallbacks++; | 2223 | loopdidcallbacks++; |
2221 | funcdidcallbacks++; | 2224 | funcdidcallbacks++; |
2222 | 2225 | ||
2226 | spin_lock(&log->l_icloglock); | ||
2223 | ASSERT(iclog->ic_callback == NULL); | 2227 | ASSERT(iclog->ic_callback == NULL); |
2228 | spin_unlock(&iclog->ic_callback_lock); | ||
2224 | if (!(iclog->ic_state & XLOG_STATE_IOERROR)) | 2229 | if (!(iclog->ic_state & XLOG_STATE_IOERROR)) |
2225 | iclog->ic_state = XLOG_STATE_DIRTY; | 2230 | iclog->ic_state = XLOG_STATE_DIRTY; |
2226 | 2231 | ||
@@ -2241,7 +2246,7 @@ xlog_state_do_callback( | |||
2241 | repeats = 0; | 2246 | repeats = 0; |
2242 | xfs_fs_cmn_err(CE_WARN, log->l_mp, | 2247 | xfs_fs_cmn_err(CE_WARN, log->l_mp, |
2243 | "%s: possible infinite loop (%d iterations)", | 2248 | "%s: possible infinite loop (%d iterations)", |
2244 | __FUNCTION__, flushcnt); | 2249 | __func__, flushcnt); |
2245 | } | 2250 | } |
2246 | } while (!ioerrors && loopdidcallbacks); | 2251 | } while (!ioerrors && loopdidcallbacks); |
2247 | 2252 | ||
@@ -2309,7 +2314,7 @@ xlog_state_done_syncing( | |||
2309 | 2314 | ||
2310 | ASSERT(iclog->ic_state == XLOG_STATE_SYNCING || | 2315 | ASSERT(iclog->ic_state == XLOG_STATE_SYNCING || |
2311 | iclog->ic_state == XLOG_STATE_IOERROR); | 2316 | iclog->ic_state == XLOG_STATE_IOERROR); |
2312 | ASSERT(iclog->ic_refcnt == 0); | 2317 | ASSERT(atomic_read(&iclog->ic_refcnt) == 0); |
2313 | ASSERT(iclog->ic_bwritecnt == 1 || iclog->ic_bwritecnt == 2); | 2318 | ASSERT(iclog->ic_bwritecnt == 1 || iclog->ic_bwritecnt == 2); |
2314 | 2319 | ||
2315 | 2320 | ||
@@ -2391,7 +2396,7 @@ restart: | |||
2391 | ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); | 2396 | ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); |
2392 | head = &iclog->ic_header; | 2397 | head = &iclog->ic_header; |
2393 | 2398 | ||
2394 | iclog->ic_refcnt++; /* prevents sync */ | 2399 | atomic_inc(&iclog->ic_refcnt); /* prevents sync */ |
2395 | log_offset = iclog->ic_offset; | 2400 | log_offset = iclog->ic_offset; |
2396 | 2401 | ||
2397 | /* On the 1st write to an iclog, figure out lsn. This works | 2402 | /* On the 1st write to an iclog, figure out lsn. This works |
@@ -2423,12 +2428,12 @@ restart: | |||
2423 | xlog_state_switch_iclogs(log, iclog, iclog->ic_size); | 2428 | xlog_state_switch_iclogs(log, iclog, iclog->ic_size); |
2424 | 2429 | ||
2425 | /* If I'm the only one writing to this iclog, sync it to disk */ | 2430 | /* If I'm the only one writing to this iclog, sync it to disk */ |
2426 | if (iclog->ic_refcnt == 1) { | 2431 | if (atomic_read(&iclog->ic_refcnt) == 1) { |
2427 | spin_unlock(&log->l_icloglock); | 2432 | spin_unlock(&log->l_icloglock); |
2428 | if ((error = xlog_state_release_iclog(log, iclog))) | 2433 | if ((error = xlog_state_release_iclog(log, iclog))) |
2429 | return error; | 2434 | return error; |
2430 | } else { | 2435 | } else { |
2431 | iclog->ic_refcnt--; | 2436 | atomic_dec(&iclog->ic_refcnt); |
2432 | spin_unlock(&log->l_icloglock); | 2437 | spin_unlock(&log->l_icloglock); |
2433 | } | 2438 | } |
2434 | goto restart; | 2439 | goto restart; |
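Here and in the surrounding hunks the plain integer ic_refcnt becomes an atomic_t, so references can be taken and dropped with atomic_inc()/atomic_dec() and inspected with atomic_read() without always holding l_icloglock. The equivalent C11 operations, shown as a tiny standalone sketch:

#include <stdatomic.h>
#include <stdio.h>

struct iclog_like {
        atomic_int refcnt;      /* analogue of atomic_t ic_refcnt */
};

int main(void)
{
        struct iclog_like ic;

        atomic_init(&ic.refcnt, 0);
        atomic_fetch_add(&ic.refcnt, 1);                /* like atomic_inc() */
        printf("refs: %d\n", atomic_load(&ic.refcnt));  /* like atomic_read() */
        atomic_fetch_sub(&ic.refcnt, 1);                /* like atomic_dec() */
        printf("refs: %d\n", atomic_load(&ic.refcnt));
        return 0;
}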
@@ -2792,18 +2797,6 @@ xlog_ungrant_log_space(xlog_t *log, | |||
2792 | 2797 | ||
2793 | 2798 | ||
2794 | /* | 2799 | /* |
2795 | * Atomically put back used ticket. | ||
2796 | */ | ||
2797 | STATIC void | ||
2798 | xlog_state_put_ticket(xlog_t *log, | ||
2799 | xlog_ticket_t *tic) | ||
2800 | { | ||
2801 | spin_lock(&log->l_icloglock); | ||
2802 | xlog_ticket_put(log, tic); | ||
2803 | spin_unlock(&log->l_icloglock); | ||
2804 | } /* xlog_state_put_ticket */ | ||
2805 | |||
2806 | /* | ||
2807 | * Flush iclog to disk if this is the last reference to the given iclog and | 2800 | * Flush iclog to disk if this is the last reference to the given iclog and |
2808 | * the WANT_SYNC bit is set. | 2801 | * the WANT_SYNC bit is set. |
2809 | * | 2802 | * |
@@ -2813,33 +2806,35 @@ xlog_state_put_ticket(xlog_t *log, | |||
2813 | * | 2806 | * |
2814 | */ | 2807 | */ |
2815 | STATIC int | 2808 | STATIC int |
2816 | xlog_state_release_iclog(xlog_t *log, | 2809 | xlog_state_release_iclog( |
2817 | xlog_in_core_t *iclog) | 2810 | xlog_t *log, |
2811 | xlog_in_core_t *iclog) | ||
2818 | { | 2812 | { |
2819 | int sync = 0; /* do we sync? */ | 2813 | int sync = 0; /* do we sync? */ |
2820 | 2814 | ||
2821 | xlog_assign_tail_lsn(log->l_mp); | 2815 | if (iclog->ic_state & XLOG_STATE_IOERROR) |
2816 | return XFS_ERROR(EIO); | ||
2822 | 2817 | ||
2823 | spin_lock(&log->l_icloglock); | 2818 | ASSERT(atomic_read(&iclog->ic_refcnt) > 0); |
2819 | if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) | ||
2820 | return 0; | ||
2824 | 2821 | ||
2825 | if (iclog->ic_state & XLOG_STATE_IOERROR) { | 2822 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
2826 | spin_unlock(&log->l_icloglock); | 2823 | spin_unlock(&log->l_icloglock); |
2827 | return XFS_ERROR(EIO); | 2824 | return XFS_ERROR(EIO); |
2828 | } | 2825 | } |
2829 | |||
2830 | ASSERT(iclog->ic_refcnt > 0); | ||
2831 | ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE || | 2826 | ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE || |
2832 | iclog->ic_state == XLOG_STATE_WANT_SYNC); | 2827 | iclog->ic_state == XLOG_STATE_WANT_SYNC); |
2833 | 2828 | ||
2834 | if (--iclog->ic_refcnt == 0 && | 2829 | if (iclog->ic_state == XLOG_STATE_WANT_SYNC) { |
2835 | iclog->ic_state == XLOG_STATE_WANT_SYNC) { | 2830 | /* update tail before writing to iclog */ |
2831 | xlog_assign_tail_lsn(log->l_mp); | ||
2836 | sync++; | 2832 | sync++; |
2837 | iclog->ic_state = XLOG_STATE_SYNCING; | 2833 | iclog->ic_state = XLOG_STATE_SYNCING; |
2838 | iclog->ic_header.h_tail_lsn = cpu_to_be64(log->l_tail_lsn); | 2834 | iclog->ic_header.h_tail_lsn = cpu_to_be64(log->l_tail_lsn); |
2839 | xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn); | 2835 | xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn); |
2840 | /* cycle incremented when incrementing curr_block */ | 2836 | /* cycle incremented when incrementing curr_block */ |
2841 | } | 2837 | } |
2842 | |||
2843 | spin_unlock(&log->l_icloglock); | 2838 | spin_unlock(&log->l_icloglock); |
2844 | 2839 | ||
2845 | /* | 2840 | /* |
@@ -2849,11 +2844,9 @@ xlog_state_release_iclog(xlog_t *log, | |||
2849 | * this iclog has consistent data, so we ignore IOERROR | 2844 | * this iclog has consistent data, so we ignore IOERROR |
2850 | * flags after this point. | 2845 | * flags after this point. |
2851 | */ | 2846 | */ |
2852 | if (sync) { | 2847 | if (sync) |
2853 | return xlog_sync(log, iclog); | 2848 | return xlog_sync(log, iclog); |
2854 | } | ||
2855 | return 0; | 2849 | return 0; |
2856 | |||
2857 | } /* xlog_state_release_iclog */ | 2850 | } /* xlog_state_release_iclog */ |
2858 | 2851 | ||
2859 | 2852 | ||
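The rewritten xlog_state_release_iclog() relies on atomic_dec_and_lock(), which drops a reference and acquires l_icloglock only when the count reaches zero. A rough userspace analogue of that primitive is sketched below, assuming C11 atomics and a pthread mutex; the kernel version differs in detail.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Userspace analogue of atomic_dec_and_lock(): drop a reference and,
 * only if it was the last one, return true with the lock held.
 */
static bool dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
        int old = atomic_load(cnt);

        /* fast path: not the last reference, never touch the lock */
        while (old > 1) {
                if (atomic_compare_exchange_weak(cnt, &old, old - 1))
                        return false;
        }

        /* slow path: take the lock, then drop the final reference under it */
        pthread_mutex_lock(lock);
        if (atomic_fetch_sub(cnt, 1) == 1)
                return true;            /* last ref, lock stays held */
        pthread_mutex_unlock(lock);
        return false;
}

int main(void)
{
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        atomic_int refs;

        atomic_init(&refs, 1);
        if (dec_and_lock(&refs, &lock)) {
                printf("last reference dropped, doing locked teardown\n");
                pthread_mutex_unlock(&lock);
        }
        return 0;
}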
@@ -2953,7 +2946,8 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed) | |||
2953 | * previous iclog and go to sleep. | 2946 | * previous iclog and go to sleep. |
2954 | */ | 2947 | */ |
2955 | if (iclog->ic_state == XLOG_STATE_DIRTY || | 2948 | if (iclog->ic_state == XLOG_STATE_DIRTY || |
2956 | (iclog->ic_refcnt == 0 && iclog->ic_offset == 0)) { | 2949 | (atomic_read(&iclog->ic_refcnt) == 0 |
2950 | && iclog->ic_offset == 0)) { | ||
2957 | iclog = iclog->ic_prev; | 2951 | iclog = iclog->ic_prev; |
2958 | if (iclog->ic_state == XLOG_STATE_ACTIVE || | 2952 | if (iclog->ic_state == XLOG_STATE_ACTIVE || |
2959 | iclog->ic_state == XLOG_STATE_DIRTY) | 2953 | iclog->ic_state == XLOG_STATE_DIRTY) |
@@ -2961,14 +2955,14 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed) | |||
2961 | else | 2955 | else |
2962 | goto maybe_sleep; | 2956 | goto maybe_sleep; |
2963 | } else { | 2957 | } else { |
2964 | if (iclog->ic_refcnt == 0) { | 2958 | if (atomic_read(&iclog->ic_refcnt) == 0) { |
2965 | /* We are the only one with access to this | 2959 | /* We are the only one with access to this |
2966 | * iclog. Flush it out now. There should | 2960 | * iclog. Flush it out now. There should |
2967 | * be a roundoff of zero to show that someone | 2961 | * be a roundoff of zero to show that someone |
2968 | * has already taken care of the roundoff from | 2962 | * has already taken care of the roundoff from |
2969 | * the previous sync. | 2963 | * the previous sync. |
2970 | */ | 2964 | */ |
2971 | iclog->ic_refcnt++; | 2965 | atomic_inc(&iclog->ic_refcnt); |
2972 | lsn = be64_to_cpu(iclog->ic_header.h_lsn); | 2966 | lsn = be64_to_cpu(iclog->ic_header.h_lsn); |
2973 | xlog_state_switch_iclogs(log, iclog, 0); | 2967 | xlog_state_switch_iclogs(log, iclog, 0); |
2974 | spin_unlock(&log->l_icloglock); | 2968 | spin_unlock(&log->l_icloglock); |
@@ -3100,7 +3094,7 @@ try_again: | |||
3100 | already_slept = 1; | 3094 | already_slept = 1; |
3101 | goto try_again; | 3095 | goto try_again; |
3102 | } else { | 3096 | } else { |
3103 | iclog->ic_refcnt++; | 3097 | atomic_inc(&iclog->ic_refcnt); |
3104 | xlog_state_switch_iclogs(log, iclog, 0); | 3098 | xlog_state_switch_iclogs(log, iclog, 0); |
3105 | spin_unlock(&log->l_icloglock); | 3099 | spin_unlock(&log->l_icloglock); |
3106 | if (xlog_state_release_iclog(log, iclog)) | 3100 | if (xlog_state_release_iclog(log, iclog)) |
@@ -3172,92 +3166,19 @@ xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog) | |||
3172 | */ | 3166 | */ |
3173 | 3167 | ||
3174 | /* | 3168 | /* |
3175 | * Algorithm doesn't take into account page size. ;-( | 3169 | * Free a used ticket. |
3176 | */ | ||
3177 | STATIC void | ||
3178 | xlog_state_ticket_alloc(xlog_t *log) | ||
3179 | { | ||
3180 | xlog_ticket_t *t_list; | ||
3181 | xlog_ticket_t *next; | ||
3182 | xfs_caddr_t buf; | ||
3183 | uint i = (PAGE_SIZE / sizeof(xlog_ticket_t)) - 2; | ||
3184 | |||
3185 | /* | ||
3186 | * The kmem_zalloc may sleep, so we shouldn't be holding the | ||
3187 | * global lock. XXXmiken: may want to use zone allocator. | ||
3188 | */ | ||
3189 | buf = (xfs_caddr_t) kmem_zalloc(PAGE_SIZE, KM_SLEEP); | ||
3190 | |||
3191 | spin_lock(&log->l_icloglock); | ||
3192 | |||
3193 | /* Attach 1st ticket to Q, so we can keep track of allocated memory */ | ||
3194 | t_list = (xlog_ticket_t *)buf; | ||
3195 | t_list->t_next = log->l_unmount_free; | ||
3196 | log->l_unmount_free = t_list++; | ||
3197 | log->l_ticket_cnt++; | ||
3198 | log->l_ticket_tcnt++; | ||
3199 | |||
3200 | /* Next ticket becomes first ticket attached to ticket free list */ | ||
3201 | if (log->l_freelist != NULL) { | ||
3202 | ASSERT(log->l_tail != NULL); | ||
3203 | log->l_tail->t_next = t_list; | ||
3204 | } else { | ||
3205 | log->l_freelist = t_list; | ||
3206 | } | ||
3207 | log->l_ticket_cnt++; | ||
3208 | log->l_ticket_tcnt++; | ||
3209 | |||
3210 | /* Cycle through rest of alloc'ed memory, building up free Q */ | ||
3211 | for ( ; i > 0; i--) { | ||
3212 | next = t_list + 1; | ||
3213 | t_list->t_next = next; | ||
3214 | t_list = next; | ||
3215 | log->l_ticket_cnt++; | ||
3216 | log->l_ticket_tcnt++; | ||
3217 | } | ||
3218 | t_list->t_next = NULL; | ||
3219 | log->l_tail = t_list; | ||
3220 | spin_unlock(&log->l_icloglock); | ||
3221 | } /* xlog_state_ticket_alloc */ | ||
3222 | |||
3223 | |||
3224 | /* | ||
3225 | * Put ticket into free list | ||
3226 | * | ||
3227 | * Assumption: log lock is held around this call. | ||
3228 | */ | 3170 | */ |
3229 | STATIC void | 3171 | STATIC void |
3230 | xlog_ticket_put(xlog_t *log, | 3172 | xlog_ticket_put(xlog_t *log, |
3231 | xlog_ticket_t *ticket) | 3173 | xlog_ticket_t *ticket) |
3232 | { | 3174 | { |
3233 | sv_destroy(&ticket->t_sema); | 3175 | sv_destroy(&ticket->t_sema); |
3234 | 3176 | kmem_zone_free(xfs_log_ticket_zone, ticket); | |
3235 | /* | ||
3236 | * Don't think caching will make that much difference. It's | ||
3237 | * more important to make debug easier. | ||
3238 | */ | ||
3239 | #if 0 | ||
3240 | /* real code will want to use LIFO for caching */ | ||
3241 | ticket->t_next = log->l_freelist; | ||
3242 | log->l_freelist = ticket; | ||
3243 | /* no need to clear fields */ | ||
3244 | #else | ||
3245 | /* When we debug, it is easier if tickets are cycled */ | ||
3246 | ticket->t_next = NULL; | ||
3247 | if (log->l_tail) { | ||
3248 | log->l_tail->t_next = ticket; | ||
3249 | } else { | ||
3250 | ASSERT(log->l_freelist == NULL); | ||
3251 | log->l_freelist = ticket; | ||
3252 | } | ||
3253 | log->l_tail = ticket; | ||
3254 | #endif /* DEBUG */ | ||
3255 | log->l_ticket_cnt++; | ||
3256 | } /* xlog_ticket_put */ | 3177 | } /* xlog_ticket_put */ |
3257 | 3178 | ||
3258 | 3179 | ||
3259 | /* | 3180 | /* |
3260 | * Grab ticket off freelist or allocation some more | 3181 | * Allocate and initialise a new log ticket. |
3261 | */ | 3182 | */ |
3262 | STATIC xlog_ticket_t * | 3183 | STATIC xlog_ticket_t * |
3263 | xlog_ticket_get(xlog_t *log, | 3184 | xlog_ticket_get(xlog_t *log, |
@@ -3269,21 +3190,9 @@ xlog_ticket_get(xlog_t *log, | |||
3269 | xlog_ticket_t *tic; | 3190 | xlog_ticket_t *tic; |
3270 | uint num_headers; | 3191 | uint num_headers; |
3271 | 3192 | ||
3272 | alloc: | 3193 | tic = kmem_zone_zalloc(xfs_log_ticket_zone, KM_SLEEP|KM_MAYFAIL); |
3273 | if (log->l_freelist == NULL) | 3194 | if (!tic) |
3274 | xlog_state_ticket_alloc(log); /* potentially sleep */ | 3195 | return NULL; |
3275 | |||
3276 | spin_lock(&log->l_icloglock); | ||
3277 | if (log->l_freelist == NULL) { | ||
3278 | spin_unlock(&log->l_icloglock); | ||
3279 | goto alloc; | ||
3280 | } | ||
3281 | tic = log->l_freelist; | ||
3282 | log->l_freelist = tic->t_next; | ||
3283 | if (log->l_freelist == NULL) | ||
3284 | log->l_tail = NULL; | ||
3285 | log->l_ticket_cnt--; | ||
3286 | spin_unlock(&log->l_icloglock); | ||
3287 | 3196 | ||
3288 | /* | 3197 | /* |
3289 | * Permanent reservations have up to 'cnt'-1 active log operations | 3198 | * Permanent reservations have up to 'cnt'-1 active log operations |
@@ -3611,8 +3520,8 @@ xfs_log_force_umount( | |||
3611 | * before we mark the filesystem SHUTDOWN and wake | 3520 | * before we mark the filesystem SHUTDOWN and wake |
3612 | * everybody up to tell the bad news. | 3521 | * everybody up to tell the bad news. |
3613 | */ | 3522 | */ |
3614 | spin_lock(&log->l_grant_lock); | ||
3615 | spin_lock(&log->l_icloglock); | 3523 | spin_lock(&log->l_icloglock); |
3524 | spin_lock(&log->l_grant_lock); | ||
3616 | mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; | 3525 | mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; |
3617 | XFS_BUF_DONE(mp->m_sb_bp); | 3526 | XFS_BUF_DONE(mp->m_sb_bp); |
3618 | /* | 3527 | /* |
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h index 4cdac048df5e..d1d678ecb63e 100644 --- a/fs/xfs/xfs_log.h +++ b/fs/xfs/xfs_log.h | |||
@@ -142,8 +142,9 @@ int _xfs_log_force(struct xfs_mount *mp, | |||
142 | xfs_lsn_t lsn, | 142 | xfs_lsn_t lsn, |
143 | uint flags, | 143 | uint flags, |
144 | int *log_forced); | 144 | int *log_forced); |
145 | #define xfs_log_force(mp, lsn, flags) \ | 145 | void xfs_log_force(struct xfs_mount *mp, |
146 | _xfs_log_force(mp, lsn, flags, NULL); | 146 | xfs_lsn_t lsn, |
147 | uint flags); | ||
147 | int xfs_log_mount(struct xfs_mount *mp, | 148 | int xfs_log_mount(struct xfs_mount *mp, |
148 | struct xfs_buftarg *log_target, | 149 | struct xfs_buftarg *log_target, |
149 | xfs_daddr_t start_block, | 150 | xfs_daddr_t start_block, |
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index c6244cc733c0..8952a392b5f3 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h | |||
@@ -242,7 +242,7 @@ typedef struct xlog_res { | |||
242 | 242 | ||
243 | typedef struct xlog_ticket { | 243 | typedef struct xlog_ticket { |
244 | sv_t t_sema; /* sleep on this semaphore : 20 */ | 244 | sv_t t_sema; /* sleep on this semaphore : 20 */ |
245 | struct xlog_ticket *t_next; /* :4|8 */ | 245 | struct xlog_ticket *t_next; /* :4|8 */ |
246 | struct xlog_ticket *t_prev; /* :4|8 */ | 246 | struct xlog_ticket *t_prev; /* :4|8 */ |
247 | xlog_tid_t t_tid; /* transaction identifier : 4 */ | 247 | xlog_tid_t t_tid; /* transaction identifier : 4 */ |
248 | int t_curr_res; /* current reservation in bytes : 4 */ | 248 | int t_curr_res; /* current reservation in bytes : 4 */ |
@@ -324,6 +324,19 @@ typedef struct xlog_rec_ext_header { | |||
324 | * - ic_offset is the current number of bytes written to in this iclog. | 324 | * - ic_offset is the current number of bytes written to in this iclog. |
325 | * - ic_refcnt is bumped when someone is writing to the log. | 325 | * - ic_refcnt is bumped when someone is writing to the log. |
326 | * - ic_state is the state of the iclog. | 326 | * - ic_state is the state of the iclog. |
327 | * | ||
328 | * Because of cacheline contention on large machines, we need to separate | ||
329 | * various resources onto different cachelines. To start with, make the | ||
330 | * structure cacheline aligned. The following fields can be contended on | ||
331 | * by independent processes: | ||
332 | * | ||
333 | * - ic_callback_* | ||
334 | * - ic_refcnt | ||
335 | * - fields protected by the global l_icloglock | ||
336 | * | ||
337 | * so we need to ensure that these fields are located in separate cachelines. | ||
338 | * We'll put all the read-only and l_icloglock fields in the first cacheline, | ||
339 | * and move everything else out to subsequent cachelines. | ||
327 | */ | 340 | */ |
328 | typedef struct xlog_iclog_fields { | 341 | typedef struct xlog_iclog_fields { |
329 | sv_t ic_forcesema; | 342 | sv_t ic_forcesema; |
@@ -332,17 +345,22 @@ typedef struct xlog_iclog_fields { | |||
332 | struct xlog_in_core *ic_prev; | 345 | struct xlog_in_core *ic_prev; |
333 | struct xfs_buf *ic_bp; | 346 | struct xfs_buf *ic_bp; |
334 | struct log *ic_log; | 347 | struct log *ic_log; |
335 | xfs_log_callback_t *ic_callback; | ||
336 | xfs_log_callback_t **ic_callback_tail; | ||
337 | #ifdef XFS_LOG_TRACE | ||
338 | struct ktrace *ic_trace; | ||
339 | #endif | ||
340 | int ic_size; | 348 | int ic_size; |
341 | int ic_offset; | 349 | int ic_offset; |
342 | int ic_refcnt; | ||
343 | int ic_bwritecnt; | 350 | int ic_bwritecnt; |
344 | ushort_t ic_state; | 351 | ushort_t ic_state; |
345 | char *ic_datap; /* pointer to iclog data */ | 352 | char *ic_datap; /* pointer to iclog data */ |
353 | #ifdef XFS_LOG_TRACE | ||
354 | struct ktrace *ic_trace; | ||
355 | #endif | ||
356 | |||
357 | /* Callback structures need their own cacheline */ | ||
358 | spinlock_t ic_callback_lock ____cacheline_aligned_in_smp; | ||
359 | xfs_log_callback_t *ic_callback; | ||
360 | xfs_log_callback_t **ic_callback_tail; | ||
361 | |||
362 | /* reference counts need their own cacheline */ | ||
363 | atomic_t ic_refcnt ____cacheline_aligned_in_smp; | ||
346 | } xlog_iclog_fields_t; | 364 | } xlog_iclog_fields_t; |
347 | 365 | ||
348 | typedef union xlog_in_core2 { | 366 | typedef union xlog_in_core2 { |
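The struct changes above follow the new comment: contended members (the callback list and the reference count) are pushed onto their own cachelines with ____cacheline_aligned_in_smp. A userspace sketch of the same idea using C11 alignment, assuming a 64-byte line purely for illustration:

#include <stdalign.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define CACHELINE 64    /* assumed L1 line size for this sketch */

struct iclog_like {
        /* mostly read-only fields share the first cacheline */
        int size;
        int offset;

        /* callback list gets its own cacheline, like ic_callback_lock */
        alignas(CACHELINE) void *callback_head;

        /* reference count gets its own cacheline, like ic_refcnt */
        alignas(CACHELINE) atomic_int refcnt;
};

int main(void)
{
        printf("sizeof = %zu, refcnt offset = %zu\n",
               sizeof(struct iclog_like),
               offsetof(struct iclog_like, refcnt));
        return 0;
}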
@@ -366,6 +384,7 @@ typedef struct xlog_in_core { | |||
366 | #define ic_bp hic_fields.ic_bp | 384 | #define ic_bp hic_fields.ic_bp |
367 | #define ic_log hic_fields.ic_log | 385 | #define ic_log hic_fields.ic_log |
368 | #define ic_callback hic_fields.ic_callback | 386 | #define ic_callback hic_fields.ic_callback |
387 | #define ic_callback_lock hic_fields.ic_callback_lock | ||
369 | #define ic_callback_tail hic_fields.ic_callback_tail | 388 | #define ic_callback_tail hic_fields.ic_callback_tail |
370 | #define ic_trace hic_fields.ic_trace | 389 | #define ic_trace hic_fields.ic_trace |
371 | #define ic_size hic_fields.ic_size | 390 | #define ic_size hic_fields.ic_size |
@@ -383,43 +402,46 @@ typedef struct xlog_in_core { | |||
383 | * that round off problems won't occur when releasing partial reservations. | 402 | * that round off problems won't occur when releasing partial reservations. |
384 | */ | 403 | */ |
385 | typedef struct log { | 404 | typedef struct log { |
405 | /* The following fields don't need locking */ | ||
406 | struct xfs_mount *l_mp; /* mount point */ | ||
407 | struct xfs_buf *l_xbuf; /* extra buffer for log | ||
408 | * wrapping */ | ||
409 | struct xfs_buftarg *l_targ; /* buftarg of log */ | ||
410 | uint l_flags; | ||
411 | uint l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */ | ||
412 | struct xfs_buf_cancel **l_buf_cancel_table; | ||
413 | int l_iclog_hsize; /* size of iclog header */ | ||
414 | int l_iclog_heads; /* # of iclog header sectors */ | ||
415 | uint l_sectbb_log; /* log2 of sector size in BBs */ | ||
416 | uint l_sectbb_mask; /* sector size (in BBs) | ||
417 | * alignment mask */ | ||
418 | int l_iclog_size; /* size of log in bytes */ | ||
419 | int l_iclog_size_log; /* log power size of log */ | ||
420 | int l_iclog_bufs; /* number of iclog buffers */ | ||
421 | xfs_daddr_t l_logBBstart; /* start block of log */ | ||
422 | int l_logsize; /* size of log in bytes */ | ||
423 | int l_logBBsize; /* size of log in BB chunks */ | ||
424 | |||
386 | /* The following block of fields are changed while holding icloglock */ | 425 | /* The following block of fields are changed while holding icloglock */ |
387 | sema_t l_flushsema; /* iclog flushing semaphore */ | 426 | sema_t l_flushsema ____cacheline_aligned_in_smp; |
427 | /* iclog flushing semaphore */ | ||
388 | int l_flushcnt; /* # of procs waiting on this | 428 | int l_flushcnt; /* # of procs waiting on this |
389 | * sema */ | 429 | * sema */ |
390 | int l_ticket_cnt; /* free ticket count */ | ||
391 | int l_ticket_tcnt; /* total ticket count */ | ||
392 | int l_covered_state;/* state of "covering disk | 430 | int l_covered_state;/* state of "covering disk |
393 | * log entries" */ | 431 | * log entries" */ |
394 | xlog_ticket_t *l_freelist; /* free list of tickets */ | ||
395 | xlog_ticket_t *l_unmount_free;/* kmem_free these addresses */ | ||
396 | xlog_ticket_t *l_tail; /* free list of tickets */ | ||
397 | xlog_in_core_t *l_iclog; /* head log queue */ | 432 | xlog_in_core_t *l_iclog; /* head log queue */ |
398 | spinlock_t l_icloglock; /* grab to change iclog state */ | 433 | spinlock_t l_icloglock; /* grab to change iclog state */ |
399 | xfs_lsn_t l_tail_lsn; /* lsn of 1st LR with unflushed | 434 | xfs_lsn_t l_tail_lsn; /* lsn of 1st LR with unflushed |
400 | * buffers */ | 435 | * buffers */ |
401 | xfs_lsn_t l_last_sync_lsn;/* lsn of last LR on disk */ | 436 | xfs_lsn_t l_last_sync_lsn;/* lsn of last LR on disk */ |
402 | struct xfs_mount *l_mp; /* mount point */ | ||
403 | struct xfs_buf *l_xbuf; /* extra buffer for log | ||
404 | * wrapping */ | ||
405 | struct xfs_buftarg *l_targ; /* buftarg of log */ | ||
406 | xfs_daddr_t l_logBBstart; /* start block of log */ | ||
407 | int l_logsize; /* size of log in bytes */ | ||
408 | int l_logBBsize; /* size of log in BB chunks */ | ||
409 | int l_curr_cycle; /* Cycle number of log writes */ | 437 | int l_curr_cycle; /* Cycle number of log writes */ |
410 | int l_prev_cycle; /* Cycle number before last | 438 | int l_prev_cycle; /* Cycle number before last |
411 | * block increment */ | 439 | * block increment */ |
412 | int l_curr_block; /* current logical log block */ | 440 | int l_curr_block; /* current logical log block */ |
413 | int l_prev_block; /* previous logical log block */ | 441 | int l_prev_block; /* previous logical log block */ |
414 | int l_iclog_size; /* size of log in bytes */ | ||
415 | int l_iclog_size_log; /* log power size of log */ | ||
416 | int l_iclog_bufs; /* number of iclog buffers */ | ||
417 | |||
418 | /* The following fields are used for debugging; need to hold icloglock */ | ||
419 | char *l_iclog_bak[XLOG_MAX_ICLOGS]; | ||
420 | 442 | ||
421 | /* The following block of fields are changed while holding grant_lock */ | 443 | /* The following block of fields are changed while holding grant_lock */ |
422 | spinlock_t l_grant_lock; | 444 | spinlock_t l_grant_lock ____cacheline_aligned_in_smp; |
423 | xlog_ticket_t *l_reserve_headq; | 445 | xlog_ticket_t *l_reserve_headq; |
424 | xlog_ticket_t *l_write_headq; | 446 | xlog_ticket_t *l_write_headq; |
425 | int l_grant_reserve_cycle; | 447 | int l_grant_reserve_cycle; |
@@ -427,19 +449,16 @@ typedef struct log { | |||
427 | int l_grant_write_cycle; | 449 | int l_grant_write_cycle; |
428 | int l_grant_write_bytes; | 450 | int l_grant_write_bytes; |
429 | 451 | ||
430 | /* The following fields don't need locking */ | ||
431 | #ifdef XFS_LOG_TRACE | 452 | #ifdef XFS_LOG_TRACE |
432 | struct ktrace *l_trace; | 453 | struct ktrace *l_trace; |
433 | struct ktrace *l_grant_trace; | 454 | struct ktrace *l_grant_trace; |
434 | #endif | 455 | #endif |
435 | uint l_flags; | 456 | |
436 | uint l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */ | 457 | /* The following field are used for debugging; need to hold icloglock */ |
437 | struct xfs_buf_cancel **l_buf_cancel_table; | 458 | #ifdef DEBUG |
438 | int l_iclog_hsize; /* size of iclog header */ | 459 | char *l_iclog_bak[XLOG_MAX_ICLOGS]; |
439 | int l_iclog_heads; /* # of iclog header sectors */ | 460 | #endif |
440 | uint l_sectbb_log; /* log2 of sector size in BBs */ | 461 | |
441 | uint l_sectbb_mask; /* sector size (in BBs) | ||
442 | * alignment mask */ | ||
443 | } xlog_t; | 462 | } xlog_t; |
444 | 463 | ||
445 | #define XLOG_FORCED_SHUTDOWN(log) ((log)->l_flags & XLOG_IO_ERROR) | 464 | #define XLOG_FORCED_SHUTDOWN(log) ((log)->l_flags & XLOG_IO_ERROR) |
@@ -459,6 +478,8 @@ extern struct xfs_buf *xlog_get_bp(xlog_t *, int); | |||
459 | extern void xlog_put_bp(struct xfs_buf *); | 478 | extern void xlog_put_bp(struct xfs_buf *); |
460 | extern int xlog_bread(xlog_t *, xfs_daddr_t, int, struct xfs_buf *); | 479 | extern int xlog_bread(xlog_t *, xfs_daddr_t, int, struct xfs_buf *); |
461 | 480 | ||
481 | extern kmem_zone_t *xfs_log_ticket_zone; | ||
482 | |||
462 | /* iclog tracing */ | 483 | /* iclog tracing */ |
463 | #define XLOG_TRACE_GRAB_FLUSH 1 | 484 | #define XLOG_TRACE_GRAB_FLUSH 1 |
464 | #define XLOG_TRACE_REL_FLUSH 2 | 485 | #define XLOG_TRACE_REL_FLUSH 2 |
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index b2b70eba282c..e65ab4af0955 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include "xfs_trans_priv.h" | 46 | #include "xfs_trans_priv.h" |
47 | #include "xfs_quota.h" | 47 | #include "xfs_quota.h" |
48 | #include "xfs_rw.h" | 48 | #include "xfs_rw.h" |
49 | #include "xfs_utils.h" | ||
49 | 50 | ||
50 | STATIC int xlog_find_zeroed(xlog_t *, xfs_daddr_t *); | 51 | STATIC int xlog_find_zeroed(xlog_t *, xfs_daddr_t *); |
51 | STATIC int xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t); | 52 | STATIC int xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t); |
@@ -120,7 +121,8 @@ xlog_bread( | |||
120 | XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp); | 121 | XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp); |
121 | 122 | ||
122 | xfsbdstrat(log->l_mp, bp); | 123 | xfsbdstrat(log->l_mp, bp); |
123 | if ((error = xfs_iowait(bp))) | 124 | error = xfs_iowait(bp); |
125 | if (error) | ||
124 | xfs_ioerror_alert("xlog_bread", log->l_mp, | 126 | xfs_ioerror_alert("xlog_bread", log->l_mp, |
125 | bp, XFS_BUF_ADDR(bp)); | 127 | bp, XFS_BUF_ADDR(bp)); |
126 | return error; | 128 | return error; |
@@ -191,7 +193,7 @@ xlog_header_check_dump( | |||
191 | { | 193 | { |
192 | int b; | 194 | int b; |
193 | 195 | ||
194 | cmn_err(CE_DEBUG, "%s: SB : uuid = ", __FUNCTION__); | 196 | cmn_err(CE_DEBUG, "%s: SB : uuid = ", __func__); |
195 | for (b = 0; b < 16; b++) | 197 | for (b = 0; b < 16; b++) |
196 | cmn_err(CE_DEBUG, "%02x", ((uchar_t *)&mp->m_sb.sb_uuid)[b]); | 198 | cmn_err(CE_DEBUG, "%02x", ((uchar_t *)&mp->m_sb.sb_uuid)[b]); |
197 | cmn_err(CE_DEBUG, ", fmt = %d\n", XLOG_FMT); | 199 | cmn_err(CE_DEBUG, ", fmt = %d\n", XLOG_FMT); |
@@ -1160,10 +1162,14 @@ xlog_write_log_records( | |||
1160 | if (j == 0 && (start_block + endcount > ealign)) { | 1162 | if (j == 0 && (start_block + endcount > ealign)) { |
1161 | offset = XFS_BUF_PTR(bp); | 1163 | offset = XFS_BUF_PTR(bp); |
1162 | balign = BBTOB(ealign - start_block); | 1164 | balign = BBTOB(ealign - start_block); |
1163 | XFS_BUF_SET_PTR(bp, offset + balign, BBTOB(sectbb)); | 1165 | error = XFS_BUF_SET_PTR(bp, offset + balign, |
1164 | if ((error = xlog_bread(log, ealign, sectbb, bp))) | 1166 | BBTOB(sectbb)); |
1167 | if (!error) | ||
1168 | error = xlog_bread(log, ealign, sectbb, bp); | ||
1169 | if (!error) | ||
1170 | error = XFS_BUF_SET_PTR(bp, offset, bufblks); | ||
1171 | if (error) | ||
1165 | break; | 1172 | break; |
1166 | XFS_BUF_SET_PTR(bp, offset, bufblks); | ||
1167 | } | 1173 | } |
1168 | 1174 | ||
1169 | offset = xlog_align(log, start_block, endcount, bp); | 1175 | offset = xlog_align(log, start_block, endcount, bp); |
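Here the buffer-pointer adjust, the read, and the pointer restore are chained through a single error variable instead of returning early between the steps, since XFS_BUF_SET_PTR can now fail. A generic sketch of that chaining style with made-up step functions:

#include <errno.h>
#include <stdio.h>

/* Hypothetical steps that each return 0 or an errno-style code. */
static int set_ptr(int adjust)   { return adjust < 0 ? EINVAL : 0; }
static int read_blocks(int n)    { return n ? 0 : EIO; }
static int restore_ptr(void)     { return 0; }

/*
 * Chain the steps so every failure funnels into a single check,
 * mirroring the XFS_BUF_SET_PTR/xlog_bread sequence in the hunk above.
 */
static int do_wrapped_read(int adjust, int nblocks)
{
        int error;

        error = set_ptr(adjust);
        if (!error)
                error = read_blocks(nblocks);
        if (!error)
                error = restore_ptr();
        return error;
}

int main(void)
{
        printf("result: %d\n", do_wrapped_read(8, 2));
        return 0;
}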
@@ -2280,7 +2286,9 @@ xlog_recover_do_inode_trans( | |||
2280 | * invalidate the buffer when we write it out below. | 2286 | * invalidate the buffer when we write it out below. |
2281 | */ | 2287 | */ |
2282 | imap.im_blkno = 0; | 2288 | imap.im_blkno = 0; |
2283 | xfs_imap(log->l_mp, NULL, ino, &imap, 0); | 2289 | error = xfs_imap(log->l_mp, NULL, ino, &imap, 0); |
2290 | if (error) | ||
2291 | goto error; | ||
2284 | } | 2292 | } |
2285 | 2293 | ||
2286 | /* | 2294 | /* |
@@ -2964,7 +2972,7 @@ xlog_recover_process_data( | |||
2964 | * Process an extent free intent item that was recovered from | 2972 | * Process an extent free intent item that was recovered from |
2965 | * the log. We need to free the extents that it describes. | 2973 | * the log. We need to free the extents that it describes. |
2966 | */ | 2974 | */ |
2967 | STATIC void | 2975 | STATIC int |
2968 | xlog_recover_process_efi( | 2976 | xlog_recover_process_efi( |
2969 | xfs_mount_t *mp, | 2977 | xfs_mount_t *mp, |
2970 | xfs_efi_log_item_t *efip) | 2978 | xfs_efi_log_item_t *efip) |
@@ -2972,6 +2980,7 @@ xlog_recover_process_efi( | |||
2972 | xfs_efd_log_item_t *efdp; | 2980 | xfs_efd_log_item_t *efdp; |
2973 | xfs_trans_t *tp; | 2981 | xfs_trans_t *tp; |
2974 | int i; | 2982 | int i; |
2983 | int error = 0; | ||
2975 | xfs_extent_t *extp; | 2984 | xfs_extent_t *extp; |
2976 | xfs_fsblock_t startblock_fsb; | 2985 | xfs_fsblock_t startblock_fsb; |
2977 | 2986 | ||
@@ -2995,23 +3004,32 @@ xlog_recover_process_efi( | |||
2995 | * free the memory associated with it. | 3004 | * free the memory associated with it. |
2996 | */ | 3005 | */ |
2997 | xfs_efi_release(efip, efip->efi_format.efi_nextents); | 3006 | xfs_efi_release(efip, efip->efi_format.efi_nextents); |
2998 | return; | 3007 | return XFS_ERROR(EIO); |
2999 | } | 3008 | } |
3000 | } | 3009 | } |
3001 | 3010 | ||
3002 | tp = xfs_trans_alloc(mp, 0); | 3011 | tp = xfs_trans_alloc(mp, 0); |
3003 | xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0); | 3012 | error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0); |
3013 | if (error) | ||
3014 | goto abort_error; | ||
3004 | efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents); | 3015 | efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents); |
3005 | 3016 | ||
3006 | for (i = 0; i < efip->efi_format.efi_nextents; i++) { | 3017 | for (i = 0; i < efip->efi_format.efi_nextents; i++) { |
3007 | extp = &(efip->efi_format.efi_extents[i]); | 3018 | extp = &(efip->efi_format.efi_extents[i]); |
3008 | xfs_free_extent(tp, extp->ext_start, extp->ext_len); | 3019 | error = xfs_free_extent(tp, extp->ext_start, extp->ext_len); |
3020 | if (error) | ||
3021 | goto abort_error; | ||
3009 | xfs_trans_log_efd_extent(tp, efdp, extp->ext_start, | 3022 | xfs_trans_log_efd_extent(tp, efdp, extp->ext_start, |
3010 | extp->ext_len); | 3023 | extp->ext_len); |
3011 | } | 3024 | } |
3012 | 3025 | ||
3013 | efip->efi_flags |= XFS_EFI_RECOVERED; | 3026 | efip->efi_flags |= XFS_EFI_RECOVERED; |
3014 | xfs_trans_commit(tp, 0); | 3027 | error = xfs_trans_commit(tp, 0); |
3028 | return error; | ||
3029 | |||
3030 | abort_error: | ||
3031 | xfs_trans_cancel(tp, XFS_TRANS_ABORT); | ||
3032 | return error; | ||
3015 | } | 3033 | } |
3016 | 3034 | ||
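xlog_recover_process_efi() now returns an error and cancels the transaction through one abort label when the reservation, an extent free, or the commit fails. The goto-based cleanup shape, reduced to a hypothetical standalone sketch:

#include <errno.h>
#include <stdio.h>

/* Hypothetical transaction helpers for the sketch. */
static int trans_reserve(void)  { return 0; }
static int free_extent(int i)   { return i == 2 ? EIO : 0; }
static int trans_commit(void)   { printf("committed\n"); return 0; }
static void trans_cancel(void)  { printf("cancelled\n"); }

/*
 * Reserve, do the work, commit; any failure jumps to a single cancel
 * path and the error is returned to the caller, as in the reworked
 * xlog_recover_process_efi().
 */
static int process_efi(int nextents)
{
        int i, error;

        error = trans_reserve();
        if (error)
                goto abort_error;

        for (i = 0; i < nextents; i++) {
                error = free_extent(i);
                if (error)
                        goto abort_error;
        }
        return trans_commit();

abort_error:
        trans_cancel();
        return error;
}

int main(void)
{
        return process_efi(4);
}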
3017 | /* | 3035 | /* |
@@ -3059,7 +3077,7 @@ xlog_recover_check_ail( | |||
3059 | * everything already in the AIL, we stop processing as soon as | 3077 | * everything already in the AIL, we stop processing as soon as |
3060 | * we see something other than an EFI in the AIL. | 3078 | * we see something other than an EFI in the AIL. |
3061 | */ | 3079 | */ |
3062 | STATIC void | 3080 | STATIC int |
3063 | xlog_recover_process_efis( | 3081 | xlog_recover_process_efis( |
3064 | xlog_t *log) | 3082 | xlog_t *log) |
3065 | { | 3083 | { |
@@ -3067,6 +3085,7 @@ xlog_recover_process_efis( | |||
3067 | xfs_efi_log_item_t *efip; | 3085 | xfs_efi_log_item_t *efip; |
3068 | int gen; | 3086 | int gen; |
3069 | xfs_mount_t *mp; | 3087 | xfs_mount_t *mp; |
3088 | int error = 0; | ||
3070 | 3089 | ||
3071 | mp = log->l_mp; | 3090 | mp = log->l_mp; |
3072 | spin_lock(&mp->m_ail_lock); | 3091 | spin_lock(&mp->m_ail_lock); |
@@ -3091,11 +3110,14 @@ xlog_recover_process_efis( | |||
3091 | } | 3110 | } |
3092 | 3111 | ||
3093 | spin_unlock(&mp->m_ail_lock); | 3112 | spin_unlock(&mp->m_ail_lock); |
3094 | xlog_recover_process_efi(mp, efip); | 3113 | error = xlog_recover_process_efi(mp, efip); |
3114 | if (error) | ||
3115 | return error; | ||
3095 | spin_lock(&mp->m_ail_lock); | 3116 | spin_lock(&mp->m_ail_lock); |
3096 | lip = xfs_trans_next_ail(mp, lip, &gen, NULL); | 3117 | lip = xfs_trans_next_ail(mp, lip, &gen, NULL); |
3097 | } | 3118 | } |
3098 | spin_unlock(&mp->m_ail_lock); | 3119 | spin_unlock(&mp->m_ail_lock); |
3120 | return error; | ||
3099 | } | 3121 | } |
3100 | 3122 | ||
3101 | /* | 3123 | /* |
@@ -3115,21 +3137,18 @@ xlog_recover_clear_agi_bucket( | |||
3115 | int error; | 3137 | int error; |
3116 | 3138 | ||
3117 | tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET); | 3139 | tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET); |
3118 | xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp), 0, 0, 0); | 3140 | error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp), 0, 0, 0); |
3119 | 3141 | if (!error) | |
3120 | error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, | 3142 | error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, |
3121 | XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), | 3143 | XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), |
3122 | XFS_FSS_TO_BB(mp, 1), 0, &agibp); | 3144 | XFS_FSS_TO_BB(mp, 1), 0, &agibp); |
3123 | if (error) { | 3145 | if (error) |
3124 | xfs_trans_cancel(tp, XFS_TRANS_ABORT); | 3146 | goto out_abort; |
3125 | return; | ||
3126 | } | ||
3127 | 3147 | ||
3148 | error = EINVAL; | ||
3128 | agi = XFS_BUF_TO_AGI(agibp); | 3149 | agi = XFS_BUF_TO_AGI(agibp); |
3129 | if (be32_to_cpu(agi->agi_magicnum) != XFS_AGI_MAGIC) { | 3150 | if (be32_to_cpu(agi->agi_magicnum) != XFS_AGI_MAGIC) |
3130 | xfs_trans_cancel(tp, XFS_TRANS_ABORT); | 3151 | goto out_abort; |
3131 | return; | ||
3132 | } | ||
3133 | 3152 | ||
3134 | agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); | 3153 | agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); |
3135 | offset = offsetof(xfs_agi_t, agi_unlinked) + | 3154 | offset = offsetof(xfs_agi_t, agi_unlinked) + |
@@ -3137,7 +3156,17 @@ xlog_recover_clear_agi_bucket( | |||
3137 | xfs_trans_log_buf(tp, agibp, offset, | 3156 | xfs_trans_log_buf(tp, agibp, offset, |
3138 | (offset + sizeof(xfs_agino_t) - 1)); | 3157 | (offset + sizeof(xfs_agino_t) - 1)); |
3139 | 3158 | ||
3140 | (void) xfs_trans_commit(tp, 0); | 3159 | error = xfs_trans_commit(tp, 0); |
3160 | if (error) | ||
3161 | goto out_error; | ||
3162 | return; | ||
3163 | |||
3164 | out_abort: | ||
3165 | xfs_trans_cancel(tp, XFS_TRANS_ABORT); | ||
3166 | out_error: | ||
3167 | xfs_fs_cmn_err(CE_WARN, mp, "xlog_recover_clear_agi_bucket: " | ||
3168 | "failed to clear agi %d. Continuing.", agno); | ||
3169 | return; | ||
3141 | } | 3170 | } |
3142 | 3171 | ||
3143 | /* | 3172 | /* |
@@ -3214,7 +3243,8 @@ xlog_recover_process_iunlinks( | |||
3214 | * next inode in the bucket. | 3243 | * next inode in the bucket. |
3215 | */ | 3244 | */ |
3216 | error = xfs_itobp(mp, NULL, ip, &dip, | 3245 | error = xfs_itobp(mp, NULL, ip, &dip, |
3217 | &ibp, 0, 0); | 3246 | &ibp, 0, 0, |
3247 | XFS_BUF_LOCK); | ||
3218 | ASSERT(error || (dip != NULL)); | 3248 | ASSERT(error || (dip != NULL)); |
3219 | } | 3249 | } |
3220 | 3250 | ||
@@ -3247,7 +3277,7 @@ xlog_recover_process_iunlinks( | |||
3247 | if (ip->i_d.di_mode == 0) | 3277 | if (ip->i_d.di_mode == 0) |
3248 | xfs_iput_new(ip, 0); | 3278 | xfs_iput_new(ip, 0); |
3249 | else | 3279 | else |
3250 | VN_RELE(XFS_ITOV(ip)); | 3280 | IRELE(ip); |
3251 | } else { | 3281 | } else { |
3252 | /* | 3282 | /* |
3253 | * We can't read in the inode | 3283 | * We can't read in the inode |
@@ -3445,7 +3475,7 @@ xlog_valid_rec_header( | |||
3445 | (!rhead->h_version || | 3475 | (!rhead->h_version || |
3446 | (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) { | 3476 | (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) { |
3447 | xlog_warn("XFS: %s: unrecognised log version (%d).", | 3477 | xlog_warn("XFS: %s: unrecognised log version (%d).", |
3448 | __FUNCTION__, be32_to_cpu(rhead->h_version)); | 3478 | __func__, be32_to_cpu(rhead->h_version)); |
3449 | return XFS_ERROR(EIO); | 3479 | return XFS_ERROR(EIO); |
3450 | } | 3480 | } |
3451 | 3481 | ||
@@ -3604,15 +3634,19 @@ xlog_do_recovery_pass( | |||
3604 | * _first_, then the log start (LR header end) | 3634 | * _first_, then the log start (LR header end) |
3605 | * - order is important. | 3635 | * - order is important. |
3606 | */ | 3636 | */ |
3637 | wrapped_hblks = hblks - split_hblks; | ||
3607 | bufaddr = XFS_BUF_PTR(hbp); | 3638 | bufaddr = XFS_BUF_PTR(hbp); |
3608 | XFS_BUF_SET_PTR(hbp, | 3639 | error = XFS_BUF_SET_PTR(hbp, |
3609 | bufaddr + BBTOB(split_hblks), | 3640 | bufaddr + BBTOB(split_hblks), |
3610 | BBTOB(hblks - split_hblks)); | 3641 | BBTOB(hblks - split_hblks)); |
3611 | wrapped_hblks = hblks - split_hblks; | 3642 | if (!error) |
3612 | error = xlog_bread(log, 0, wrapped_hblks, hbp); | 3643 | error = xlog_bread(log, 0, |
3644 | wrapped_hblks, hbp); | ||
3645 | if (!error) | ||
3646 | error = XFS_BUF_SET_PTR(hbp, bufaddr, | ||
3647 | BBTOB(hblks)); | ||
3613 | if (error) | 3648 | if (error) |
3614 | goto bread_err2; | 3649 | goto bread_err2; |
3615 | XFS_BUF_SET_PTR(hbp, bufaddr, BBTOB(hblks)); | ||
3616 | if (!offset) | 3650 | if (!offset) |
3617 | offset = xlog_align(log, 0, | 3651 | offset = xlog_align(log, 0, |
3618 | wrapped_hblks, hbp); | 3652 | wrapped_hblks, hbp); |
@@ -3664,13 +3698,18 @@ xlog_do_recovery_pass( | |||
3664 | * - order is important. | 3698 | * - order is important. |
3665 | */ | 3699 | */ |
3666 | bufaddr = XFS_BUF_PTR(dbp); | 3700 | bufaddr = XFS_BUF_PTR(dbp); |
3667 | XFS_BUF_SET_PTR(dbp, | 3701 | error = XFS_BUF_SET_PTR(dbp, |
3668 | bufaddr + BBTOB(split_bblks), | 3702 | bufaddr + BBTOB(split_bblks), |
3669 | BBTOB(bblks - split_bblks)); | 3703 | BBTOB(bblks - split_bblks)); |
3670 | if ((error = xlog_bread(log, wrapped_hblks, | 3704 | if (!error) |
3671 | bblks - split_bblks, dbp))) | 3705 | error = xlog_bread(log, wrapped_hblks, |
3706 | bblks - split_bblks, | ||
3707 | dbp); | ||
3708 | if (!error) | ||
3709 | error = XFS_BUF_SET_PTR(dbp, bufaddr, | ||
3710 | h_size); | ||
3711 | if (error) | ||
3672 | goto bread_err2; | 3712 | goto bread_err2; |
3673 | XFS_BUF_SET_PTR(dbp, bufaddr, h_size); | ||
3674 | if (!offset) | 3713 | if (!offset) |
3675 | offset = xlog_align(log, wrapped_hblks, | 3714 | offset = xlog_align(log, wrapped_hblks, |
3676 | bblks - split_bblks, dbp); | 3715 | bblks - split_bblks, dbp); |
@@ -3826,7 +3865,8 @@ xlog_do_recover( | |||
3826 | XFS_BUF_READ(bp); | 3865 | XFS_BUF_READ(bp); |
3827 | XFS_BUF_UNASYNC(bp); | 3866 | XFS_BUF_UNASYNC(bp); |
3828 | xfsbdstrat(log->l_mp, bp); | 3867 | xfsbdstrat(log->l_mp, bp); |
3829 | if ((error = xfs_iowait(bp))) { | 3868 | error = xfs_iowait(bp); |
3869 | if (error) { | ||
3830 | xfs_ioerror_alert("xlog_do_recover", | 3870 | xfs_ioerror_alert("xlog_do_recover", |
3831 | log->l_mp, bp, XFS_BUF_ADDR(bp)); | 3871 | log->l_mp, bp, XFS_BUF_ADDR(bp)); |
3832 | ASSERT(0); | 3872 | ASSERT(0); |
@@ -3917,7 +3957,14 @@ xlog_recover_finish( | |||
3917 | * rather than accepting new requests. | 3957 | * rather than accepting new requests. |
3918 | */ | 3958 | */ |
3919 | if (log->l_flags & XLOG_RECOVERY_NEEDED) { | 3959 | if (log->l_flags & XLOG_RECOVERY_NEEDED) { |
3920 | xlog_recover_process_efis(log); | 3960 | int error; |
3961 | error = xlog_recover_process_efis(log); | ||
3962 | if (error) { | ||
3963 | cmn_err(CE_ALERT, | ||
3964 | "Failed to recover EFIs on filesystem: %s", | ||
3965 | log->l_mp->m_fsname); | ||
3966 | return error; | ||
3967 | } | ||
3921 | /* | 3968 | /* |
3922 | * Sync the log to get all the EFIs out of the AIL. | 3969 | * Sync the log to get all the EFIs out of the AIL. |
3923 | * This isn't absolutely necessary, but it helps in | 3970 | * This isn't absolutely necessary, but it helps in |
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 8ed164eb9544..2fec452afbcc 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c | |||
@@ -43,8 +43,9 @@ | |||
43 | #include "xfs_rw.h" | 43 | #include "xfs_rw.h" |
44 | #include "xfs_quota.h" | 44 | #include "xfs_quota.h" |
45 | #include "xfs_fsops.h" | 45 | #include "xfs_fsops.h" |
46 | #include "xfs_utils.h" | ||
46 | 47 | ||
47 | STATIC void xfs_mount_log_sb(xfs_mount_t *, __int64_t); | 48 | STATIC int xfs_mount_log_sb(xfs_mount_t *, __int64_t); |
48 | STATIC int xfs_uuid_mount(xfs_mount_t *); | 49 | STATIC int xfs_uuid_mount(xfs_mount_t *); |
49 | STATIC void xfs_uuid_unmount(xfs_mount_t *mp); | 50 | STATIC void xfs_uuid_unmount(xfs_mount_t *mp); |
50 | STATIC void xfs_unmountfs_wait(xfs_mount_t *); | 51 | STATIC void xfs_unmountfs_wait(xfs_mount_t *); |
@@ -57,7 +58,7 @@ STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t, | |||
57 | STATIC void xfs_icsb_sync_counters(xfs_mount_t *); | 58 | STATIC void xfs_icsb_sync_counters(xfs_mount_t *); |
58 | STATIC int xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t, | 59 | STATIC int xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t, |
59 | int64_t, int); | 60 | int64_t, int); |
60 | STATIC int xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t); | 61 | STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t); |
61 | 62 | ||
62 | #else | 63 | #else |
63 | 64 | ||
@@ -956,7 +957,6 @@ xfs_mountfs( | |||
956 | { | 957 | { |
957 | xfs_sb_t *sbp = &(mp->m_sb); | 958 | xfs_sb_t *sbp = &(mp->m_sb); |
958 | xfs_inode_t *rip; | 959 | xfs_inode_t *rip; |
959 | bhv_vnode_t *rvp = NULL; | ||
960 | __uint64_t resblks; | 960 | __uint64_t resblks; |
961 | __int64_t update_flags = 0LL; | 961 | __int64_t update_flags = 0LL; |
962 | uint quotamount, quotaflags; | 962 | uint quotamount, quotaflags; |
@@ -964,11 +964,6 @@ xfs_mountfs( | |||
964 | int uuid_mounted = 0; | 964 | int uuid_mounted = 0; |
965 | int error = 0; | 965 | int error = 0; |
966 | 966 | ||
967 | if (mp->m_sb_bp == NULL) { | ||
968 | error = xfs_readsb(mp, mfsi_flags); | ||
969 | if (error) | ||
970 | return error; | ||
971 | } | ||
972 | xfs_mount_common(mp, sbp); | 967 | xfs_mount_common(mp, sbp); |
973 | 968 | ||
974 | /* | 969 | /* |
@@ -1163,7 +1158,6 @@ xfs_mountfs( | |||
1163 | } | 1158 | } |
1164 | 1159 | ||
1165 | ASSERT(rip != NULL); | 1160 | ASSERT(rip != NULL); |
1166 | rvp = XFS_ITOV(rip); | ||
1167 | 1161 | ||
1168 | if (unlikely((rip->i_d.di_mode & S_IFMT) != S_IFDIR)) { | 1162 | if (unlikely((rip->i_d.di_mode & S_IFMT) != S_IFDIR)) { |
1169 | cmn_err(CE_WARN, "XFS: corrupted root inode"); | 1163 | cmn_err(CE_WARN, "XFS: corrupted root inode"); |
@@ -1195,8 +1189,13 @@ xfs_mountfs( | |||
1195 | /* | 1189 | /* |
1196 | * If fs is not mounted readonly, then update the superblock changes. | 1190 | * If fs is not mounted readonly, then update the superblock changes. |
1197 | */ | 1191 | */ |
1198 | if (update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) | 1192 | if (update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) { |
1199 | xfs_mount_log_sb(mp, update_flags); | 1193 | error = xfs_mount_log_sb(mp, update_flags); |
1194 | if (error) { | ||
1195 | cmn_err(CE_WARN, "XFS: failed to write sb changes"); | ||
1196 | goto error4; | ||
1197 | } | ||
1198 | } | ||
1200 | 1199 | ||
1201 | /* | 1200 | /* |
1202 | * Initialise the XFS quota management subsystem for this mount | 1201 | * Initialise the XFS quota management subsystem for this mount |
@@ -1233,12 +1232,15 @@ xfs_mountfs( | |||
1233 | * | 1232 | * |
1234 | * We default to 5% or 1024 fsbs of space reserved, whichever is smaller. | 1233 | * We default to 5% or 1024 fsbs of space reserved, whichever is smaller. |
1235 | * This may drive us straight to ENOSPC on mount, but that implies | 1234 | * This may drive us straight to ENOSPC on mount, but that implies |
1236 | * we were already there on the last unmount. | 1235 | * we were already there on the last unmount. Warn if this occurs. |
1237 | */ | 1236 | */ |
1238 | resblks = mp->m_sb.sb_dblocks; | 1237 | resblks = mp->m_sb.sb_dblocks; |
1239 | do_div(resblks, 20); | 1238 | do_div(resblks, 20); |
1240 | resblks = min_t(__uint64_t, resblks, 1024); | 1239 | resblks = min_t(__uint64_t, resblks, 1024); |
1241 | xfs_reserve_blocks(mp, &resblks, NULL); | 1240 | error = xfs_reserve_blocks(mp, &resblks, NULL); |
1241 | if (error) | ||
1242 | cmn_err(CE_WARN, "XFS: Unable to allocate reserve blocks. " | ||
1243 | "Continuing without a reserve pool."); | ||
1242 | 1244 | ||
1243 | return 0; | 1245 | return 0; |
1244 | 1246 | ||
@@ -1246,7 +1248,7 @@ xfs_mountfs( | |||
1246 | /* | 1248 | /* |
1247 | * Free up the root inode. | 1249 | * Free up the root inode. |
1248 | */ | 1250 | */ |
1249 | VN_RELE(rvp); | 1251 | IRELE(rip); |
1250 | error3: | 1252 | error3: |
1251 | xfs_log_unmount_dealloc(mp); | 1253 | xfs_log_unmount_dealloc(mp); |
1252 | error2: | 1254 | error2: |
@@ -1274,6 +1276,7 @@ int | |||
1274 | xfs_unmountfs(xfs_mount_t *mp, struct cred *cr) | 1276 | xfs_unmountfs(xfs_mount_t *mp, struct cred *cr) |
1275 | { | 1277 | { |
1276 | __uint64_t resblks; | 1278 | __uint64_t resblks; |
1279 | int error = 0; | ||
1277 | 1280 | ||
1278 | /* | 1281 | /* |
1279 | * We can potentially deadlock here if we have an inode cluster | 1282 | * We can potentially deadlock here if we have an inode cluster |
@@ -1317,9 +1320,15 @@ xfs_unmountfs(xfs_mount_t *mp, struct cred *cr) | |||
1317 | * value does not matter.... | 1320 | * value does not matter.... |
1318 | */ | 1321 | */ |
1319 | resblks = 0; | 1322 | resblks = 0; |
1320 | xfs_reserve_blocks(mp, &resblks, NULL); | 1323 | error = xfs_reserve_blocks(mp, &resblks, NULL); |
1324 | if (error) | ||
1325 | cmn_err(CE_WARN, "XFS: Unable to free reserved block pool. " | ||
1326 | "Freespace may not be correct on next mount."); | ||
1321 | 1327 | ||
1322 | xfs_log_sbcount(mp, 1); | 1328 | error = xfs_log_sbcount(mp, 1); |
1329 | if (error) | ||
1330 | cmn_err(CE_WARN, "XFS: Unable to update superblock counters. " | ||
1331 | "Freespace may not be correct on next mount."); | ||
1323 | xfs_unmountfs_writesb(mp); | 1332 | xfs_unmountfs_writesb(mp); |
1324 | xfs_unmountfs_wait(mp); /* wait for async bufs */ | 1333 | xfs_unmountfs_wait(mp); /* wait for async bufs */ |
1325 | xfs_log_unmount(mp); /* Done! No more fs ops. */ | 1334 | xfs_log_unmount(mp); /* Done! No more fs ops. */ |
@@ -1411,9 +1420,8 @@ xfs_log_sbcount( | |||
1411 | xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS); | 1420 | xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS); |
1412 | if (sync) | 1421 | if (sync) |
1413 | xfs_trans_set_sync(tp); | 1422 | xfs_trans_set_sync(tp); |
1414 | xfs_trans_commit(tp, 0); | 1423 | error = xfs_trans_commit(tp, 0); |
1415 | 1424 | return error; | |
1416 | return 0; | ||
1417 | } | 1425 | } |
1418 | 1426 | ||
1419 | STATIC void | 1427 | STATIC void |
@@ -1462,7 +1470,6 @@ xfs_unmountfs_writesb(xfs_mount_t *mp) | |||
1462 | XFS_BUF_UNASYNC(sbp); | 1470 | XFS_BUF_UNASYNC(sbp); |
1463 | ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp); | 1471 | ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp); |
1464 | xfsbdstrat(mp, sbp); | 1472 | xfsbdstrat(mp, sbp); |
1465 | /* Nevermind errors we might get here. */ | ||
1466 | error = xfs_iowait(sbp); | 1473 | error = xfs_iowait(sbp); |
1467 | if (error) | 1474 | if (error) |
1468 | xfs_ioerror_alert("xfs_unmountfs_writesb", | 1475 | xfs_ioerror_alert("xfs_unmountfs_writesb", |
@@ -1911,24 +1918,27 @@ xfs_uuid_unmount( | |||
1911 | * be altered by the mount options, as well as any potential sb_features2 | 1918 | * be altered by the mount options, as well as any potential sb_features2 |
1912 | * fixup. Only the first superblock is updated. | 1919 | * fixup. Only the first superblock is updated. |
1913 | */ | 1920 | */ |
1914 | STATIC void | 1921 | STATIC int |
1915 | xfs_mount_log_sb( | 1922 | xfs_mount_log_sb( |
1916 | xfs_mount_t *mp, | 1923 | xfs_mount_t *mp, |
1917 | __int64_t fields) | 1924 | __int64_t fields) |
1918 | { | 1925 | { |
1919 | xfs_trans_t *tp; | 1926 | xfs_trans_t *tp; |
1927 | int error; | ||
1920 | 1928 | ||
1921 | ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID | | 1929 | ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID | |
1922 | XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2)); | 1930 | XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2)); |
1923 | 1931 | ||
1924 | tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT); | 1932 | tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT); |
1925 | if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, | 1933 | error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, |
1926 | XFS_DEFAULT_LOG_COUNT)) { | 1934 | XFS_DEFAULT_LOG_COUNT); |
1935 | if (error) { | ||
1927 | xfs_trans_cancel(tp, 0); | 1936 | xfs_trans_cancel(tp, 0); |
1928 | return; | 1937 | return error; |
1929 | } | 1938 | } |
1930 | xfs_mod_sb(tp, fields); | 1939 | xfs_mod_sb(tp, fields); |
1931 | xfs_trans_commit(tp, 0); | 1940 | error = xfs_trans_commit(tp, 0); |
1941 | return error; | ||
1932 | } | 1942 | } |
1933 | 1943 | ||
1934 | 1944 | ||
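Editor's note: xfs_mount_log_sb() above now returns int, cancelling the transaction when the reservation fails and handing the commit status back to its caller instead of discarding it. A simplified, self-contained analogue of that reserve/modify/commit flow (the stub functions stand in for the real transaction calls and are not XFS APIs):

    #include <stdio.h>

    /* Stand-ins for xfs_trans_reserve()/xfs_mod_sb()/xfs_trans_commit(). */
    static int reserve(void)  { return 0; }  /* 0 on success, errno otherwise */
    static void modify(void)  { }
    static int commit(void)   { return 0; }
    static void cancel(void)  { }

    static int log_sb_fields(void)
    {
            int error = reserve();
            if (error) {
                    cancel();       /* nothing committed yet: back out */
                    return error;
            }
            modify();
            return commit();        /* propagate the commit status */
    }

    int main(void)
    {
            int error = log_sb_fields();
            if (error)
                    fprintf(stderr, "superblock update failed: %d\n", error);
            return 0;
    }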
@@ -2189,7 +2199,7 @@ xfs_icsb_counter_disabled( | |||
2189 | return test_bit(field, &mp->m_icsb_counters); | 2199 | return test_bit(field, &mp->m_icsb_counters); |
2190 | } | 2200 | } |
2191 | 2201 | ||
2192 | STATIC int | 2202 | STATIC void |
2193 | xfs_icsb_disable_counter( | 2203 | xfs_icsb_disable_counter( |
2194 | xfs_mount_t *mp, | 2204 | xfs_mount_t *mp, |
2195 | xfs_sb_field_t field) | 2205 | xfs_sb_field_t field) |
@@ -2207,7 +2217,7 @@ xfs_icsb_disable_counter( | |||
2207 | * the m_icsb_mutex. | 2217 | * the m_icsb_mutex. |
2208 | */ | 2218 | */ |
2209 | if (xfs_icsb_counter_disabled(mp, field)) | 2219 | if (xfs_icsb_counter_disabled(mp, field)) |
2210 | return 0; | 2220 | return; |
2211 | 2221 | ||
2212 | xfs_icsb_lock_all_counters(mp); | 2222 | xfs_icsb_lock_all_counters(mp); |
2213 | if (!test_and_set_bit(field, &mp->m_icsb_counters)) { | 2223 | if (!test_and_set_bit(field, &mp->m_icsb_counters)) { |
@@ -2230,8 +2240,6 @@ xfs_icsb_disable_counter( | |||
2230 | } | 2240 | } |
2231 | 2241 | ||
2232 | xfs_icsb_unlock_all_counters(mp); | 2242 | xfs_icsb_unlock_all_counters(mp); |
2233 | |||
2234 | return 0; | ||
2235 | } | 2243 | } |
2236 | 2244 | ||
2237 | STATIC void | 2245 | STATIC void |
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 1d8a4728d847..1ed575110ff0 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h | |||
@@ -66,17 +66,17 @@ struct xfs_mru_cache; | |||
66 | * Prototypes and functions for the Data Migration subsystem. | 66 | * Prototypes and functions for the Data Migration subsystem. |
67 | */ | 67 | */ |
68 | 68 | ||
69 | typedef int (*xfs_send_data_t)(int, bhv_vnode_t *, | 69 | typedef int (*xfs_send_data_t)(int, struct xfs_inode *, |
70 | xfs_off_t, size_t, int, bhv_vrwlock_t *); | 70 | xfs_off_t, size_t, int, int *); |
71 | typedef int (*xfs_send_mmap_t)(struct vm_area_struct *, uint); | 71 | typedef int (*xfs_send_mmap_t)(struct vm_area_struct *, uint); |
72 | typedef int (*xfs_send_destroy_t)(bhv_vnode_t *, dm_right_t); | 72 | typedef int (*xfs_send_destroy_t)(struct xfs_inode *, dm_right_t); |
73 | typedef int (*xfs_send_namesp_t)(dm_eventtype_t, struct xfs_mount *, | 73 | typedef int (*xfs_send_namesp_t)(dm_eventtype_t, struct xfs_mount *, |
74 | bhv_vnode_t *, | 74 | struct xfs_inode *, dm_right_t, |
75 | dm_right_t, bhv_vnode_t *, dm_right_t, | 75 | struct xfs_inode *, dm_right_t, |
76 | char *, char *, mode_t, int, int); | 76 | const char *, const char *, mode_t, int, int); |
77 | typedef int (*xfs_send_mount_t)(struct xfs_mount *, dm_right_t, | 77 | typedef int (*xfs_send_mount_t)(struct xfs_mount *, dm_right_t, |
78 | char *, char *); | 78 | char *, char *); |
79 | typedef void (*xfs_send_unmount_t)(struct xfs_mount *, bhv_vnode_t *, | 79 | typedef void (*xfs_send_unmount_t)(struct xfs_mount *, struct xfs_inode *, |
80 | dm_right_t, mode_t, int, int); | 80 | dm_right_t, mode_t, int, int); |
81 | 81 | ||
82 | typedef struct xfs_dmops { | 82 | typedef struct xfs_dmops { |
@@ -88,20 +88,20 @@ typedef struct xfs_dmops { | |||
88 | xfs_send_unmount_t xfs_send_unmount; | 88 | xfs_send_unmount_t xfs_send_unmount; |
89 | } xfs_dmops_t; | 89 | } xfs_dmops_t; |
90 | 90 | ||
91 | #define XFS_SEND_DATA(mp, ev,vp,off,len,fl,lock) \ | 91 | #define XFS_SEND_DATA(mp, ev,ip,off,len,fl,lock) \ |
92 | (*(mp)->m_dm_ops->xfs_send_data)(ev,vp,off,len,fl,lock) | 92 | (*(mp)->m_dm_ops->xfs_send_data)(ev,ip,off,len,fl,lock) |
93 | #define XFS_SEND_MMAP(mp, vma,fl) \ | 93 | #define XFS_SEND_MMAP(mp, vma,fl) \ |
94 | (*(mp)->m_dm_ops->xfs_send_mmap)(vma,fl) | 94 | (*(mp)->m_dm_ops->xfs_send_mmap)(vma,fl) |
95 | #define XFS_SEND_DESTROY(mp, vp,right) \ | 95 | #define XFS_SEND_DESTROY(mp, ip,right) \ |
96 | (*(mp)->m_dm_ops->xfs_send_destroy)(vp,right) | 96 | (*(mp)->m_dm_ops->xfs_send_destroy)(ip,right) |
97 | #define XFS_SEND_NAMESP(mp, ev,b1,r1,b2,r2,n1,n2,mode,rval,fl) \ | 97 | #define XFS_SEND_NAMESP(mp, ev,b1,r1,b2,r2,n1,n2,mode,rval,fl) \ |
98 | (*(mp)->m_dm_ops->xfs_send_namesp)(ev,NULL,b1,r1,b2,r2,n1,n2,mode,rval,fl) | 98 | (*(mp)->m_dm_ops->xfs_send_namesp)(ev,NULL,b1,r1,b2,r2,n1,n2,mode,rval,fl) |
99 | #define XFS_SEND_PREUNMOUNT(mp,b1,r1,b2,r2,n1,n2,mode,rval,fl) \ | 99 | #define XFS_SEND_PREUNMOUNT(mp,b1,r1,b2,r2,n1,n2,mode,rval,fl) \ |
100 | (*(mp)->m_dm_ops->xfs_send_namesp)(DM_EVENT_PREUNMOUNT,mp,b1,r1,b2,r2,n1,n2,mode,rval,fl) | 100 | (*(mp)->m_dm_ops->xfs_send_namesp)(DM_EVENT_PREUNMOUNT,mp,b1,r1,b2,r2,n1,n2,mode,rval,fl) |
101 | #define XFS_SEND_MOUNT(mp,right,path,name) \ | 101 | #define XFS_SEND_MOUNT(mp,right,path,name) \ |
102 | (*(mp)->m_dm_ops->xfs_send_mount)(mp,right,path,name) | 102 | (*(mp)->m_dm_ops->xfs_send_mount)(mp,right,path,name) |
103 | #define XFS_SEND_UNMOUNT(mp, vp,right,mode,rval,fl) \ | 103 | #define XFS_SEND_UNMOUNT(mp, ip,right,mode,rval,fl) \ |
104 | (*(mp)->m_dm_ops->xfs_send_unmount)(mp,vp,right,mode,rval,fl) | 104 | (*(mp)->m_dm_ops->xfs_send_unmount)(mp,ip,right,mode,rval,fl) |
105 | 105 | ||
106 | 106 | ||
107 | /* | 107 | /* |
@@ -220,7 +220,7 @@ extern void xfs_icsb_sync_counters_flags(struct xfs_mount *, int); | |||
220 | #endif | 220 | #endif |
221 | 221 | ||
222 | typedef struct xfs_ail { | 222 | typedef struct xfs_ail { |
223 | xfs_ail_entry_t xa_ail; | 223 | struct list_head xa_ail; |
224 | uint xa_gen; | 224 | uint xa_gen; |
225 | struct task_struct *xa_task; | 225 | struct task_struct *xa_task; |
226 | xfs_lsn_t xa_target; | 226 | xfs_lsn_t xa_target; |
@@ -401,7 +401,7 @@ typedef struct xfs_mount { | |||
401 | 401 | ||
402 | /* | 402 | /* |
403 | * Allow large block sizes to be reported to userspace programs if the | 403 | * Allow large block sizes to be reported to userspace programs if the |
404 | * "largeio" mount option is used. | 404 | * "largeio" mount option is used. |
405 | * | 405 | * |
406 | * If compatibility mode is specified, simply return the basic unit of caching | 406 | * If compatibility mode is specified, simply return the basic unit of caching |
407 | * so that we don't get inefficient read/modify/write I/O from user apps. | 407 | * so that we don't get inefficient read/modify/write I/O from user apps. |
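Editor's note: the prototypes above switch the Data Migration callbacks from bhv_vnode_t to struct xfs_inode pointers; the XFS_SEND_* macros still just dispatch through the per-mount xfs_dmops_t function-pointer table. A minimal, self-contained illustration of that ops-table pattern (the types and names below are simplified placeholders, not the kernel's):

    #include <stdio.h>

    struct inode;                       /* opaque, like struct xfs_inode here */

    struct dmops {
            int (*send_destroy)(struct inode *ip, int right);
    };

    struct mount {
            const struct dmops *m_dm_ops;
    };

    #define SEND_DESTROY(mp, ip, right) \
            ((mp)->m_dm_ops->send_destroy((ip), (right)))

    /* "No DMAPI support" stub handler. */
    static int null_send_destroy(struct inode *ip, int right)
    {
            (void)ip; (void)right;
            return 0;
    }

    static const struct dmops null_dmops = { .send_destroy = null_send_destroy };

    int main(void)
    {
            struct mount m = { .m_dm_ops = &null_dmops };
            printf("%d\n", SEND_DESTROY(&m, (struct inode *)0, 0));
            return 0;
    }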
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c index 7eb157a59f9e..ee371890d85d 100644 --- a/fs/xfs/xfs_rename.c +++ b/fs/xfs/xfs_rename.c | |||
@@ -36,7 +36,6 @@ | |||
36 | #include "xfs_bmap.h" | 36 | #include "xfs_bmap.h" |
37 | #include "xfs_error.h" | 37 | #include "xfs_error.h" |
38 | #include "xfs_quota.h" | 38 | #include "xfs_quota.h" |
39 | #include "xfs_refcache.h" | ||
40 | #include "xfs_utils.h" | 39 | #include "xfs_utils.h" |
41 | #include "xfs_trans_space.h" | 40 | #include "xfs_trans_space.h" |
42 | #include "xfs_vnodeops.h" | 41 | #include "xfs_vnodeops.h" |
@@ -84,25 +83,23 @@ int xfs_rename_skip, xfs_rename_nskip; | |||
84 | */ | 83 | */ |
85 | STATIC int | 84 | STATIC int |
86 | xfs_lock_for_rename( | 85 | xfs_lock_for_rename( |
87 | xfs_inode_t *dp1, /* old (source) directory inode */ | 86 | xfs_inode_t *dp1, /* in: old (source) directory inode */ |
88 | xfs_inode_t *dp2, /* new (target) directory inode */ | 87 | xfs_inode_t *dp2, /* in: new (target) directory inode */ |
89 | bhv_vname_t *vname1,/* old entry name */ | 88 | xfs_inode_t *ip1, /* in: inode of old entry */ |
90 | bhv_vname_t *vname2,/* new entry name */ | 89 | struct xfs_name *name2, /* in: new entry name */ |
91 | xfs_inode_t **ipp1, /* inode of old entry */ | 90 | xfs_inode_t **ipp2, /* out: inode of new entry, if it |
92 | xfs_inode_t **ipp2, /* inode of new entry, if it | ||
93 | already exists, NULL otherwise. */ | 91 | already exists, NULL otherwise. */ |
94 | xfs_inode_t **i_tab,/* array of inode returned, sorted */ | 92 | xfs_inode_t **i_tab,/* out: array of inode returned, sorted */ |
95 | int *num_inodes) /* number of inodes in array */ | 93 | int *num_inodes) /* out: number of inodes in array */ |
96 | { | 94 | { |
97 | xfs_inode_t *ip1, *ip2, *temp; | 95 | xfs_inode_t *ip2 = NULL; |
96 | xfs_inode_t *temp; | ||
98 | xfs_ino_t inum1, inum2; | 97 | xfs_ino_t inum1, inum2; |
99 | int error; | 98 | int error; |
100 | int i, j; | 99 | int i, j; |
101 | uint lock_mode; | 100 | uint lock_mode; |
102 | int diff_dirs = (dp1 != dp2); | 101 | int diff_dirs = (dp1 != dp2); |
103 | 102 | ||
104 | ip2 = NULL; | ||
105 | |||
106 | /* | 103 | /* |
107 | * First, find out the current inums of the entries so that we | 104 | * First, find out the current inums of the entries so that we |
108 | * can determine the initial locking order. We'll have to | 105 | * can determine the initial locking order. We'll have to |
@@ -110,27 +107,20 @@ xfs_lock_for_rename( | |||
110 | * to see if we still have the right inodes, directories, etc. | 107 | * to see if we still have the right inodes, directories, etc. |
111 | */ | 108 | */ |
112 | lock_mode = xfs_ilock_map_shared(dp1); | 109 | lock_mode = xfs_ilock_map_shared(dp1); |
113 | error = xfs_get_dir_entry(vname1, &ip1); | 110 | IHOLD(ip1); |
114 | if (error) { | 111 | xfs_itrace_ref(ip1); |
115 | xfs_iunlock_map_shared(dp1, lock_mode); | ||
116 | return error; | ||
117 | } | ||
118 | 112 | ||
119 | inum1 = ip1->i_ino; | 113 | inum1 = ip1->i_ino; |
120 | 114 | ||
121 | ASSERT(ip1); | ||
122 | xfs_itrace_ref(ip1); | ||
123 | |||
124 | /* | 115 | /* |
125 | * Unlock dp1 and lock dp2 if they are different. | 116 | * Unlock dp1 and lock dp2 if they are different. |
126 | */ | 117 | */ |
127 | |||
128 | if (diff_dirs) { | 118 | if (diff_dirs) { |
129 | xfs_iunlock_map_shared(dp1, lock_mode); | 119 | xfs_iunlock_map_shared(dp1, lock_mode); |
130 | lock_mode = xfs_ilock_map_shared(dp2); | 120 | lock_mode = xfs_ilock_map_shared(dp2); |
131 | } | 121 | } |
132 | 122 | ||
133 | error = xfs_dir_lookup_int(dp2, lock_mode, vname2, &inum2, &ip2); | 123 | error = xfs_dir_lookup_int(dp2, lock_mode, name2, &inum2, &ip2); |
134 | if (error == ENOENT) { /* target does not need to exist. */ | 124 | if (error == ENOENT) { /* target does not need to exist. */ |
135 | inum2 = 0; | 125 | inum2 = 0; |
136 | } else if (error) { | 126 | } else if (error) { |
@@ -162,6 +152,7 @@ xfs_lock_for_rename( | |||
162 | *num_inodes = 4; | 152 | *num_inodes = 4; |
163 | i_tab[3] = ip2; | 153 | i_tab[3] = ip2; |
164 | } | 154 | } |
155 | *ipp2 = i_tab[3]; | ||
165 | 156 | ||
166 | /* | 157 | /* |
167 | * Sort the elements via bubble sort. (Remember, there are at | 158 | * Sort the elements via bubble sort. (Remember, there are at |
@@ -199,21 +190,6 @@ xfs_lock_for_rename( | |||
199 | xfs_lock_inodes(i_tab, *num_inodes, 0, XFS_ILOCK_SHARED); | 190 | xfs_lock_inodes(i_tab, *num_inodes, 0, XFS_ILOCK_SHARED); |
200 | } | 191 | } |
201 | 192 | ||
202 | /* | ||
203 | * Set the return value. Null out any unused entries in i_tab. | ||
204 | */ | ||
205 | *ipp1 = *ipp2 = NULL; | ||
206 | for (i=0; i < *num_inodes; i++) { | ||
207 | if (i_tab[i]->i_ino == inum1) { | ||
208 | *ipp1 = i_tab[i]; | ||
209 | } | ||
210 | if (i_tab[i]->i_ino == inum2) { | ||
211 | *ipp2 = i_tab[i]; | ||
212 | } | ||
213 | } | ||
214 | for (;i < 4; i++) { | ||
215 | i_tab[i] = NULL; | ||
216 | } | ||
217 | return 0; | 193 | return 0; |
218 | } | 194 | } |
219 | 195 | ||
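Editor's note: xfs_lock_for_rename() above gathers the up-to-four inodes involved in a rename into i_tab, sorts them by inode number (the bubble sort mentioned in the comment), and only then locks them, so two concurrent renames can never take the same pair of locks in opposite orders. A self-contained sketch of that deadlock-avoidance idea (simplified types, not the kernel code):

    #include <stdio.h>
    #include <stdint.h>

    struct fake_inode { uint64_t ino; };

    /* Sort a small table of inodes by inode number, then "lock" in order. */
    static void lock_in_order(struct fake_inode **tab, int n)
    {
            for (int i = 0; i < n - 1; i++)
                    for (int j = 1; j < n - i; j++)
                            if (tab[j]->ino < tab[j - 1]->ino) {
                                    struct fake_inode *tmp = tab[j];
                                    tab[j] = tab[j - 1];
                                    tab[j - 1] = tmp;
                            }
            for (int i = 0; i < n; i++)
                    printf("lock inode %llu\n",
                           (unsigned long long)tab[i]->ino);
    }

    int main(void)
    {
            struct fake_inode a = { 42 }, b = { 7 }, c = { 19 };
            struct fake_inode *tab[] = { &a, &b, &c };

            lock_in_order(tab, 3);      /* always locks 7, then 19, then 42 */
            return 0;
    }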
@@ -223,13 +199,13 @@ xfs_lock_for_rename( | |||
223 | int | 199 | int |
224 | xfs_rename( | 200 | xfs_rename( |
225 | xfs_inode_t *src_dp, | 201 | xfs_inode_t *src_dp, |
226 | bhv_vname_t *src_vname, | 202 | struct xfs_name *src_name, |
227 | bhv_vnode_t *target_dir_vp, | 203 | xfs_inode_t *src_ip, |
228 | bhv_vname_t *target_vname) | 204 | xfs_inode_t *target_dp, |
205 | struct xfs_name *target_name) | ||
229 | { | 206 | { |
230 | bhv_vnode_t *src_dir_vp = XFS_ITOV(src_dp); | ||
231 | xfs_trans_t *tp; | 207 | xfs_trans_t *tp; |
232 | xfs_inode_t *target_dp, *src_ip, *target_ip; | 208 | xfs_inode_t *target_ip; |
233 | xfs_mount_t *mp = src_dp->i_mount; | 209 | xfs_mount_t *mp = src_dp->i_mount; |
234 | int new_parent; /* moving to a new dir */ | 210 | int new_parent; /* moving to a new dir */ |
235 | int src_is_directory; /* src_name is a directory */ | 211 | int src_is_directory; /* src_name is a directory */ |
@@ -243,29 +219,16 @@ xfs_rename( | |||
243 | int spaceres; | 219 | int spaceres; |
244 | int target_link_zero = 0; | 220 | int target_link_zero = 0; |
245 | int num_inodes; | 221 | int num_inodes; |
246 | char *src_name = VNAME(src_vname); | ||
247 | char *target_name = VNAME(target_vname); | ||
248 | int src_namelen = VNAMELEN(src_vname); | ||
249 | int target_namelen = VNAMELEN(target_vname); | ||
250 | 222 | ||
251 | xfs_itrace_entry(src_dp); | 223 | xfs_itrace_entry(src_dp); |
252 | xfs_itrace_entry(xfs_vtoi(target_dir_vp)); | 224 | xfs_itrace_entry(target_dp); |
253 | |||
254 | /* | ||
255 | * Find the XFS behavior descriptor for the target directory | ||
256 | * vnode since it was not handed to us. | ||
257 | */ | ||
258 | target_dp = xfs_vtoi(target_dir_vp); | ||
259 | if (target_dp == NULL) { | ||
260 | return XFS_ERROR(EXDEV); | ||
261 | } | ||
262 | 225 | ||
263 | if (DM_EVENT_ENABLED(src_dp, DM_EVENT_RENAME) || | 226 | if (DM_EVENT_ENABLED(src_dp, DM_EVENT_RENAME) || |
264 | DM_EVENT_ENABLED(target_dp, DM_EVENT_RENAME)) { | 227 | DM_EVENT_ENABLED(target_dp, DM_EVENT_RENAME)) { |
265 | error = XFS_SEND_NAMESP(mp, DM_EVENT_RENAME, | 228 | error = XFS_SEND_NAMESP(mp, DM_EVENT_RENAME, |
266 | src_dir_vp, DM_RIGHT_NULL, | 229 | src_dp, DM_RIGHT_NULL, |
267 | target_dir_vp, DM_RIGHT_NULL, | 230 | target_dp, DM_RIGHT_NULL, |
268 | src_name, target_name, | 231 | src_name->name, target_name->name, |
269 | 0, 0, 0); | 232 | 0, 0, 0); |
270 | if (error) { | 233 | if (error) { |
271 | return error; | 234 | return error; |
@@ -282,10 +245,8 @@ xfs_rename( | |||
282 | * does not exist in the source directory. | 245 | * does not exist in the source directory. |
283 | */ | 246 | */ |
284 | tp = NULL; | 247 | tp = NULL; |
285 | error = xfs_lock_for_rename(src_dp, target_dp, src_vname, | 248 | error = xfs_lock_for_rename(src_dp, target_dp, src_ip, target_name, |
286 | target_vname, &src_ip, &target_ip, inodes, | 249 | &target_ip, inodes, &num_inodes); |
287 | &num_inodes); | ||
288 | |||
289 | if (error) { | 250 | if (error) { |
290 | /* | 251 | /* |
291 | * We have nothing locked, no inode references, and | 252 | * We have nothing locked, no inode references, and |
@@ -331,7 +292,7 @@ xfs_rename( | |||
331 | XFS_BMAP_INIT(&free_list, &first_block); | 292 | XFS_BMAP_INIT(&free_list, &first_block); |
332 | tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME); | 293 | tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME); |
333 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES; | 294 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES; |
334 | spaceres = XFS_RENAME_SPACE_RES(mp, target_namelen); | 295 | spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len); |
335 | error = xfs_trans_reserve(tp, spaceres, XFS_RENAME_LOG_RES(mp), 0, | 296 | error = xfs_trans_reserve(tp, spaceres, XFS_RENAME_LOG_RES(mp), 0, |
336 | XFS_TRANS_PERM_LOG_RES, XFS_RENAME_LOG_COUNT); | 297 | XFS_TRANS_PERM_LOG_RES, XFS_RENAME_LOG_COUNT); |
337 | if (error == ENOSPC) { | 298 | if (error == ENOSPC) { |
@@ -365,10 +326,10 @@ xfs_rename( | |||
365 | * them when they unlock the inodes. Also, we need to be careful | 326 | * them when they unlock the inodes. Also, we need to be careful |
366 | * not to add an inode to the transaction more than once. | 327 | * not to add an inode to the transaction more than once. |
367 | */ | 328 | */ |
368 | VN_HOLD(src_dir_vp); | 329 | IHOLD(src_dp); |
369 | xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL); | 330 | xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL); |
370 | if (new_parent) { | 331 | if (new_parent) { |
371 | VN_HOLD(target_dir_vp); | 332 | IHOLD(target_dp); |
372 | xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL); | 333 | xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL); |
373 | } | 334 | } |
374 | if ((src_ip != src_dp) && (src_ip != target_dp)) { | 335 | if ((src_ip != src_dp) && (src_ip != target_dp)) { |
@@ -389,9 +350,8 @@ xfs_rename( | |||
389 | * If there's no space reservation, check the entry will | 350 | * If there's no space reservation, check the entry will |
390 | * fit before actually inserting it. | 351 | * fit before actually inserting it. |
391 | */ | 352 | */ |
392 | if (spaceres == 0 && | 353 | error = xfs_dir_canenter(tp, target_dp, target_name, spaceres); |
393 | (error = xfs_dir_canenter(tp, target_dp, target_name, | 354 | if (error) |
394 | target_namelen))) | ||
395 | goto error_return; | 355 | goto error_return; |
396 | /* | 356 | /* |
397 | * If target does not exist and the rename crosses | 357 | * If target does not exist and the rename crosses |
@@ -399,8 +359,8 @@ xfs_rename( | |||
399 | * to account for the ".." reference from the new entry. | 359 | * to account for the ".." reference from the new entry. |
400 | */ | 360 | */ |
401 | error = xfs_dir_createname(tp, target_dp, target_name, | 361 | error = xfs_dir_createname(tp, target_dp, target_name, |
402 | target_namelen, src_ip->i_ino, | 362 | src_ip->i_ino, &first_block, |
403 | &first_block, &free_list, spaceres); | 363 | &free_list, spaceres); |
404 | if (error == ENOSPC) | 364 | if (error == ENOSPC) |
405 | goto error_return; | 365 | goto error_return; |
406 | if (error) | 366 | if (error) |
@@ -439,7 +399,7 @@ xfs_rename( | |||
439 | * name at the destination directory, remove it first. | 399 | * name at the destination directory, remove it first. |
440 | */ | 400 | */ |
441 | error = xfs_dir_replace(tp, target_dp, target_name, | 401 | error = xfs_dir_replace(tp, target_dp, target_name, |
442 | target_namelen, src_ip->i_ino, | 402 | src_ip->i_ino, |
443 | &first_block, &free_list, spaceres); | 403 | &first_block, &free_list, spaceres); |
444 | if (error) | 404 | if (error) |
445 | goto abort_return; | 405 | goto abort_return; |
@@ -476,7 +436,8 @@ xfs_rename( | |||
476 | * Rewrite the ".." entry to point to the new | 436 | * Rewrite the ".." entry to point to the new |
477 | * directory. | 437 | * directory. |
478 | */ | 438 | */ |
479 | error = xfs_dir_replace(tp, src_ip, "..", 2, target_dp->i_ino, | 439 | error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot, |
440 | target_dp->i_ino, | ||
480 | &first_block, &free_list, spaceres); | 441 | &first_block, &free_list, spaceres); |
481 | ASSERT(error != EEXIST); | 442 | ASSERT(error != EEXIST); |
482 | if (error) | 443 | if (error) |
@@ -512,8 +473,8 @@ xfs_rename( | |||
512 | goto abort_return; | 473 | goto abort_return; |
513 | } | 474 | } |
514 | 475 | ||
515 | error = xfs_dir_removename(tp, src_dp, src_name, src_namelen, | 476 | error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino, |
516 | src_ip->i_ino, &first_block, &free_list, spaceres); | 477 | &first_block, &free_list, spaceres); |
517 | if (error) | 478 | if (error) |
518 | goto abort_return; | 479 | goto abort_return; |
519 | xfs_ichgtime(src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 480 | xfs_ichgtime(src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); |
@@ -580,10 +541,8 @@ xfs_rename( | |||
580 | * the vnode references. | 541 | * the vnode references. |
581 | */ | 542 | */ |
582 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); | 543 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); |
583 | if (target_ip != NULL) { | 544 | if (target_ip != NULL) |
584 | xfs_refcache_purge_ip(target_ip); | ||
585 | IRELE(target_ip); | 545 | IRELE(target_ip); |
586 | } | ||
587 | /* | 546 | /* |
588 | * Let interposed file systems know about removed links. | 547 | * Let interposed file systems know about removed links. |
589 | */ | 548 | */ |
@@ -598,9 +557,9 @@ std_return: | |||
598 | if (DM_EVENT_ENABLED(src_dp, DM_EVENT_POSTRENAME) || | 557 | if (DM_EVENT_ENABLED(src_dp, DM_EVENT_POSTRENAME) || |
599 | DM_EVENT_ENABLED(target_dp, DM_EVENT_POSTRENAME)) { | 558 | DM_EVENT_ENABLED(target_dp, DM_EVENT_POSTRENAME)) { |
600 | (void) XFS_SEND_NAMESP (mp, DM_EVENT_POSTRENAME, | 559 | (void) XFS_SEND_NAMESP (mp, DM_EVENT_POSTRENAME, |
601 | src_dir_vp, DM_RIGHT_NULL, | 560 | src_dp, DM_RIGHT_NULL, |
602 | target_dir_vp, DM_RIGHT_NULL, | 561 | target_dp, DM_RIGHT_NULL, |
603 | src_name, target_name, | 562 | src_name->name, target_name->name, |
604 | 0, error, 0); | 563 | 0, error, 0); |
605 | } | 564 | } |
606 | return error; | 565 | return error; |
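Editor's note: when DMAPI events are enabled, xfs_rename() still brackets the operation as shown above: DM_EVENT_RENAME is sent before anything is locked (and a non-zero return vetoes the rename), and DM_EVENT_POSTRENAME is sent on the way out carrying the final status; only the arguments changed from vnodes to xfs_inode pointers. A self-contained analogue of that pre/post notification pattern (hook names are illustrative):

    #include <stdio.h>

    static int pre_event(void)          { return 0; }  /* non-zero vetoes the op */
    static void post_event(int status)  { printf("post: status=%d\n", status); }
    static int do_operation(void)       { return 0; }

    static int rename_like_op(void)
    {
            int error = pre_event();
            if (error)
                    return error;       /* event handler rejected the request */

            error = do_operation();

            post_event(error);          /* report the final status either way */
            return error;
    }

    int main(void)
    {
            return rename_like_op();
    }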
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index 47082c01872d..a0dc6e5bc5b9 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include "xfs_rw.h" | 44 | #include "xfs_rw.h" |
45 | #include "xfs_inode_item.h" | 45 | #include "xfs_inode_item.h" |
46 | #include "xfs_trans_space.h" | 46 | #include "xfs_trans_space.h" |
47 | #include "xfs_utils.h" | ||
47 | 48 | ||
48 | 49 | ||
49 | /* | 50 | /* |
@@ -123,14 +124,14 @@ xfs_growfs_rt_alloc( | |||
123 | XFS_GROWRTALLOC_LOG_RES(mp), 0, | 124 | XFS_GROWRTALLOC_LOG_RES(mp), 0, |
124 | XFS_TRANS_PERM_LOG_RES, | 125 | XFS_TRANS_PERM_LOG_RES, |
125 | XFS_DEFAULT_PERM_LOG_COUNT))) | 126 | XFS_DEFAULT_PERM_LOG_COUNT))) |
126 | goto error_exit; | 127 | goto error_cancel; |
127 | cancelflags = XFS_TRANS_RELEASE_LOG_RES; | 128 | cancelflags = XFS_TRANS_RELEASE_LOG_RES; |
128 | /* | 129 | /* |
129 | * Lock the inode. | 130 | * Lock the inode. |
130 | */ | 131 | */ |
131 | if ((error = xfs_trans_iget(mp, tp, ino, 0, | 132 | if ((error = xfs_trans_iget(mp, tp, ino, 0, |
132 | XFS_ILOCK_EXCL, &ip))) | 133 | XFS_ILOCK_EXCL, &ip))) |
133 | goto error_exit; | 134 | goto error_cancel; |
134 | XFS_BMAP_INIT(&flist, &firstblock); | 135 | XFS_BMAP_INIT(&flist, &firstblock); |
135 | /* | 136 | /* |
136 | * Allocate blocks to the bitmap file. | 137 | * Allocate blocks to the bitmap file. |
@@ -143,14 +144,16 @@ xfs_growfs_rt_alloc( | |||
143 | if (!error && nmap < 1) | 144 | if (!error && nmap < 1) |
144 | error = XFS_ERROR(ENOSPC); | 145 | error = XFS_ERROR(ENOSPC); |
145 | if (error) | 146 | if (error) |
146 | goto error_exit; | 147 | goto error_cancel; |
147 | /* | 148 | /* |
148 | * Free any blocks freed up in the transaction, then commit. | 149 | * Free any blocks freed up in the transaction, then commit. |
149 | */ | 150 | */ |
150 | error = xfs_bmap_finish(&tp, &flist, &committed); | 151 | error = xfs_bmap_finish(&tp, &flist, &committed); |
151 | if (error) | 152 | if (error) |
152 | goto error_exit; | 153 | goto error_cancel; |
153 | xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); | 154 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); |
155 | if (error) | ||
156 | goto error; | ||
154 | /* | 157 | /* |
155 | * Now we need to clear the allocated blocks. | 158 | * Now we need to clear the allocated blocks. |
156 | * Do this one block per transaction, to keep it simple. | 159 | * Do this one block per transaction, to keep it simple. |
@@ -165,13 +168,13 @@ xfs_growfs_rt_alloc( | |||
165 | */ | 168 | */ |
166 | if ((error = xfs_trans_reserve(tp, 0, | 169 | if ((error = xfs_trans_reserve(tp, 0, |
167 | XFS_GROWRTZERO_LOG_RES(mp), 0, 0, 0))) | 170 | XFS_GROWRTZERO_LOG_RES(mp), 0, 0, 0))) |
168 | goto error_exit; | 171 | goto error_cancel; |
169 | /* | 172 | /* |
170 | * Lock the bitmap inode. | 173 | * Lock the bitmap inode. |
171 | */ | 174 | */ |
172 | if ((error = xfs_trans_iget(mp, tp, ino, 0, | 175 | if ((error = xfs_trans_iget(mp, tp, ino, 0, |
173 | XFS_ILOCK_EXCL, &ip))) | 176 | XFS_ILOCK_EXCL, &ip))) |
174 | goto error_exit; | 177 | goto error_cancel; |
175 | /* | 178 | /* |
176 | * Get a buffer for the block. | 179 | * Get a buffer for the block. |
177 | */ | 180 | */ |
@@ -180,14 +183,16 @@ xfs_growfs_rt_alloc( | |||
180 | mp->m_bsize, 0); | 183 | mp->m_bsize, 0); |
181 | if (bp == NULL) { | 184 | if (bp == NULL) { |
182 | error = XFS_ERROR(EIO); | 185 | error = XFS_ERROR(EIO); |
183 | goto error_exit; | 186 | goto error_cancel; |
184 | } | 187 | } |
185 | memset(XFS_BUF_PTR(bp), 0, mp->m_sb.sb_blocksize); | 188 | memset(XFS_BUF_PTR(bp), 0, mp->m_sb.sb_blocksize); |
186 | xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1); | 189 | xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1); |
187 | /* | 190 | /* |
188 | * Commit the transaction. | 191 | * Commit the transaction. |
189 | */ | 192 | */ |
190 | xfs_trans_commit(tp, 0); | 193 | error = xfs_trans_commit(tp, 0); |
194 | if (error) | ||
195 | goto error; | ||
191 | } | 196 | } |
192 | /* | 197 | /* |
193 | * Go on to the next extent, if any. | 198 | * Go on to the next extent, if any. |
@@ -195,8 +200,9 @@ xfs_growfs_rt_alloc( | |||
195 | oblocks = map.br_startoff + map.br_blockcount; | 200 | oblocks = map.br_startoff + map.br_blockcount; |
196 | } | 201 | } |
197 | return 0; | 202 | return 0; |
198 | error_exit: | 203 | error_cancel: |
199 | xfs_trans_cancel(tp, cancelflags); | 204 | xfs_trans_cancel(tp, cancelflags); |
205 | error: | ||
200 | return error; | 206 | return error; |
201 | } | 207 | } |
202 | 208 | ||
@@ -1875,6 +1881,7 @@ xfs_growfs_rt( | |||
1875 | xfs_trans_t *tp; /* transaction pointer */ | 1881 | xfs_trans_t *tp; /* transaction pointer */ |
1876 | 1882 | ||
1877 | sbp = &mp->m_sb; | 1883 | sbp = &mp->m_sb; |
1884 | cancelflags = 0; | ||
1878 | /* | 1885 | /* |
1879 | * Initial error checking. | 1886 | * Initial error checking. |
1880 | */ | 1887 | */ |
@@ -2041,13 +2048,15 @@ xfs_growfs_rt( | |||
2041 | */ | 2048 | */ |
2042 | mp->m_rsumlevels = nrsumlevels; | 2049 | mp->m_rsumlevels = nrsumlevels; |
2043 | mp->m_rsumsize = nrsumsize; | 2050 | mp->m_rsumsize = nrsumsize; |
2044 | /* | 2051 | |
2045 | * Commit the transaction. | 2052 | error = xfs_trans_commit(tp, 0); |
2046 | */ | 2053 | if (error) { |
2047 | xfs_trans_commit(tp, 0); | 2054 | tp = NULL; |
2055 | break; | ||
2056 | } | ||
2048 | } | 2057 | } |
2049 | 2058 | ||
2050 | if (error) | 2059 | if (error && tp) |
2051 | xfs_trans_cancel(tp, cancelflags); | 2060 | xfs_trans_cancel(tp, cancelflags); |
2052 | 2061 | ||
2053 | /* | 2062 | /* |
@@ -2278,7 +2287,7 @@ xfs_rtmount_inodes( | |||
2278 | ASSERT(sbp->sb_rsumino != NULLFSINO); | 2287 | ASSERT(sbp->sb_rsumino != NULLFSINO); |
2279 | error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip, 0); | 2288 | error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip, 0); |
2280 | if (error) { | 2289 | if (error) { |
2281 | VN_RELE(XFS_ITOV(mp->m_rbmip)); | 2290 | IRELE(mp->m_rbmip); |
2282 | return error; | 2291 | return error; |
2283 | } | 2292 | } |
2284 | ASSERT(mp->m_rsumip != NULL); | 2293 | ASSERT(mp->m_rsumip != NULL); |
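Editor's note: the xfs_growfs_rt_alloc() changes above split the old error_exit label in two: error_cancel still cancels the open transaction, while the new error label is used once xfs_trans_commit() has run, because a transaction that has been committed (successfully or not) is already torn down and must not be cancelled a second time. A self-contained sketch of that two-label cleanup idiom (stub functions, not the XFS calls):

    #include <stdio.h>

    static int reserve(void) { return 0; }
    static int commit(void)  { return 0; }
    static void cancel(void) { puts("cancel"); }

    static int grow_step(void)
    {
            int error;

            error = reserve();
            if (error)
                    goto error_cancel;  /* transaction still open: cancel it */

            error = commit();
            if (error)
                    goto error;         /* commit already tore it down */

            return 0;

    error_cancel:
            cancel();
    error:
            return error;
    }

    int main(void)
    {
            return grow_step();
    }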
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c index cd3ece6cc918..b0f31c09a76d 100644 --- a/fs/xfs/xfs_rw.c +++ b/fs/xfs/xfs_rw.c | |||
@@ -126,11 +126,11 @@ xfs_write_sync_logforce( | |||
126 | * when we return. | 126 | * when we return. |
127 | */ | 127 | */ |
128 | if (iip && iip->ili_last_lsn) { | 128 | if (iip && iip->ili_last_lsn) { |
129 | xfs_log_force(mp, iip->ili_last_lsn, | 129 | error = _xfs_log_force(mp, iip->ili_last_lsn, |
130 | XFS_LOG_FORCE | XFS_LOG_SYNC); | 130 | XFS_LOG_FORCE | XFS_LOG_SYNC, NULL); |
131 | } else if (xfs_ipincount(ip) > 0) { | 131 | } else if (xfs_ipincount(ip) > 0) { |
132 | xfs_log_force(mp, (xfs_lsn_t)0, | 132 | error = _xfs_log_force(mp, (xfs_lsn_t)0, |
133 | XFS_LOG_FORCE | XFS_LOG_SYNC); | 133 | XFS_LOG_FORCE | XFS_LOG_SYNC, NULL); |
134 | } | 134 | } |
135 | 135 | ||
136 | } else { | 136 | } else { |
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 7f40628d85c7..0804207c7391 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h | |||
@@ -113,13 +113,8 @@ struct xfs_mount; | |||
113 | struct xfs_trans; | 113 | struct xfs_trans; |
114 | struct xfs_dquot_acct; | 114 | struct xfs_dquot_acct; |
115 | 115 | ||
116 | typedef struct xfs_ail_entry { | ||
117 | struct xfs_log_item *ail_forw; /* AIL forw pointer */ | ||
118 | struct xfs_log_item *ail_back; /* AIL back pointer */ | ||
119 | } xfs_ail_entry_t; | ||
120 | |||
121 | typedef struct xfs_log_item { | 116 | typedef struct xfs_log_item { |
122 | xfs_ail_entry_t li_ail; /* AIL pointers */ | 117 | struct list_head li_ail; /* AIL pointers */ |
123 | xfs_lsn_t li_lsn; /* last on-disk lsn */ | 118 | xfs_lsn_t li_lsn; /* last on-disk lsn */ |
124 | struct xfs_log_item_desc *li_desc; /* ptr to current desc*/ | 119 | struct xfs_log_item_desc *li_desc; /* ptr to current desc*/ |
125 | struct xfs_mount *li_mountp; /* ptr to fs mount */ | 120 | struct xfs_mount *li_mountp; /* ptr to fs mount */ |
@@ -341,7 +336,6 @@ typedef struct xfs_trans { | |||
341 | unsigned int t_rtx_res; /* # of rt extents resvd */ | 336 | unsigned int t_rtx_res; /* # of rt extents resvd */ |
342 | unsigned int t_rtx_res_used; /* # of resvd rt extents used */ | 337 | unsigned int t_rtx_res_used; /* # of resvd rt extents used */ |
343 | xfs_log_ticket_t t_ticket; /* log mgr ticket */ | 338 | xfs_log_ticket_t t_ticket; /* log mgr ticket */ |
344 | sema_t t_sema; /* sema for commit completion */ | ||
345 | xfs_lsn_t t_lsn; /* log seq num of start of | 339 | xfs_lsn_t t_lsn; /* log seq num of start of |
346 | * transaction. */ | 340 | * transaction. */ |
347 | xfs_lsn_t t_commit_lsn; /* log seq num of end of | 341 | xfs_lsn_t t_commit_lsn; /* log seq num of end of |
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 76d470d8a1e6..1f77c00af566 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c | |||
@@ -28,13 +28,13 @@ | |||
28 | #include "xfs_trans_priv.h" | 28 | #include "xfs_trans_priv.h" |
29 | #include "xfs_error.h" | 29 | #include "xfs_error.h" |
30 | 30 | ||
31 | STATIC void xfs_ail_insert(xfs_ail_entry_t *, xfs_log_item_t *); | 31 | STATIC void xfs_ail_insert(xfs_ail_t *, xfs_log_item_t *); |
32 | STATIC xfs_log_item_t * xfs_ail_delete(xfs_ail_entry_t *, xfs_log_item_t *); | 32 | STATIC xfs_log_item_t * xfs_ail_delete(xfs_ail_t *, xfs_log_item_t *); |
33 | STATIC xfs_log_item_t * xfs_ail_min(xfs_ail_entry_t *); | 33 | STATIC xfs_log_item_t * xfs_ail_min(xfs_ail_t *); |
34 | STATIC xfs_log_item_t * xfs_ail_next(xfs_ail_entry_t *, xfs_log_item_t *); | 34 | STATIC xfs_log_item_t * xfs_ail_next(xfs_ail_t *, xfs_log_item_t *); |
35 | 35 | ||
36 | #ifdef DEBUG | 36 | #ifdef DEBUG |
37 | STATIC void xfs_ail_check(xfs_ail_entry_t *, xfs_log_item_t *); | 37 | STATIC void xfs_ail_check(xfs_ail_t *, xfs_log_item_t *); |
38 | #else | 38 | #else |
39 | #define xfs_ail_check(a,l) | 39 | #define xfs_ail_check(a,l) |
40 | #endif /* DEBUG */ | 40 | #endif /* DEBUG */ |
@@ -57,7 +57,7 @@ xfs_trans_tail_ail( | |||
57 | xfs_log_item_t *lip; | 57 | xfs_log_item_t *lip; |
58 | 58 | ||
59 | spin_lock(&mp->m_ail_lock); | 59 | spin_lock(&mp->m_ail_lock); |
60 | lip = xfs_ail_min(&(mp->m_ail.xa_ail)); | 60 | lip = xfs_ail_min(&mp->m_ail); |
61 | if (lip == NULL) { | 61 | if (lip == NULL) { |
62 | lsn = (xfs_lsn_t)0; | 62 | lsn = (xfs_lsn_t)0; |
63 | } else { | 63 | } else { |
@@ -91,7 +91,7 @@ xfs_trans_push_ail( | |||
91 | { | 91 | { |
92 | xfs_log_item_t *lip; | 92 | xfs_log_item_t *lip; |
93 | 93 | ||
94 | lip = xfs_ail_min(&mp->m_ail.xa_ail); | 94 | lip = xfs_ail_min(&mp->m_ail); |
95 | if (lip && !XFS_FORCED_SHUTDOWN(mp)) { | 95 | if (lip && !XFS_FORCED_SHUTDOWN(mp)) { |
96 | if (XFS_LSN_CMP(threshold_lsn, mp->m_ail.xa_target) > 0) | 96 | if (XFS_LSN_CMP(threshold_lsn, mp->m_ail.xa_target) > 0) |
97 | xfsaild_wakeup(mp, threshold_lsn); | 97 | xfsaild_wakeup(mp, threshold_lsn); |
@@ -111,15 +111,17 @@ xfs_trans_first_push_ail( | |||
111 | { | 111 | { |
112 | xfs_log_item_t *lip; | 112 | xfs_log_item_t *lip; |
113 | 113 | ||
114 | lip = xfs_ail_min(&(mp->m_ail.xa_ail)); | 114 | lip = xfs_ail_min(&mp->m_ail); |
115 | *gen = (int)mp->m_ail.xa_gen; | 115 | *gen = (int)mp->m_ail.xa_gen; |
116 | if (lsn == 0) | 116 | if (lsn == 0) |
117 | return lip; | 117 | return lip; |
118 | 118 | ||
119 | while (lip && (XFS_LSN_CMP(lip->li_lsn, lsn) < 0)) | 119 | list_for_each_entry(lip, &mp->m_ail.xa_ail, li_ail) { |
120 | lip = lip->li_ail.ail_forw; | 120 | if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0) |
121 | return lip; | ||
122 | } | ||
121 | 123 | ||
122 | return lip; | 124 | return NULL; |
123 | } | 125 | } |
124 | 126 | ||
125 | /* | 127 | /* |
@@ -329,7 +331,7 @@ xfs_trans_unlocked_item( | |||
329 | * the call to xfs_log_move_tail() doesn't do anything if there's | 331 | * the call to xfs_log_move_tail() doesn't do anything if there's |
330 | * not enough free space to wake people up so we're safe calling it. | 332 | * not enough free space to wake people up so we're safe calling it. |
331 | */ | 333 | */ |
332 | min_lip = xfs_ail_min(&mp->m_ail.xa_ail); | 334 | min_lip = xfs_ail_min(&mp->m_ail); |
333 | 335 | ||
334 | if (min_lip == lip) | 336 | if (min_lip == lip) |
335 | xfs_log_move_tail(mp, 1); | 337 | xfs_log_move_tail(mp, 1); |
@@ -357,15 +359,13 @@ xfs_trans_update_ail( | |||
357 | xfs_log_item_t *lip, | 359 | xfs_log_item_t *lip, |
358 | xfs_lsn_t lsn) __releases(mp->m_ail_lock) | 360 | xfs_lsn_t lsn) __releases(mp->m_ail_lock) |
359 | { | 361 | { |
360 | xfs_ail_entry_t *ailp; | ||
361 | xfs_log_item_t *dlip=NULL; | 362 | xfs_log_item_t *dlip=NULL; |
362 | xfs_log_item_t *mlip; /* ptr to minimum lip */ | 363 | xfs_log_item_t *mlip; /* ptr to minimum lip */ |
363 | 364 | ||
364 | ailp = &(mp->m_ail.xa_ail); | 365 | mlip = xfs_ail_min(&mp->m_ail); |
365 | mlip = xfs_ail_min(ailp); | ||
366 | 366 | ||
367 | if (lip->li_flags & XFS_LI_IN_AIL) { | 367 | if (lip->li_flags & XFS_LI_IN_AIL) { |
368 | dlip = xfs_ail_delete(ailp, lip); | 368 | dlip = xfs_ail_delete(&mp->m_ail, lip); |
369 | ASSERT(dlip == lip); | 369 | ASSERT(dlip == lip); |
370 | } else { | 370 | } else { |
371 | lip->li_flags |= XFS_LI_IN_AIL; | 371 | lip->li_flags |= XFS_LI_IN_AIL; |
@@ -373,11 +373,11 @@ xfs_trans_update_ail( | |||
373 | 373 | ||
374 | lip->li_lsn = lsn; | 374 | lip->li_lsn = lsn; |
375 | 375 | ||
376 | xfs_ail_insert(ailp, lip); | 376 | xfs_ail_insert(&mp->m_ail, lip); |
377 | mp->m_ail.xa_gen++; | 377 | mp->m_ail.xa_gen++; |
378 | 378 | ||
379 | if (mlip == dlip) { | 379 | if (mlip == dlip) { |
380 | mlip = xfs_ail_min(&(mp->m_ail.xa_ail)); | 380 | mlip = xfs_ail_min(&mp->m_ail); |
381 | spin_unlock(&mp->m_ail_lock); | 381 | spin_unlock(&mp->m_ail_lock); |
382 | xfs_log_move_tail(mp, mlip->li_lsn); | 382 | xfs_log_move_tail(mp, mlip->li_lsn); |
383 | } else { | 383 | } else { |
@@ -407,14 +407,12 @@ xfs_trans_delete_ail( | |||
407 | xfs_mount_t *mp, | 407 | xfs_mount_t *mp, |
408 | xfs_log_item_t *lip) __releases(mp->m_ail_lock) | 408 | xfs_log_item_t *lip) __releases(mp->m_ail_lock) |
409 | { | 409 | { |
410 | xfs_ail_entry_t *ailp; | ||
411 | xfs_log_item_t *dlip; | 410 | xfs_log_item_t *dlip; |
412 | xfs_log_item_t *mlip; | 411 | xfs_log_item_t *mlip; |
413 | 412 | ||
414 | if (lip->li_flags & XFS_LI_IN_AIL) { | 413 | if (lip->li_flags & XFS_LI_IN_AIL) { |
415 | ailp = &(mp->m_ail.xa_ail); | 414 | mlip = xfs_ail_min(&mp->m_ail); |
416 | mlip = xfs_ail_min(ailp); | 415 | dlip = xfs_ail_delete(&mp->m_ail, lip); |
417 | dlip = xfs_ail_delete(ailp, lip); | ||
418 | ASSERT(dlip == lip); | 416 | ASSERT(dlip == lip); |
419 | 417 | ||
420 | 418 | ||
@@ -423,7 +421,7 @@ xfs_trans_delete_ail( | |||
423 | mp->m_ail.xa_gen++; | 421 | mp->m_ail.xa_gen++; |
424 | 422 | ||
425 | if (mlip == dlip) { | 423 | if (mlip == dlip) { |
426 | mlip = xfs_ail_min(&(mp->m_ail.xa_ail)); | 424 | mlip = xfs_ail_min(&mp->m_ail); |
427 | spin_unlock(&mp->m_ail_lock); | 425 | spin_unlock(&mp->m_ail_lock); |
428 | xfs_log_move_tail(mp, (mlip ? mlip->li_lsn : 0)); | 426 | xfs_log_move_tail(mp, (mlip ? mlip->li_lsn : 0)); |
429 | } else { | 427 | } else { |
@@ -440,7 +438,7 @@ xfs_trans_delete_ail( | |||
440 | else { | 438 | else { |
441 | xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp, | 439 | xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp, |
442 | "%s: attempting to delete a log item that is not in the AIL", | 440 | "%s: attempting to delete a log item that is not in the AIL", |
443 | __FUNCTION__); | 441 | __func__); |
444 | spin_unlock(&mp->m_ail_lock); | 442 | spin_unlock(&mp->m_ail_lock); |
445 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); | 443 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); |
446 | } | 444 | } |
@@ -461,7 +459,7 @@ xfs_trans_first_ail( | |||
461 | { | 459 | { |
462 | xfs_log_item_t *lip; | 460 | xfs_log_item_t *lip; |
463 | 461 | ||
464 | lip = xfs_ail_min(&(mp->m_ail.xa_ail)); | 462 | lip = xfs_ail_min(&mp->m_ail); |
465 | *gen = (int)mp->m_ail.xa_gen; | 463 | *gen = (int)mp->m_ail.xa_gen; |
466 | 464 | ||
467 | return lip; | 465 | return lip; |
@@ -485,9 +483,9 @@ xfs_trans_next_ail( | |||
485 | 483 | ||
486 | ASSERT(mp && lip && gen); | 484 | ASSERT(mp && lip && gen); |
487 | if (mp->m_ail.xa_gen == *gen) { | 485 | if (mp->m_ail.xa_gen == *gen) { |
488 | nlip = xfs_ail_next(&(mp->m_ail.xa_ail), lip); | 486 | nlip = xfs_ail_next(&mp->m_ail, lip); |
489 | } else { | 487 | } else { |
490 | nlip = xfs_ail_min(&(mp->m_ail).xa_ail); | 488 | nlip = xfs_ail_min(&mp->m_ail); |
491 | *gen = (int)mp->m_ail.xa_gen; | 489 | *gen = (int)mp->m_ail.xa_gen; |
492 | if (restarts != NULL) { | 490 | if (restarts != NULL) { |
493 | XFS_STATS_INC(xs_push_ail_restarts); | 491 | XFS_STATS_INC(xs_push_ail_restarts); |
@@ -517,8 +515,7 @@ int | |||
517 | xfs_trans_ail_init( | 515 | xfs_trans_ail_init( |
518 | xfs_mount_t *mp) | 516 | xfs_mount_t *mp) |
519 | { | 517 | { |
520 | mp->m_ail.xa_ail.ail_forw = (xfs_log_item_t*)&mp->m_ail.xa_ail; | 518 | INIT_LIST_HEAD(&mp->m_ail.xa_ail); |
521 | mp->m_ail.xa_ail.ail_back = (xfs_log_item_t*)&mp->m_ail.xa_ail; | ||
522 | return xfsaild_start(mp); | 519 | return xfsaild_start(mp); |
523 | } | 520 | } |
524 | 521 | ||
@@ -537,7 +534,7 @@ xfs_trans_ail_destroy( | |||
537 | */ | 534 | */ |
538 | STATIC void | 535 | STATIC void |
539 | xfs_ail_insert( | 536 | xfs_ail_insert( |
540 | xfs_ail_entry_t *base, | 537 | xfs_ail_t *ailp, |
541 | xfs_log_item_t *lip) | 538 | xfs_log_item_t *lip) |
542 | /* ARGSUSED */ | 539 | /* ARGSUSED */ |
543 | { | 540 | { |
@@ -546,27 +543,22 @@ xfs_ail_insert( | |||
546 | /* | 543 | /* |
547 | * If the list is empty, just insert the item. | 544 | * If the list is empty, just insert the item. |
548 | */ | 545 | */ |
549 | if (base->ail_back == (xfs_log_item_t*)base) { | 546 | if (list_empty(&ailp->xa_ail)) { |
550 | base->ail_forw = lip; | 547 | list_add(&lip->li_ail, &ailp->xa_ail); |
551 | base->ail_back = lip; | ||
552 | lip->li_ail.ail_forw = (xfs_log_item_t*)base; | ||
553 | lip->li_ail.ail_back = (xfs_log_item_t*)base; | ||
554 | return; | 548 | return; |
555 | } | 549 | } |
556 | 550 | ||
557 | next_lip = base->ail_back; | 551 | list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) { |
558 | while ((next_lip != (xfs_log_item_t*)base) && | 552 | if (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0) |
559 | (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) > 0)) { | 553 | break; |
560 | next_lip = next_lip->li_ail.ail_back; | ||
561 | } | 554 | } |
562 | ASSERT((next_lip == (xfs_log_item_t*)base) || | 555 | |
556 | ASSERT((&next_lip->li_ail == &ailp->xa_ail) || | ||
563 | (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0)); | 557 | (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0)); |
564 | lip->li_ail.ail_forw = next_lip->li_ail.ail_forw; | ||
565 | lip->li_ail.ail_back = next_lip; | ||
566 | next_lip->li_ail.ail_forw = lip; | ||
567 | lip->li_ail.ail_forw->li_ail.ail_back = lip; | ||
568 | 558 | ||
569 | xfs_ail_check(base, lip); | 559 | list_add(&lip->li_ail, &next_lip->li_ail); |
560 | |||
561 | xfs_ail_check(ailp, lip); | ||
570 | return; | 562 | return; |
571 | } | 563 | } |
572 | 564 | ||
@@ -576,15 +568,13 @@ xfs_ail_insert( | |||
576 | /*ARGSUSED*/ | 568 | /*ARGSUSED*/ |
577 | STATIC xfs_log_item_t * | 569 | STATIC xfs_log_item_t * |
578 | xfs_ail_delete( | 570 | xfs_ail_delete( |
579 | xfs_ail_entry_t *base, | 571 | xfs_ail_t *ailp, |
580 | xfs_log_item_t *lip) | 572 | xfs_log_item_t *lip) |
581 | /* ARGSUSED */ | 573 | /* ARGSUSED */ |
582 | { | 574 | { |
583 | xfs_ail_check(base, lip); | 575 | xfs_ail_check(ailp, lip); |
584 | lip->li_ail.ail_forw->li_ail.ail_back = lip->li_ail.ail_back; | 576 | |
585 | lip->li_ail.ail_back->li_ail.ail_forw = lip->li_ail.ail_forw; | 577 | list_del(&lip->li_ail); |
586 | lip->li_ail.ail_forw = NULL; | ||
587 | lip->li_ail.ail_back = NULL; | ||
588 | 578 | ||
589 | return lip; | 579 | return lip; |
590 | } | 580 | } |
@@ -595,14 +585,13 @@ xfs_ail_delete( | |||
595 | */ | 585 | */ |
596 | STATIC xfs_log_item_t * | 586 | STATIC xfs_log_item_t * |
597 | xfs_ail_min( | 587 | xfs_ail_min( |
598 | xfs_ail_entry_t *base) | 588 | xfs_ail_t *ailp) |
599 | /* ARGSUSED */ | 589 | /* ARGSUSED */ |
600 | { | 590 | { |
601 | register xfs_log_item_t *forw = base->ail_forw; | 591 | if (list_empty(&ailp->xa_ail)) |
602 | if (forw == (xfs_log_item_t*)base) { | ||
603 | return NULL; | 592 | return NULL; |
604 | } | 593 | |
605 | return forw; | 594 | return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail); |
606 | } | 595 | } |
607 | 596 | ||
608 | /* | 597 | /* |
@@ -612,15 +601,14 @@ xfs_ail_min( | |||
612 | */ | 601 | */ |
613 | STATIC xfs_log_item_t * | 602 | STATIC xfs_log_item_t * |
614 | xfs_ail_next( | 603 | xfs_ail_next( |
615 | xfs_ail_entry_t *base, | 604 | xfs_ail_t *ailp, |
616 | xfs_log_item_t *lip) | 605 | xfs_log_item_t *lip) |
617 | /* ARGSUSED */ | 606 | /* ARGSUSED */ |
618 | { | 607 | { |
619 | if (lip->li_ail.ail_forw == (xfs_log_item_t*)base) { | 608 | if (lip->li_ail.next == &ailp->xa_ail) |
620 | return NULL; | 609 | return NULL; |
621 | } | ||
622 | return lip->li_ail.ail_forw; | ||
623 | 610 | ||
611 | return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail); | ||
624 | } | 612 | } |
625 | 613 | ||
626 | #ifdef DEBUG | 614 | #ifdef DEBUG |
@@ -629,57 +617,40 @@ xfs_ail_next( | |||
629 | */ | 617 | */ |
630 | STATIC void | 618 | STATIC void |
631 | xfs_ail_check( | 619 | xfs_ail_check( |
632 | xfs_ail_entry_t *base, | 620 | xfs_ail_t *ailp, |
633 | xfs_log_item_t *lip) | 621 | xfs_log_item_t *lip) |
634 | { | 622 | { |
635 | xfs_log_item_t *prev_lip; | 623 | xfs_log_item_t *prev_lip; |
636 | 624 | ||
637 | prev_lip = base->ail_forw; | 625 | if (list_empty(&ailp->xa_ail)) |
638 | if (prev_lip == (xfs_log_item_t*)base) { | ||
639 | /* | ||
640 | * Make sure the pointers are correct when the list | ||
641 | * is empty. | ||
642 | */ | ||
643 | ASSERT(base->ail_back == (xfs_log_item_t*)base); | ||
644 | return; | 626 | return; |
645 | } | ||
646 | 627 | ||
647 | /* | 628 | /* |
648 | * Check the next and previous entries are valid. | 629 | * Check the next and previous entries are valid. |
649 | */ | 630 | */ |
650 | ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0); | 631 | ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0); |
651 | prev_lip = lip->li_ail.ail_back; | 632 | prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail); |
652 | if (prev_lip != (xfs_log_item_t*)base) { | 633 | if (&prev_lip->li_ail != &ailp->xa_ail) |
653 | ASSERT(prev_lip->li_ail.ail_forw == lip); | ||
654 | ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0); | 634 | ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0); |
655 | } | 635 | |
656 | prev_lip = lip->li_ail.ail_forw; | 636 | prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail); |
657 | if (prev_lip != (xfs_log_item_t*)base) { | 637 | if (&prev_lip->li_ail != &ailp->xa_ail) |
658 | ASSERT(prev_lip->li_ail.ail_back == lip); | ||
659 | ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0); | 638 | ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0); |
660 | } | ||
661 | 639 | ||
662 | 640 | ||
663 | #ifdef XFS_TRANS_DEBUG | 641 | #ifdef XFS_TRANS_DEBUG |
664 | /* | 642 | /* |
665 | * Walk the list checking forward and backward pointers, | 643 | * Walk the list checking lsn ordering, and that every entry has the |
666 | * lsn ordering, and that every entry has the XFS_LI_IN_AIL | 644 | * XFS_LI_IN_AIL flag set. This is really expensive, so only do it |
667 | * flag set. This is really expensive, so only do it when | 645 | * when specifically debugging the transaction subsystem. |
668 | * specifically debugging the transaction subsystem. | ||
669 | */ | 646 | */ |
670 | prev_lip = (xfs_log_item_t*)base; | 647 | prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail); |
671 | while (lip != (xfs_log_item_t*)base) { | 648 | list_for_each_entry(lip, &ailp->xa_ail, li_ail) { |
672 | if (prev_lip != (xfs_log_item_t*)base) { | 649 | if (&prev_lip->li_ail != &ailp->xa_ail) |
673 | ASSERT(prev_lip->li_ail.ail_forw == lip); | ||
674 | ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0); | 650 | ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0); |
675 | } | ||
676 | ASSERT(lip->li_ail.ail_back == prev_lip); | ||
677 | ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0); | 651 | ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0); |
678 | prev_lip = lip; | 652 | prev_lip = lip; |
679 | lip = lip->li_ail.ail_forw; | ||
680 | } | 653 | } |
681 | ASSERT(lip == (xfs_log_item_t*)base); | ||
682 | ASSERT(base->ail_back == prev_lip); | ||
683 | #endif /* XFS_TRANS_DEBUG */ | 654 | #endif /* XFS_TRANS_DEBUG */ |
684 | } | 655 | } |
685 | #endif /* DEBUG */ | 656 | #endif /* DEBUG */ |
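Editor's note: the xfs_trans_ail.c hunks above replace the hand-rolled ail_forw/ail_back pointers with the generic struct list_head, so xfs_ail_insert() now walks the list backwards with list_for_each_entry_reverse() and links the new item in after the last entry whose LSN is less than or equal to the new one, keeping the AIL sorted by LSN. A stripped-down, self-contained version of that ordered insert using an ordinary doubly-linked ring (not the kernel's list.h):

    #include <stdio.h>
    #include <stdint.h>

    struct node {
            struct node *prev, *next;
            uint64_t lsn;
    };

    static void list_init(struct node *head)
    {
            head->prev = head->next = head;
            head->lsn = 0;
    }

    /* Keep the ring sorted by ascending lsn: walk backwards from the tail
     * and insert after the first node whose lsn is <= the new one (or
     * right after the head sentinel if none is). */
    static void ail_insert(struct node *head, struct node *item)
    {
            struct node *pos = head->prev;

            while (pos != head && pos->lsn > item->lsn)
                    pos = pos->prev;

            item->next = pos->next;
            item->prev = pos;
            pos->next->prev = item;
            pos->next = item;
    }

    int main(void)
    {
            struct node head, a = { .lsn = 30 }, b = { .lsn = 10 }, c = { .lsn = 20 };

            list_init(&head);
            ail_insert(&head, &a);
            ail_insert(&head, &b);
            ail_insert(&head, &c);

            for (struct node *n = head.next; n != &head; n = n->next)
                    printf("%llu\n", (unsigned long long)n->lsn); /* 10 20 30 */
            return 0;
    }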
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index 60b6b898022b..cb0c5839154b 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c | |||
@@ -304,7 +304,8 @@ xfs_trans_read_buf( | |||
304 | if (tp == NULL) { | 304 | if (tp == NULL) { |
305 | bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY); | 305 | bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY); |
306 | if (!bp) | 306 | if (!bp) |
307 | return XFS_ERROR(ENOMEM); | 307 | return (flags & XFS_BUF_TRYLOCK) ? |
308 | EAGAIN : XFS_ERROR(ENOMEM); | ||
308 | 309 | ||
309 | if ((bp != NULL) && (XFS_BUF_GETERROR(bp) != 0)) { | 310 | if ((bp != NULL) && (XFS_BUF_GETERROR(bp) != 0)) { |
310 | xfs_ioerror_alert("xfs_trans_read_buf", mp, | 311 | xfs_ioerror_alert("xfs_trans_read_buf", mp, |
@@ -353,17 +354,15 @@ xfs_trans_read_buf( | |||
353 | ASSERT(!XFS_BUF_ISASYNC(bp)); | 354 | ASSERT(!XFS_BUF_ISASYNC(bp)); |
354 | XFS_BUF_READ(bp); | 355 | XFS_BUF_READ(bp); |
355 | xfsbdstrat(tp->t_mountp, bp); | 356 | xfsbdstrat(tp->t_mountp, bp); |
356 | xfs_iowait(bp); | 357 | error = xfs_iowait(bp); |
357 | if (XFS_BUF_GETERROR(bp) != 0) { | 358 | if (error) { |
358 | xfs_ioerror_alert("xfs_trans_read_buf", mp, | 359 | xfs_ioerror_alert("xfs_trans_read_buf", mp, |
359 | bp, blkno); | 360 | bp, blkno); |
360 | error = XFS_BUF_GETERROR(bp); | ||
361 | xfs_buf_relse(bp); | 361 | xfs_buf_relse(bp); |
362 | /* | 362 | /* |
363 | * We can gracefully recover from most | 363 | * We can gracefully recover from most read |
364 | * read errors. Ones we can't are those | 364 | * errors. Ones we can't are those that happen |
365 | * that happen after the transaction's | 365 | * after the transaction's already dirty. |
366 | * already dirty. | ||
367 | */ | 366 | */ |
368 | if (tp->t_flags & XFS_TRANS_DIRTY) | 367 | if (tp->t_flags & XFS_TRANS_DIRTY) |
369 | xfs_force_shutdown(tp->t_mountp, | 368 | xfs_force_shutdown(tp->t_mountp, |
diff --git a/fs/xfs/xfs_types.h b/fs/xfs/xfs_types.h index 5c89be475464..0f5191644ab2 100644 --- a/fs/xfs/xfs_types.h +++ b/fs/xfs/xfs_types.h | |||
@@ -160,4 +160,9 @@ typedef enum { | |||
160 | XFS_BTNUM_MAX | 160 | XFS_BTNUM_MAX |
161 | } xfs_btnum_t; | 161 | } xfs_btnum_t; |
162 | 162 | ||
163 | struct xfs_name { | ||
164 | const char *name; | ||
165 | int len; | ||
166 | }; | ||
167 | |||
163 | #endif /* __XFS_TYPES_H__ */ | 168 | #endif /* __XFS_TYPES_H__ */ |
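Editor's note: struct xfs_name, added above, is a simple counted string (pointer plus length), so the directory and namespace paths no longer need the old bhv_vname_t wrappers; callers pass the name and its length explicitly, and constants such as the xfs_name_dotdot used by the rename hunk fit the same shape. A small self-contained example of handling such a counted name (the comparison helper is a placeholder, not an XFS routine):

    #include <stdio.h>
    #include <string.h>

    struct xfs_name {
            const char *name;
            int len;
    };

    /* Placeholder: compare a counted name against a NUL-terminated entry. */
    static int name_matches(const struct xfs_name *name, const char *entry)
    {
            return (int)strlen(entry) == name->len &&
                   memcmp(entry, name->name, name->len) == 0;
    }

    int main(void)
    {
            /* The name need not be NUL-terminated; len says where it ends. */
            struct xfs_name dotdot = { "..", 2 };

            printf("%d\n", name_matches(&dotdot, ".."));   /* 1 */
            printf("%d\n", name_matches(&dotdot, "."));    /* 0 */
            return 0;
    }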
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c index 18a85e746680..2b8dc7e40772 100644 --- a/fs/xfs/xfs_utils.c +++ b/fs/xfs/xfs_utils.c | |||
@@ -40,34 +40,12 @@ | |||
40 | #include "xfs_itable.h" | 40 | #include "xfs_itable.h" |
41 | #include "xfs_utils.h" | 41 | #include "xfs_utils.h" |
42 | 42 | ||
43 | /* | ||
44 | * xfs_get_dir_entry is used to get a reference to an inode given | ||
45 | * its parent directory inode and the name of the file. It does | ||
46 | * not lock the child inode, and it unlocks the directory before | ||
47 | * returning. The directory's generation number is returned for | ||
48 | * use by a later call to xfs_lock_dir_and_entry. | ||
49 | */ | ||
50 | int | ||
51 | xfs_get_dir_entry( | ||
52 | bhv_vname_t *dentry, | ||
53 | xfs_inode_t **ipp) | ||
54 | { | ||
55 | bhv_vnode_t *vp; | ||
56 | |||
57 | vp = VNAME_TO_VNODE(dentry); | ||
58 | |||
59 | *ipp = xfs_vtoi(vp); | ||
60 | if (!*ipp) | ||
61 | return XFS_ERROR(ENOENT); | ||
62 | VN_HOLD(vp); | ||
63 | return 0; | ||
64 | } | ||
65 | 43 | ||
66 | int | 44 | int |
67 | xfs_dir_lookup_int( | 45 | xfs_dir_lookup_int( |
68 | xfs_inode_t *dp, | 46 | xfs_inode_t *dp, |
69 | uint lock_mode, | 47 | uint lock_mode, |
70 | bhv_vname_t *dentry, | 48 | struct xfs_name *name, |
71 | xfs_ino_t *inum, | 49 | xfs_ino_t *inum, |
72 | xfs_inode_t **ipp) | 50 | xfs_inode_t **ipp) |
73 | { | 51 | { |
@@ -75,7 +53,7 @@ xfs_dir_lookup_int( | |||
75 | 53 | ||
76 | xfs_itrace_entry(dp); | 54 | xfs_itrace_entry(dp); |
77 | 55 | ||
78 | error = xfs_dir_lookup(NULL, dp, VNAME(dentry), VNAMELEN(dentry), inum); | 56 | error = xfs_dir_lookup(NULL, dp, name, inum); |
79 | if (!error) { | 57 | if (!error) { |
80 | /* | 58 | /* |
81 | * Unlock the directory. We do this because we can't | 59 | * Unlock the directory. We do this because we can't |
diff --git a/fs/xfs/xfs_utils.h b/fs/xfs/xfs_utils.h index f857fcccb723..175b126d2cab 100644 --- a/fs/xfs/xfs_utils.h +++ b/fs/xfs/xfs_utils.h | |||
@@ -21,15 +21,14 @@ | |||
21 | #define IRELE(ip) VN_RELE(XFS_ITOV(ip)) | 21 | #define IRELE(ip) VN_RELE(XFS_ITOV(ip)) |
22 | #define IHOLD(ip) VN_HOLD(XFS_ITOV(ip)) | 22 | #define IHOLD(ip) VN_HOLD(XFS_ITOV(ip)) |
23 | 23 | ||
24 | extern int xfs_get_dir_entry (bhv_vname_t *, xfs_inode_t **); | 24 | extern int xfs_dir_lookup_int(xfs_inode_t *, uint, struct xfs_name *, |
25 | extern int xfs_dir_lookup_int (xfs_inode_t *, uint, bhv_vname_t *, xfs_ino_t *, | 25 | xfs_ino_t *, xfs_inode_t **); |
26 | xfs_inode_t **); | 26 | extern int xfs_truncate_file(xfs_mount_t *, xfs_inode_t *); |
27 | extern int xfs_truncate_file (xfs_mount_t *, xfs_inode_t *); | 27 | extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t, |
28 | extern int xfs_dir_ialloc (xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t, | ||
29 | xfs_dev_t, cred_t *, prid_t, int, | 28 | xfs_dev_t, cred_t *, prid_t, int, |
30 | xfs_inode_t **, int *); | 29 | xfs_inode_t **, int *); |
31 | extern int xfs_droplink (xfs_trans_t *, xfs_inode_t *); | 30 | extern int xfs_droplink(xfs_trans_t *, xfs_inode_t *); |
32 | extern int xfs_bumplink (xfs_trans_t *, xfs_inode_t *); | 31 | extern int xfs_bumplink(xfs_trans_t *, xfs_inode_t *); |
33 | extern void xfs_bump_ino_vers2 (xfs_trans_t *, xfs_inode_t *); | 32 | extern void xfs_bump_ino_vers2(xfs_trans_t *, xfs_inode_t *); |
34 | 33 | ||
35 | #endif /* __XFS_UTILS_H__ */ | 34 | #endif /* __XFS_UTILS_H__ */ |
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index 7094caff13cf..fc48158fe479 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c | |||
@@ -43,7 +43,6 @@ | |||
43 | #include "xfs_error.h" | 43 | #include "xfs_error.h" |
44 | #include "xfs_bmap.h" | 44 | #include "xfs_bmap.h" |
45 | #include "xfs_rw.h" | 45 | #include "xfs_rw.h" |
46 | #include "xfs_refcache.h" | ||
47 | #include "xfs_buf_item.h" | 46 | #include "xfs_buf_item.h" |
48 | #include "xfs_log_priv.h" | 47 | #include "xfs_log_priv.h" |
49 | #include "xfs_dir2_trace.h" | 48 | #include "xfs_dir2_trace.h" |
@@ -56,6 +55,7 @@ | |||
56 | #include "xfs_fsops.h" | 55 | #include "xfs_fsops.h" |
57 | #include "xfs_vnodeops.h" | 56 | #include "xfs_vnodeops.h" |
58 | #include "xfs_vfsops.h" | 57 | #include "xfs_vfsops.h" |
58 | #include "xfs_utils.h" | ||
59 | 59 | ||
60 | 60 | ||
61 | int __init | 61 | int __init |
@@ -69,15 +69,17 @@ xfs_init(void) | |||
69 | /* | 69 | /* |
70 | * Initialize all of the zone allocators we use. | 70 | * Initialize all of the zone allocators we use. |
71 | */ | 71 | */ |
72 | xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t), | ||
73 | "xfs_log_ticket"); | ||
72 | xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t), | 74 | xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t), |
73 | "xfs_bmap_free_item"); | 75 | "xfs_bmap_free_item"); |
74 | xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t), | 76 | xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t), |
75 | "xfs_btree_cur"); | 77 | "xfs_btree_cur"); |
76 | xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans"); | 78 | xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t), |
77 | xfs_da_state_zone = | 79 | "xfs_da_state"); |
78 | kmem_zone_init(sizeof(xfs_da_state_t), "xfs_da_state"); | ||
79 | xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf"); | 80 | xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf"); |
80 | xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork"); | 81 | xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork"); |
82 | xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans"); | ||
81 | xfs_acl_zone_init(xfs_acl_zone, "xfs_acl"); | 83 | xfs_acl_zone_init(xfs_acl_zone, "xfs_acl"); |
82 | xfs_mru_cache_init(); | 84 | xfs_mru_cache_init(); |
83 | xfs_filestream_init(); | 85 | xfs_filestream_init(); |
@@ -113,9 +115,6 @@ xfs_init(void) | |||
113 | xfs_ili_zone = | 115 | xfs_ili_zone = |
114 | kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili", | 116 | kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili", |
115 | KM_ZONE_SPREAD, NULL); | 117 | KM_ZONE_SPREAD, NULL); |
116 | xfs_icluster_zone = | ||
117 | kmem_zone_init_flags(sizeof(xfs_icluster_t), "xfs_icluster", | ||
118 | KM_ZONE_SPREAD, NULL); | ||
119 | 118 | ||
120 | /* | 119 | /* |
121 | * Allocate global trace buffers. | 120 | * Allocate global trace buffers. |
@@ -153,11 +152,9 @@ xfs_cleanup(void) | |||
153 | extern kmem_zone_t *xfs_inode_zone; | 152 | extern kmem_zone_t *xfs_inode_zone; |
154 | extern kmem_zone_t *xfs_efd_zone; | 153 | extern kmem_zone_t *xfs_efd_zone; |
155 | extern kmem_zone_t *xfs_efi_zone; | 154 | extern kmem_zone_t *xfs_efi_zone; |
156 | extern kmem_zone_t *xfs_icluster_zone; | ||
157 | 155 | ||
158 | xfs_cleanup_procfs(); | 156 | xfs_cleanup_procfs(); |
159 | xfs_sysctl_unregister(); | 157 | xfs_sysctl_unregister(); |
160 | xfs_refcache_destroy(); | ||
161 | xfs_filestream_uninit(); | 158 | xfs_filestream_uninit(); |
162 | xfs_mru_cache_uninit(); | 159 | xfs_mru_cache_uninit(); |
163 | xfs_acl_zone_destroy(xfs_acl_zone); | 160 | xfs_acl_zone_destroy(xfs_acl_zone); |
@@ -189,7 +186,6 @@ xfs_cleanup(void) | |||
189 | kmem_zone_destroy(xfs_efi_zone); | 186 | kmem_zone_destroy(xfs_efi_zone); |
190 | kmem_zone_destroy(xfs_ifork_zone); | 187 | kmem_zone_destroy(xfs_ifork_zone); |
191 | kmem_zone_destroy(xfs_ili_zone); | 188 | kmem_zone_destroy(xfs_ili_zone); |
192 | kmem_zone_destroy(xfs_icluster_zone); | ||
193 | } | 189 | } |
194 | 190 | ||
195 | /* | 191 | /* |
@@ -573,7 +569,7 @@ xfs_unmount( | |||
573 | #ifdef HAVE_DMAPI | 569 | #ifdef HAVE_DMAPI |
574 | if (mp->m_flags & XFS_MOUNT_DMAPI) { | 570 | if (mp->m_flags & XFS_MOUNT_DMAPI) { |
575 | error = XFS_SEND_PREUNMOUNT(mp, | 571 | error = XFS_SEND_PREUNMOUNT(mp, |
576 | rvp, DM_RIGHT_NULL, rvp, DM_RIGHT_NULL, | 572 | rip, DM_RIGHT_NULL, rip, DM_RIGHT_NULL, |
577 | NULL, NULL, 0, 0, | 573 | NULL, NULL, 0, 0, |
578 | (mp->m_dmevmask & (1<<DM_EVENT_PREUNMOUNT))? | 574 | (mp->m_dmevmask & (1<<DM_EVENT_PREUNMOUNT))? |
579 | 0:DM_FLAGS_UNWANTED); | 575 | 0:DM_FLAGS_UNWANTED); |
@@ -584,11 +580,6 @@ xfs_unmount( | |||
584 | 0 : DM_FLAGS_UNWANTED; | 580 | 0 : DM_FLAGS_UNWANTED; |
585 | } | 581 | } |
586 | #endif | 582 | #endif |
587 | /* | ||
588 | * First blow any referenced inode from this file system | ||
589 | * out of the reference cache, and delete the timer. | ||
590 | */ | ||
591 | xfs_refcache_purge_mp(mp); | ||
592 | 583 | ||
593 | /* | 584 | /* |
594 | * Blow away any referenced inode in the filestreams cache. | 585 | * Blow away any referenced inode in the filestreams cache. |
@@ -607,7 +598,7 @@ xfs_unmount( | |||
607 | /* | 598 | /* |
608 | * Drop the reference count | 599 | * Drop the reference count |
609 | */ | 600 | */ |
610 | VN_RELE(rvp); | 601 | IRELE(rip); |
611 | 602 | ||
612 | /* | 603 | /* |
613 | * If we're forcing a shutdown, typically because of a media error, | 604 | * If we're forcing a shutdown, typically because of a media error, |
@@ -629,7 +620,7 @@ out: | |||
629 | /* Note: mp structure must still exist for | 620 | /* Note: mp structure must still exist for |
630 | * XFS_SEND_UNMOUNT() call. | 621 | * XFS_SEND_UNMOUNT() call. |
631 | */ | 622 | */ |
632 | XFS_SEND_UNMOUNT(mp, error == 0 ? rvp : NULL, | 623 | XFS_SEND_UNMOUNT(mp, error == 0 ? rip : NULL, |
633 | DM_RIGHT_NULL, 0, error, unmount_event_flags); | 624 | DM_RIGHT_NULL, 0, error, unmount_event_flags); |
634 | } | 625 | } |
635 | if (xfs_unmountfs_needed) { | 626 | if (xfs_unmountfs_needed) { |
@@ -646,13 +637,12 @@ out: | |||
646 | return XFS_ERROR(error); | 637 | return XFS_ERROR(error); |
647 | } | 638 | } |
648 | 639 | ||
649 | STATIC int | 640 | STATIC void |
650 | xfs_quiesce_fs( | 641 | xfs_quiesce_fs( |
651 | xfs_mount_t *mp) | 642 | xfs_mount_t *mp) |
652 | { | 643 | { |
653 | int count = 0, pincount; | 644 | int count = 0, pincount; |
654 | 645 | ||
655 | xfs_refcache_purge_mp(mp); | ||
656 | xfs_flush_buftarg(mp->m_ddev_targp, 0); | 646 | xfs_flush_buftarg(mp->m_ddev_targp, 0); |
657 | xfs_finish_reclaim_all(mp, 0); | 647 | xfs_finish_reclaim_all(mp, 0); |
658 | 648 | ||
@@ -671,8 +661,6 @@ xfs_quiesce_fs( | |||
671 | count++; | 661 | count++; |
672 | } | 662 | } |
673 | } while (count < 2); | 663 | } while (count < 2); |
674 | |||
675 | return 0; | ||
676 | } | 664 | } |
677 | 665 | ||
678 | /* | 666 | /* |
@@ -684,6 +672,8 @@ void | |||
684 | xfs_attr_quiesce( | 672 | xfs_attr_quiesce( |
685 | xfs_mount_t *mp) | 673 | xfs_mount_t *mp) |
686 | { | 674 | { |
675 | int error = 0; | ||
676 | |||
687 | /* wait for all modifications to complete */ | 677 | /* wait for all modifications to complete */ |
688 | while (atomic_read(&mp->m_active_trans) > 0) | 678 | while (atomic_read(&mp->m_active_trans) > 0) |
689 | delay(100); | 679 | delay(100); |
@@ -694,7 +684,11 @@ xfs_attr_quiesce( | |||
694 | ASSERT_ALWAYS(atomic_read(&mp->m_active_trans) == 0); | 684 | ASSERT_ALWAYS(atomic_read(&mp->m_active_trans) == 0); |
695 | 685 | ||
696 | /* Push the superblock and write an unmount record */ | 686 | /* Push the superblock and write an unmount record */ |
697 | xfs_log_sbcount(mp, 1); | 687 | error = xfs_log_sbcount(mp, 1); |
688 | if (error) | ||
689 | xfs_fs_cmn_err(CE_WARN, mp, | ||
690 | "xfs_attr_quiesce: failed to log sb changes. " | ||
691 | "Frozen image may not be consistent."); | ||
698 | xfs_log_unmount_write(mp); | 692 | xfs_log_unmount_write(mp); |
699 | xfs_unmountfs_writesb(mp); | 693 | xfs_unmountfs_writesb(mp); |
700 | } | 694 | } |
@@ -790,8 +784,8 @@ xfs_unmount_flush( | |||
790 | goto fscorrupt_out2; | 784 | goto fscorrupt_out2; |
791 | 785 | ||
792 | if (rbmip) { | 786 | if (rbmip) { |
793 | VN_RELE(XFS_ITOV(rbmip)); | 787 | IRELE(rbmip); |
794 | VN_RELE(XFS_ITOV(rsumip)); | 788 | IRELE(rsumip); |
795 | } | 789 | } |
796 | 790 | ||
797 | xfs_iunlock(rip, XFS_ILOCK_EXCL); | 791 | xfs_iunlock(rip, XFS_ILOCK_EXCL); |
@@ -1169,10 +1163,10 @@ xfs_sync_inodes( | |||
1169 | * above, then wait until after we've unlocked | 1163 | * above, then wait until after we've unlocked |
1170 | * the inode to release the reference. This is | 1164 | * the inode to release the reference. This is |
1171 | * because we can be already holding the inode | 1165 | * because we can be already holding the inode |
1172 | * lock when VN_RELE() calls xfs_inactive(). | 1166 | * lock when IRELE() calls xfs_inactive(). |
1173 | * | 1167 | * |
1174 | * Make sure to drop the mount lock before calling | 1168 | * Make sure to drop the mount lock before calling |
1175 | * VN_RELE() so that we don't trip over ourselves if | 1169 | * IRELE() so that we don't trip over ourselves if |
1176 | * we have to go for the mount lock again in the | 1170 | * we have to go for the mount lock again in the |
1177 | * inactive code. | 1171 | * inactive code. |
1178 | */ | 1172 | */ |
@@ -1180,7 +1174,7 @@ xfs_sync_inodes( | |||
1180 | IPOINTER_INSERT(ip, mp); | 1174 | IPOINTER_INSERT(ip, mp); |
1181 | } | 1175 | } |
1182 | 1176 | ||
1183 | VN_RELE(vp); | 1177 | IRELE(ip); |
1184 | 1178 | ||
1185 | vnode_refed = B_FALSE; | 1179 | vnode_refed = B_FALSE; |
1186 | } | 1180 | } |
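
The hunks in this file (and most of xfs_vnodeops.c below) swap vnode-based reference counting, VN_HOLD()/VN_RELE() on a bhv_vnode_t, for the inode-based helpers IHOLD()/IRELE() on an xfs_inode_t, so callers no longer need XFS_ITOV() just to take or drop a reference. The helper definitions themselves are not part of this diff; the following is only a sketch of the assumed mapping, not the tree's verbatim macros:

	/* Sketch only: assumed relationship between the old and new helpers. */
	#define IHOLD(ip)	VN_HOLD(XFS_ITOV(ip))	/* grab a reference through the inode */
	#define IRELE(ip)	VN_RELE(XFS_ITOV(ip))	/* drop it; the final drop ends up in xfs_inactive() */

	/* before: VN_RELE(XFS_ITOV(rbmip));    after: IRELE(rbmip); */
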
@@ -1323,30 +1317,8 @@ xfs_syncsub( | |||
1323 | } | 1317 | } |
1324 | 1318 | ||
1325 | /* | 1319 | /* |
1326 | * If this is the periodic sync, then kick some entries out of | ||
1327 | * the reference cache. This ensures that idle entries are | ||
1328 | * eventually kicked out of the cache. | ||
1329 | */ | ||
1330 | if (flags & SYNC_REFCACHE) { | ||
1331 | if (flags & SYNC_WAIT) | ||
1332 | xfs_refcache_purge_mp(mp); | ||
1333 | else | ||
1334 | xfs_refcache_purge_some(mp); | ||
1335 | } | ||
1336 | |||
1337 | /* | ||
1338 | * If asked, update the disk superblock with incore counter values if we | ||
1339 | * are using non-persistent counters so that they don't get too far out | ||
1340 | * of sync if we crash or get a forced shutdown. We don't want to force | ||
1341 | * this to disk, just get a transaction into the iclogs.... | ||
1342 | */ | ||
1343 | if (flags & SYNC_SUPER) | ||
1344 | xfs_log_sbcount(mp, 0); | ||
1345 | |||
1346 | /* | ||
1347 | * Now check to see if the log needs a "dummy" transaction. | 1320 | * Now check to see if the log needs a "dummy" transaction. |
1348 | */ | 1321 | */ |
1349 | |||
1350 | if (!(flags & SYNC_REMOUNT) && xfs_log_need_covered(mp)) { | 1322 | if (!(flags & SYNC_REMOUNT) && xfs_log_need_covered(mp)) { |
1351 | xfs_trans_t *tp; | 1323 | xfs_trans_t *tp; |
1352 | xfs_inode_t *ip; | 1324 | xfs_inode_t *ip; |
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 64c5953feca4..6650601c64f7 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
@@ -48,7 +48,6 @@ | |||
48 | #include "xfs_quota.h" | 48 | #include "xfs_quota.h" |
49 | #include "xfs_utils.h" | 49 | #include "xfs_utils.h" |
50 | #include "xfs_rtalloc.h" | 50 | #include "xfs_rtalloc.h" |
51 | #include "xfs_refcache.h" | ||
52 | #include "xfs_trans_space.h" | 51 | #include "xfs_trans_space.h" |
53 | #include "xfs_log_priv.h" | 52 | #include "xfs_log_priv.h" |
54 | #include "xfs_filestream.h" | 53 | #include "xfs_filestream.h" |
@@ -327,7 +326,7 @@ xfs_setattr( | |||
327 | if (DM_EVENT_ENABLED(ip, DM_EVENT_TRUNCATE) && | 326 | if (DM_EVENT_ENABLED(ip, DM_EVENT_TRUNCATE) && |
328 | !(flags & ATTR_DMI)) { | 327 | !(flags & ATTR_DMI)) { |
329 | int dmflags = AT_DELAY_FLAG(flags) | DM_SEM_FLAG_WR; | 328 | int dmflags = AT_DELAY_FLAG(flags) | DM_SEM_FLAG_WR; |
330 | code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, vp, | 329 | code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, ip, |
331 | vap->va_size, 0, dmflags, NULL); | 330 | vap->va_size, 0, dmflags, NULL); |
332 | if (code) { | 331 | if (code) { |
333 | lock_flags = 0; | 332 | lock_flags = 0; |
@@ -634,6 +633,15 @@ xfs_setattr( | |||
634 | * Truncate file. Must have write permission and not be a directory. | 633 | * Truncate file. Must have write permission and not be a directory. |
635 | */ | 634 | */ |
636 | if (mask & XFS_AT_SIZE) { | 635 | if (mask & XFS_AT_SIZE) { |
636 | /* | ||
637 | * Only change the c/mtime if we are changing the size | ||
638 | * or we are explicitly asked to change it. This handles | ||
639 | * the semantic difference between truncate() and ftruncate() | ||
640 | * as implemented in the VFS. | ||
641 | */ | ||
642 | if (vap->va_size != ip->i_size || (mask & XFS_AT_CTIME)) | ||
643 | timeflags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; | ||
644 | |||
637 | if (vap->va_size > ip->i_size) { | 645 | if (vap->va_size > ip->i_size) { |
638 | xfs_igrow_finish(tp, ip, vap->va_size, | 646 | xfs_igrow_finish(tp, ip, vap->va_size, |
639 | !(flags & ATTR_DMI)); | 647 | !(flags & ATTR_DMI)); |
@@ -662,10 +670,6 @@ xfs_setattr( | |||
662 | */ | 670 | */ |
663 | xfs_iflags_set(ip, XFS_ITRUNCATED); | 671 | xfs_iflags_set(ip, XFS_ITRUNCATED); |
664 | } | 672 | } |
665 | /* | ||
666 | * Have to do this even if the file's size doesn't change. | ||
667 | */ | ||
668 | timeflags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; | ||
669 | } | 673 | } |
670 | 674 | ||
671 | /* | 675 | /* |
@@ -877,7 +881,7 @@ xfs_setattr( | |||
877 | 881 | ||
878 | if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE) && | 882 | if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE) && |
879 | !(flags & ATTR_DMI)) { | 883 | !(flags & ATTR_DMI)) { |
880 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, vp, DM_RIGHT_NULL, | 884 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, ip, DM_RIGHT_NULL, |
881 | NULL, DM_RIGHT_NULL, NULL, NULL, | 885 | NULL, DM_RIGHT_NULL, NULL, NULL, |
882 | 0, 0, AT_DELAY_FLAG(flags)); | 886 | 0, 0, AT_DELAY_FLAG(flags)); |
883 | } | 887 | } |
@@ -1443,28 +1447,22 @@ xfs_inactive_attrs( | |||
1443 | tp = *tpp; | 1447 | tp = *tpp; |
1444 | mp = ip->i_mount; | 1448 | mp = ip->i_mount; |
1445 | ASSERT(ip->i_d.di_forkoff != 0); | 1449 | ASSERT(ip->i_d.di_forkoff != 0); |
1446 | xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); | 1450 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); |
1447 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 1451 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
1452 | if (error) | ||
1453 | goto error_unlock; | ||
1448 | 1454 | ||
1449 | error = xfs_attr_inactive(ip); | 1455 | error = xfs_attr_inactive(ip); |
1450 | if (error) { | 1456 | if (error) |
1451 | *tpp = NULL; | 1457 | goto error_unlock; |
1452 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | ||
1453 | return error; /* goto out */ | ||
1454 | } | ||
1455 | 1458 | ||
1456 | tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); | 1459 | tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); |
1457 | error = xfs_trans_reserve(tp, 0, | 1460 | error = xfs_trans_reserve(tp, 0, |
1458 | XFS_IFREE_LOG_RES(mp), | 1461 | XFS_IFREE_LOG_RES(mp), |
1459 | 0, XFS_TRANS_PERM_LOG_RES, | 1462 | 0, XFS_TRANS_PERM_LOG_RES, |
1460 | XFS_INACTIVE_LOG_COUNT); | 1463 | XFS_INACTIVE_LOG_COUNT); |
1461 | if (error) { | 1464 | if (error) |
1462 | ASSERT(XFS_FORCED_SHUTDOWN(mp)); | 1465 | goto error_cancel; |
1463 | xfs_trans_cancel(tp, 0); | ||
1464 | *tpp = NULL; | ||
1465 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | ||
1466 | return error; | ||
1467 | } | ||
1468 | 1466 | ||
1469 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 1467 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
1470 | xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); | 1468 | xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); |
@@ -1475,6 +1473,14 @@ xfs_inactive_attrs( | |||
1475 | 1473 | ||
1476 | *tpp = tp; | 1474 | *tpp = tp; |
1477 | return 0; | 1475 | return 0; |
1476 | |||
1477 | error_cancel: | ||
1478 | ASSERT(XFS_FORCED_SHUTDOWN(mp)); | ||
1479 | xfs_trans_cancel(tp, 0); | ||
1480 | error_unlock: | ||
1481 | *tpp = NULL; | ||
1482 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | ||
1483 | return error; | ||
1478 | } | 1484 | } |
1479 | 1485 | ||
1480 | int | 1486 | int |
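
The xfs_inactive_attrs() hunks above fold two open-coded failure paths (cancel/unlock, clear *tpp, return) into shared error_cancel and error_unlock labels at the end of the function. A standalone sketch of that unwind idiom, with purely hypothetical names, looks like this:

	/* Illustrative only; every identifier here is hypothetical. */
	int example_two_phase(struct resource *r)
	{
		int error;

		error = phase_one(r);
		if (error)
			goto error_unlock;	/* nothing to cancel yet */

		error = phase_two(r);
		if (error)
			goto error_cancel;	/* undo phase one, then fall through */

		return 0;

	error_cancel:
		cancel_phase_one(r);
	error_unlock:
		unlock_resource(r);
		return error;
	}
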
@@ -1520,12 +1526,6 @@ xfs_release( | |||
1520 | xfs_flush_pages(ip, 0, -1, XFS_B_ASYNC, FI_NONE); | 1526 | xfs_flush_pages(ip, 0, -1, XFS_B_ASYNC, FI_NONE); |
1521 | } | 1527 | } |
1522 | 1528 | ||
1523 | #ifdef HAVE_REFCACHE | ||
1524 | /* If we are in the NFS reference cache then don't do this now */ | ||
1525 | if (ip->i_refcache) | ||
1526 | return 0; | ||
1527 | #endif | ||
1528 | |||
1529 | if (ip->i_d.di_nlink != 0) { | 1529 | if (ip->i_d.di_nlink != 0) { |
1530 | if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && | 1530 | if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && |
1531 | ((ip->i_size > 0) || (VN_CACHED(vp) > 0 || | 1531 | ((ip->i_size > 0) || (VN_CACHED(vp) > 0 || |
@@ -1588,9 +1588,8 @@ xfs_inactive( | |||
1588 | 1588 | ||
1589 | mp = ip->i_mount; | 1589 | mp = ip->i_mount; |
1590 | 1590 | ||
1591 | if (ip->i_d.di_nlink == 0 && DM_EVENT_ENABLED(ip, DM_EVENT_DESTROY)) { | 1591 | if (ip->i_d.di_nlink == 0 && DM_EVENT_ENABLED(ip, DM_EVENT_DESTROY)) |
1592 | (void) XFS_SEND_DESTROY(mp, vp, DM_RIGHT_NULL); | 1592 | XFS_SEND_DESTROY(mp, ip, DM_RIGHT_NULL); |
1593 | } | ||
1594 | 1593 | ||
1595 | error = 0; | 1594 | error = 0; |
1596 | 1595 | ||
@@ -1744,11 +1743,18 @@ xfs_inactive( | |||
1744 | XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_ICOUNT, -1); | 1743 | XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_ICOUNT, -1); |
1745 | 1744 | ||
1746 | /* | 1745 | /* |
1747 | * Just ignore errors at this point. There is | 1746 | * Just ignore errors at this point. There is nothing we can |
1748 | * nothing we can do except to try to keep going. | 1747 | * do except to try to keep going. Make sure it's not a silent |
1748 | * error. | ||
1749 | */ | 1749 | */ |
1750 | (void) xfs_bmap_finish(&tp, &free_list, &committed); | 1750 | error = xfs_bmap_finish(&tp, &free_list, &committed); |
1751 | (void) xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); | 1751 | if (error) |
1752 | xfs_fs_cmn_err(CE_NOTE, mp, "xfs_inactive: " | ||
1753 | "xfs_bmap_finish() returned error %d", error); | ||
1754 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); | ||
1755 | if (error) | ||
1756 | xfs_fs_cmn_err(CE_NOTE, mp, "xfs_inactive: " | ||
1757 | "xfs_trans_commit() returned error %d", error); | ||
1752 | } | 1758 | } |
1753 | /* | 1759 | /* |
1754 | * Release the dquots held by inode, if any. | 1760 | * Release the dquots held by inode, if any. |
@@ -1765,8 +1771,8 @@ xfs_inactive( | |||
1765 | int | 1771 | int |
1766 | xfs_lookup( | 1772 | xfs_lookup( |
1767 | xfs_inode_t *dp, | 1773 | xfs_inode_t *dp, |
1768 | bhv_vname_t *dentry, | 1774 | struct xfs_name *name, |
1769 | bhv_vnode_t **vpp) | 1775 | xfs_inode_t **ipp) |
1770 | { | 1776 | { |
1771 | xfs_inode_t *ip; | 1777 | xfs_inode_t *ip; |
1772 | xfs_ino_t e_inum; | 1778 | xfs_ino_t e_inum; |
@@ -1779,9 +1785,9 @@ xfs_lookup( | |||
1779 | return XFS_ERROR(EIO); | 1785 | return XFS_ERROR(EIO); |
1780 | 1786 | ||
1781 | lock_mode = xfs_ilock_map_shared(dp); | 1787 | lock_mode = xfs_ilock_map_shared(dp); |
1782 | error = xfs_dir_lookup_int(dp, lock_mode, dentry, &e_inum, &ip); | 1788 | error = xfs_dir_lookup_int(dp, lock_mode, name, &e_inum, &ip); |
1783 | if (!error) { | 1789 | if (!error) { |
1784 | *vpp = XFS_ITOV(ip); | 1790 | *ipp = ip; |
1785 | xfs_itrace_ref(ip); | 1791 | xfs_itrace_ref(ip); |
1786 | } | 1792 | } |
1787 | xfs_iunlock_map_shared(dp, lock_mode); | 1793 | xfs_iunlock_map_shared(dp, lock_mode); |
@@ -1791,19 +1797,16 @@ xfs_lookup( | |||
1791 | int | 1797 | int |
1792 | xfs_create( | 1798 | xfs_create( |
1793 | xfs_inode_t *dp, | 1799 | xfs_inode_t *dp, |
1794 | bhv_vname_t *dentry, | 1800 | struct xfs_name *name, |
1795 | mode_t mode, | 1801 | mode_t mode, |
1796 | xfs_dev_t rdev, | 1802 | xfs_dev_t rdev, |
1797 | bhv_vnode_t **vpp, | 1803 | xfs_inode_t **ipp, |
1798 | cred_t *credp) | 1804 | cred_t *credp) |
1799 | { | 1805 | { |
1800 | char *name = VNAME(dentry); | 1806 | xfs_mount_t *mp = dp->i_mount; |
1801 | xfs_mount_t *mp = dp->i_mount; | ||
1802 | bhv_vnode_t *dir_vp = XFS_ITOV(dp); | ||
1803 | xfs_inode_t *ip; | 1807 | xfs_inode_t *ip; |
1804 | bhv_vnode_t *vp = NULL; | ||
1805 | xfs_trans_t *tp; | 1808 | xfs_trans_t *tp; |
1806 | int error; | 1809 | int error; |
1807 | xfs_bmap_free_t free_list; | 1810 | xfs_bmap_free_t free_list; |
1808 | xfs_fsblock_t first_block; | 1811 | xfs_fsblock_t first_block; |
1809 | boolean_t unlock_dp_on_error = B_FALSE; | 1812 | boolean_t unlock_dp_on_error = B_FALSE; |
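
From this point on the directory entry name arrives as a struct xfs_name instead of a bhv_vname_t, which is why the VNAME()/VNAMELEN() accessors and the local namelen variables disappear and call sites use name->name and name->len. The structure is defined elsewhere in this series; it is presumably just a pointer/length pair along these lines:

	/* Presumed shape, shown only to make the name->name / name->len uses
	 * below readable; the in-tree definition may differ in detail. */
	struct xfs_name {
		const unsigned char	*name;	/* name bytes, not owned by XFS */
		int			len;	/* number of valid bytes */
	};
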
@@ -1813,17 +1816,14 @@ xfs_create( | |||
1813 | xfs_prid_t prid; | 1816 | xfs_prid_t prid; |
1814 | struct xfs_dquot *udqp, *gdqp; | 1817 | struct xfs_dquot *udqp, *gdqp; |
1815 | uint resblks; | 1818 | uint resblks; |
1816 | int namelen; | ||
1817 | 1819 | ||
1818 | ASSERT(!*vpp); | 1820 | ASSERT(!*ipp); |
1819 | xfs_itrace_entry(dp); | 1821 | xfs_itrace_entry(dp); |
1820 | 1822 | ||
1821 | namelen = VNAMELEN(dentry); | ||
1822 | |||
1823 | if (DM_EVENT_ENABLED(dp, DM_EVENT_CREATE)) { | 1823 | if (DM_EVENT_ENABLED(dp, DM_EVENT_CREATE)) { |
1824 | error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE, | 1824 | error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE, |
1825 | dir_vp, DM_RIGHT_NULL, NULL, | 1825 | dp, DM_RIGHT_NULL, NULL, |
1826 | DM_RIGHT_NULL, name, NULL, | 1826 | DM_RIGHT_NULL, name->name, NULL, |
1827 | mode, 0, 0); | 1827 | mode, 0, 0); |
1828 | 1828 | ||
1829 | if (error) | 1829 | if (error) |
@@ -1855,7 +1855,7 @@ xfs_create( | |||
1855 | 1855 | ||
1856 | tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE); | 1856 | tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE); |
1857 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES; | 1857 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES; |
1858 | resblks = XFS_CREATE_SPACE_RES(mp, namelen); | 1858 | resblks = XFS_CREATE_SPACE_RES(mp, name->len); |
1859 | /* | 1859 | /* |
1860 | * Initially assume that the file does not exist and | 1860 | * Initially assume that the file does not exist and |
1861 | * reserve the resources for that case. If that is not | 1861 | * reserve the resources for that case. If that is not |
@@ -1888,7 +1888,8 @@ xfs_create( | |||
1888 | if (error) | 1888 | if (error) |
1889 | goto error_return; | 1889 | goto error_return; |
1890 | 1890 | ||
1891 | if (resblks == 0 && (error = xfs_dir_canenter(tp, dp, name, namelen))) | 1891 | error = xfs_dir_canenter(tp, dp, name, resblks); |
1892 | if (error) | ||
1892 | goto error_return; | 1893 | goto error_return; |
1893 | error = xfs_dir_ialloc(&tp, dp, mode, 1, | 1894 | error = xfs_dir_ialloc(&tp, dp, mode, 1, |
1894 | rdev, credp, prid, resblks > 0, | 1895 | rdev, credp, prid, resblks > 0, |
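
The reservation-dependent guard also changes shape here and in the matching xfs_link(), xfs_mkdir() and xfs_symlink() hunks below: instead of the caller skipping xfs_dir_canenter() whenever resblks != 0, the reservation count is passed in and the check is presumably short-circuited inside the callee. A hedged sketch of that callee-side behaviour (the probe helper named below is hypothetical):

	/* Sketch of the assumed xfs_dir_canenter() behaviour after this change. */
	int
	xfs_dir_canenter(
		xfs_trans_t	*tp,
		xfs_inode_t	*dp,
		struct xfs_name	*name,
		uint		resblks)
	{
		if (resblks)
			return 0;	/* blocks reserved up front, the entry will fit */
		/* otherwise probe the directory for space, as callers used to
		 * do explicitly when resblks == 0 */
		return xfs_dir_probe_space(tp, dp, name);	/* hypothetical helper */
	}
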
@@ -1914,11 +1915,11 @@ xfs_create( | |||
1914 | * the transaction cancel unlocking dp so don't do it explicitly in the | 1915 | * the transaction cancel unlocking dp so don't do it explicitly in the |
1915 | * error path. | 1916 | * error path. |
1916 | */ | 1917 | */ |
1917 | VN_HOLD(dir_vp); | 1918 | IHOLD(dp); |
1918 | xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); | 1919 | xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); |
1919 | unlock_dp_on_error = B_FALSE; | 1920 | unlock_dp_on_error = B_FALSE; |
1920 | 1921 | ||
1921 | error = xfs_dir_createname(tp, dp, name, namelen, ip->i_ino, | 1922 | error = xfs_dir_createname(tp, dp, name, ip->i_ino, |
1922 | &first_block, &free_list, resblks ? | 1923 | &first_block, &free_list, resblks ? |
1923 | resblks - XFS_IALLOC_SPACE_RES(mp) : 0); | 1924 | resblks - XFS_IALLOC_SPACE_RES(mp) : 0); |
1924 | if (error) { | 1925 | if (error) { |
@@ -1952,7 +1953,6 @@ xfs_create( | |||
1952 | * vnode to the caller, we bump the vnode ref count now. | 1953 | * vnode to the caller, we bump the vnode ref count now. |
1953 | */ | 1954 | */ |
1954 | IHOLD(ip); | 1955 | IHOLD(ip); |
1955 | vp = XFS_ITOV(ip); | ||
1956 | 1956 | ||
1957 | error = xfs_bmap_finish(&tp, &free_list, &committed); | 1957 | error = xfs_bmap_finish(&tp, &free_list, &committed); |
1958 | if (error) { | 1958 | if (error) { |
@@ -1970,17 +1970,17 @@ xfs_create( | |||
1970 | XFS_QM_DQRELE(mp, udqp); | 1970 | XFS_QM_DQRELE(mp, udqp); |
1971 | XFS_QM_DQRELE(mp, gdqp); | 1971 | XFS_QM_DQRELE(mp, gdqp); |
1972 | 1972 | ||
1973 | *vpp = vp; | 1973 | *ipp = ip; |
1974 | 1974 | ||
1975 | /* Fallthrough to std_return with error = 0 */ | 1975 | /* Fallthrough to std_return with error = 0 */ |
1976 | 1976 | ||
1977 | std_return: | 1977 | std_return: |
1978 | if ((*vpp || (error != 0 && dm_event_sent != 0)) && | 1978 | if ((*ipp || (error != 0 && dm_event_sent != 0)) && |
1979 | DM_EVENT_ENABLED(dp, DM_EVENT_POSTCREATE)) { | 1979 | DM_EVENT_ENABLED(dp, DM_EVENT_POSTCREATE)) { |
1980 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE, | 1980 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE, |
1981 | dir_vp, DM_RIGHT_NULL, | 1981 | dp, DM_RIGHT_NULL, |
1982 | *vpp ? vp:NULL, | 1982 | *ipp ? ip : NULL, |
1983 | DM_RIGHT_NULL, name, NULL, | 1983 | DM_RIGHT_NULL, name->name, NULL, |
1984 | mode, error, 0); | 1984 | mode, error, 0); |
1985 | } | 1985 | } |
1986 | return error; | 1986 | return error; |
@@ -2272,46 +2272,32 @@ int remove_which_error_return = 0; | |||
2272 | int | 2272 | int |
2273 | xfs_remove( | 2273 | xfs_remove( |
2274 | xfs_inode_t *dp, | 2274 | xfs_inode_t *dp, |
2275 | bhv_vname_t *dentry) | 2275 | struct xfs_name *name, |
2276 | xfs_inode_t *ip) | ||
2276 | { | 2277 | { |
2277 | bhv_vnode_t *dir_vp = XFS_ITOV(dp); | ||
2278 | char *name = VNAME(dentry); | ||
2279 | xfs_mount_t *mp = dp->i_mount; | 2278 | xfs_mount_t *mp = dp->i_mount; |
2280 | xfs_inode_t *ip; | ||
2281 | xfs_trans_t *tp = NULL; | 2279 | xfs_trans_t *tp = NULL; |
2282 | int error = 0; | 2280 | int error = 0; |
2283 | xfs_bmap_free_t free_list; | 2281 | xfs_bmap_free_t free_list; |
2284 | xfs_fsblock_t first_block; | 2282 | xfs_fsblock_t first_block; |
2285 | int cancel_flags; | 2283 | int cancel_flags; |
2286 | int committed; | 2284 | int committed; |
2287 | int dm_di_mode = 0; | ||
2288 | int link_zero; | 2285 | int link_zero; |
2289 | uint resblks; | 2286 | uint resblks; |
2290 | int namelen; | ||
2291 | 2287 | ||
2292 | xfs_itrace_entry(dp); | 2288 | xfs_itrace_entry(dp); |
2293 | 2289 | ||
2294 | if (XFS_FORCED_SHUTDOWN(mp)) | 2290 | if (XFS_FORCED_SHUTDOWN(mp)) |
2295 | return XFS_ERROR(EIO); | 2291 | return XFS_ERROR(EIO); |
2296 | 2292 | ||
2297 | namelen = VNAMELEN(dentry); | ||
2298 | |||
2299 | if (!xfs_get_dir_entry(dentry, &ip)) { | ||
2300 | dm_di_mode = ip->i_d.di_mode; | ||
2301 | IRELE(ip); | ||
2302 | } | ||
2303 | |||
2304 | if (DM_EVENT_ENABLED(dp, DM_EVENT_REMOVE)) { | 2293 | if (DM_EVENT_ENABLED(dp, DM_EVENT_REMOVE)) { |
2305 | error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, dir_vp, | 2294 | error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, dp, DM_RIGHT_NULL, |
2306 | DM_RIGHT_NULL, NULL, DM_RIGHT_NULL, | 2295 | NULL, DM_RIGHT_NULL, name->name, NULL, |
2307 | name, NULL, dm_di_mode, 0, 0); | 2296 | ip->i_d.di_mode, 0, 0); |
2308 | if (error) | 2297 | if (error) |
2309 | return error; | 2298 | return error; |
2310 | } | 2299 | } |
2311 | 2300 | ||
2312 | /* From this point on, return through std_return */ | ||
2313 | ip = NULL; | ||
2314 | |||
2315 | /* | 2301 | /* |
2316 | * We need to get a reference to ip before we get our log | 2302 | * We need to get a reference to ip before we get our log |
2317 | * reservation. The reason for this is that we cannot call | 2303 | * reservation. The reason for this is that we cannot call |
@@ -2324,13 +2310,7 @@ xfs_remove( | |||
2324 | * when we call xfs_iget. Instead we get an unlocked reference | 2310 | * when we call xfs_iget. Instead we get an unlocked reference |
2325 | * to the inode before getting our log reservation. | 2311 | * to the inode before getting our log reservation. |
2326 | */ | 2312 | */ |
2327 | error = xfs_get_dir_entry(dentry, &ip); | 2313 | IHOLD(ip); |
2328 | if (error) { | ||
2329 | REMOVE_DEBUG_TRACE(__LINE__); | ||
2330 | goto std_return; | ||
2331 | } | ||
2332 | |||
2333 | dm_di_mode = ip->i_d.di_mode; | ||
2334 | 2314 | ||
2335 | xfs_itrace_entry(ip); | 2315 | xfs_itrace_entry(ip); |
2336 | xfs_itrace_ref(ip); | 2316 | xfs_itrace_ref(ip); |
@@ -2398,7 +2378,7 @@ xfs_remove( | |||
2398 | * Entry must exist since we did a lookup in xfs_lock_dir_and_entry. | 2378 | * Entry must exist since we did a lookup in xfs_lock_dir_and_entry. |
2399 | */ | 2379 | */ |
2400 | XFS_BMAP_INIT(&free_list, &first_block); | 2380 | XFS_BMAP_INIT(&free_list, &first_block); |
2401 | error = xfs_dir_removename(tp, dp, name, namelen, ip->i_ino, | 2381 | error = xfs_dir_removename(tp, dp, name, ip->i_ino, |
2402 | &first_block, &free_list, 0); | 2382 | &first_block, &free_list, 0); |
2403 | if (error) { | 2383 | if (error) { |
2404 | ASSERT(error != ENOENT); | 2384 | ASSERT(error != ENOENT); |
@@ -2449,14 +2429,6 @@ xfs_remove( | |||
2449 | } | 2429 | } |
2450 | 2430 | ||
2451 | /* | 2431 | /* |
2452 | * Before we drop our extra reference to the inode, purge it | ||
2453 | * from the refcache if it is there. By waiting until afterwards | ||
2454 | * to do the IRELE, we ensure that we won't go inactive in the | ||
2455 | * xfs_refcache_purge_ip routine (although that would be OK). | ||
2456 | */ | ||
2457 | xfs_refcache_purge_ip(ip); | ||
2458 | |||
2459 | /* | ||
2460 | * If we are using filestreams, kill the stream association. | 2432 | * If we are using filestreams, kill the stream association. |
2461 | * If the file is still open it may get a new one but that | 2433 | * If the file is still open it may get a new one but that |
2462 | * will get killed on last close in xfs_close() so we don't | 2434 | * will get killed on last close in xfs_close() so we don't |
@@ -2472,9 +2444,9 @@ xfs_remove( | |||
2472 | std_return: | 2444 | std_return: |
2473 | if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) { | 2445 | if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) { |
2474 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, | 2446 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, |
2475 | dir_vp, DM_RIGHT_NULL, | 2447 | dp, DM_RIGHT_NULL, |
2476 | NULL, DM_RIGHT_NULL, | 2448 | NULL, DM_RIGHT_NULL, |
2477 | name, NULL, dm_di_mode, error, 0); | 2449 | name->name, NULL, ip->i_d.di_mode, error, 0); |
2478 | } | 2450 | } |
2479 | return error; | 2451 | return error; |
2480 | 2452 | ||
@@ -2495,14 +2467,6 @@ xfs_remove( | |||
2495 | cancel_flags |= XFS_TRANS_ABORT; | 2467 | cancel_flags |= XFS_TRANS_ABORT; |
2496 | xfs_trans_cancel(tp, cancel_flags); | 2468 | xfs_trans_cancel(tp, cancel_flags); |
2497 | 2469 | ||
2498 | /* | ||
2499 | * Before we drop our extra reference to the inode, purge it | ||
2500 | * from the refcache if it is there. By waiting until afterwards | ||
2501 | * to do the IRELE, we ensure that we won't go inactive in the | ||
2502 | * xfs_refcache_purge_ip routine (although that would be OK). | ||
2503 | */ | ||
2504 | xfs_refcache_purge_ip(ip); | ||
2505 | |||
2506 | IRELE(ip); | 2470 | IRELE(ip); |
2507 | 2471 | ||
2508 | goto std_return; | 2472 | goto std_return; |
@@ -2511,12 +2475,10 @@ xfs_remove( | |||
2511 | int | 2475 | int |
2512 | xfs_link( | 2476 | xfs_link( |
2513 | xfs_inode_t *tdp, | 2477 | xfs_inode_t *tdp, |
2514 | bhv_vnode_t *src_vp, | 2478 | xfs_inode_t *sip, |
2515 | bhv_vname_t *dentry) | 2479 | struct xfs_name *target_name) |
2516 | { | 2480 | { |
2517 | bhv_vnode_t *target_dir_vp = XFS_ITOV(tdp); | ||
2518 | xfs_mount_t *mp = tdp->i_mount; | 2481 | xfs_mount_t *mp = tdp->i_mount; |
2519 | xfs_inode_t *sip = xfs_vtoi(src_vp); | ||
2520 | xfs_trans_t *tp; | 2482 | xfs_trans_t *tp; |
2521 | xfs_inode_t *ips[2]; | 2483 | xfs_inode_t *ips[2]; |
2522 | int error; | 2484 | int error; |
@@ -2525,23 +2487,20 @@ xfs_link( | |||
2525 | int cancel_flags; | 2487 | int cancel_flags; |
2526 | int committed; | 2488 | int committed; |
2527 | int resblks; | 2489 | int resblks; |
2528 | char *target_name = VNAME(dentry); | ||
2529 | int target_namelen; | ||
2530 | 2490 | ||
2531 | xfs_itrace_entry(tdp); | 2491 | xfs_itrace_entry(tdp); |
2532 | xfs_itrace_entry(xfs_vtoi(src_vp)); | 2492 | xfs_itrace_entry(sip); |
2533 | 2493 | ||
2534 | target_namelen = VNAMELEN(dentry); | 2494 | ASSERT(!S_ISDIR(sip->i_d.di_mode)); |
2535 | ASSERT(!VN_ISDIR(src_vp)); | ||
2536 | 2495 | ||
2537 | if (XFS_FORCED_SHUTDOWN(mp)) | 2496 | if (XFS_FORCED_SHUTDOWN(mp)) |
2538 | return XFS_ERROR(EIO); | 2497 | return XFS_ERROR(EIO); |
2539 | 2498 | ||
2540 | if (DM_EVENT_ENABLED(tdp, DM_EVENT_LINK)) { | 2499 | if (DM_EVENT_ENABLED(tdp, DM_EVENT_LINK)) { |
2541 | error = XFS_SEND_NAMESP(mp, DM_EVENT_LINK, | 2500 | error = XFS_SEND_NAMESP(mp, DM_EVENT_LINK, |
2542 | target_dir_vp, DM_RIGHT_NULL, | 2501 | tdp, DM_RIGHT_NULL, |
2543 | src_vp, DM_RIGHT_NULL, | 2502 | sip, DM_RIGHT_NULL, |
2544 | target_name, NULL, 0, 0, 0); | 2503 | target_name->name, NULL, 0, 0, 0); |
2545 | if (error) | 2504 | if (error) |
2546 | return error; | 2505 | return error; |
2547 | } | 2506 | } |
@@ -2556,7 +2515,7 @@ xfs_link( | |||
2556 | 2515 | ||
2557 | tp = xfs_trans_alloc(mp, XFS_TRANS_LINK); | 2516 | tp = xfs_trans_alloc(mp, XFS_TRANS_LINK); |
2558 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES; | 2517 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES; |
2559 | resblks = XFS_LINK_SPACE_RES(mp, target_namelen); | 2518 | resblks = XFS_LINK_SPACE_RES(mp, target_name->len); |
2560 | error = xfs_trans_reserve(tp, resblks, XFS_LINK_LOG_RES(mp), 0, | 2519 | error = xfs_trans_reserve(tp, resblks, XFS_LINK_LOG_RES(mp), 0, |
2561 | XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT); | 2520 | XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT); |
2562 | if (error == ENOSPC) { | 2521 | if (error == ENOSPC) { |
@@ -2584,8 +2543,8 @@ xfs_link( | |||
2584 | * xfs_trans_cancel will both unlock the inodes and | 2543 | * xfs_trans_cancel will both unlock the inodes and |
2585 | * decrement the associated ref counts. | 2544 | * decrement the associated ref counts. |
2586 | */ | 2545 | */ |
2587 | VN_HOLD(src_vp); | 2546 | IHOLD(sip); |
2588 | VN_HOLD(target_dir_vp); | 2547 | IHOLD(tdp); |
2589 | xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL); | 2548 | xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL); |
2590 | xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL); | 2549 | xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL); |
2591 | 2550 | ||
@@ -2608,15 +2567,14 @@ xfs_link( | |||
2608 | goto error_return; | 2567 | goto error_return; |
2609 | } | 2568 | } |
2610 | 2569 | ||
2611 | if (resblks == 0 && | 2570 | error = xfs_dir_canenter(tp, tdp, target_name, resblks); |
2612 | (error = xfs_dir_canenter(tp, tdp, target_name, target_namelen))) | 2571 | if (error) |
2613 | goto error_return; | 2572 | goto error_return; |
2614 | 2573 | ||
2615 | XFS_BMAP_INIT(&free_list, &first_block); | 2574 | XFS_BMAP_INIT(&free_list, &first_block); |
2616 | 2575 | ||
2617 | error = xfs_dir_createname(tp, tdp, target_name, target_namelen, | 2576 | error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino, |
2618 | sip->i_ino, &first_block, &free_list, | 2577 | &first_block, &free_list, resblks); |
2619 | resblks); | ||
2620 | if (error) | 2578 | if (error) |
2621 | goto abort_return; | 2579 | goto abort_return; |
2622 | xfs_ichgtime(tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 2580 | xfs_ichgtime(tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); |
@@ -2650,9 +2608,9 @@ xfs_link( | |||
2650 | std_return: | 2608 | std_return: |
2651 | if (DM_EVENT_ENABLED(sip, DM_EVENT_POSTLINK)) { | 2609 | if (DM_EVENT_ENABLED(sip, DM_EVENT_POSTLINK)) { |
2652 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTLINK, | 2610 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTLINK, |
2653 | target_dir_vp, DM_RIGHT_NULL, | 2611 | tdp, DM_RIGHT_NULL, |
2654 | src_vp, DM_RIGHT_NULL, | 2612 | sip, DM_RIGHT_NULL, |
2655 | target_name, NULL, 0, error, 0); | 2613 | target_name->name, NULL, 0, error, 0); |
2656 | } | 2614 | } |
2657 | return error; | 2615 | return error; |
2658 | 2616 | ||
@@ -2669,17 +2627,13 @@ std_return: | |||
2669 | int | 2627 | int |
2670 | xfs_mkdir( | 2628 | xfs_mkdir( |
2671 | xfs_inode_t *dp, | 2629 | xfs_inode_t *dp, |
2672 | bhv_vname_t *dentry, | 2630 | struct xfs_name *dir_name, |
2673 | mode_t mode, | 2631 | mode_t mode, |
2674 | bhv_vnode_t **vpp, | 2632 | xfs_inode_t **ipp, |
2675 | cred_t *credp) | 2633 | cred_t *credp) |
2676 | { | 2634 | { |
2677 | bhv_vnode_t *dir_vp = XFS_ITOV(dp); | ||
2678 | char *dir_name = VNAME(dentry); | ||
2679 | int dir_namelen = VNAMELEN(dentry); | ||
2680 | xfs_mount_t *mp = dp->i_mount; | 2635 | xfs_mount_t *mp = dp->i_mount; |
2681 | xfs_inode_t *cdp; /* inode of created dir */ | 2636 | xfs_inode_t *cdp; /* inode of created dir */ |
2682 | bhv_vnode_t *cvp; /* vnode of created dir */ | ||
2683 | xfs_trans_t *tp; | 2637 | xfs_trans_t *tp; |
2684 | int cancel_flags; | 2638 | int cancel_flags; |
2685 | int error; | 2639 | int error; |
@@ -2700,8 +2654,8 @@ xfs_mkdir( | |||
2700 | 2654 | ||
2701 | if (DM_EVENT_ENABLED(dp, DM_EVENT_CREATE)) { | 2655 | if (DM_EVENT_ENABLED(dp, DM_EVENT_CREATE)) { |
2702 | error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE, | 2656 | error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE, |
2703 | dir_vp, DM_RIGHT_NULL, NULL, | 2657 | dp, DM_RIGHT_NULL, NULL, |
2704 | DM_RIGHT_NULL, dir_name, NULL, | 2658 | DM_RIGHT_NULL, dir_name->name, NULL, |
2705 | mode, 0, 0); | 2659 | mode, 0, 0); |
2706 | if (error) | 2660 | if (error) |
2707 | return error; | 2661 | return error; |
@@ -2730,7 +2684,7 @@ xfs_mkdir( | |||
2730 | 2684 | ||
2731 | tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR); | 2685 | tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR); |
2732 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES; | 2686 | cancel_flags = XFS_TRANS_RELEASE_LOG_RES; |
2733 | resblks = XFS_MKDIR_SPACE_RES(mp, dir_namelen); | 2687 | resblks = XFS_MKDIR_SPACE_RES(mp, dir_name->len); |
2734 | error = xfs_trans_reserve(tp, resblks, XFS_MKDIR_LOG_RES(mp), 0, | 2688 | error = xfs_trans_reserve(tp, resblks, XFS_MKDIR_LOG_RES(mp), 0, |
2735 | XFS_TRANS_PERM_LOG_RES, XFS_MKDIR_LOG_COUNT); | 2689 | XFS_TRANS_PERM_LOG_RES, XFS_MKDIR_LOG_COUNT); |
2736 | if (error == ENOSPC) { | 2690 | if (error == ENOSPC) { |
@@ -2762,8 +2716,8 @@ xfs_mkdir( | |||
2762 | if (error) | 2716 | if (error) |
2763 | goto error_return; | 2717 | goto error_return; |
2764 | 2718 | ||
2765 | if (resblks == 0 && | 2719 | error = xfs_dir_canenter(tp, dp, dir_name, resblks); |
2766 | (error = xfs_dir_canenter(tp, dp, dir_name, dir_namelen))) | 2720 | if (error) |
2767 | goto error_return; | 2721 | goto error_return; |
2768 | /* | 2722 | /* |
2769 | * create the directory inode. | 2723 | * create the directory inode. |
@@ -2786,15 +2740,15 @@ xfs_mkdir( | |||
2786 | * from here on will result in the transaction cancel | 2740 | * from here on will result in the transaction cancel |
2787 | * unlocking dp so don't do it explicitly in the error path. | 2741 | * unlocking dp so don't do it explicitly in the error path. |
2788 | */ | 2742 | */ |
2789 | VN_HOLD(dir_vp); | 2743 | IHOLD(dp); |
2790 | xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); | 2744 | xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); |
2791 | unlock_dp_on_error = B_FALSE; | 2745 | unlock_dp_on_error = B_FALSE; |
2792 | 2746 | ||
2793 | XFS_BMAP_INIT(&free_list, &first_block); | 2747 | XFS_BMAP_INIT(&free_list, &first_block); |
2794 | 2748 | ||
2795 | error = xfs_dir_createname(tp, dp, dir_name, dir_namelen, cdp->i_ino, | 2749 | error = xfs_dir_createname(tp, dp, dir_name, cdp->i_ino, |
2796 | &first_block, &free_list, resblks ? | 2750 | &first_block, &free_list, resblks ? |
2797 | resblks - XFS_IALLOC_SPACE_RES(mp) : 0); | 2751 | resblks - XFS_IALLOC_SPACE_RES(mp) : 0); |
2798 | if (error) { | 2752 | if (error) { |
2799 | ASSERT(error != ENOSPC); | 2753 | ASSERT(error != ENOSPC); |
2800 | goto error1; | 2754 | goto error1; |
@@ -2817,11 +2771,9 @@ xfs_mkdir( | |||
2817 | if (error) | 2771 | if (error) |
2818 | goto error2; | 2772 | goto error2; |
2819 | 2773 | ||
2820 | cvp = XFS_ITOV(cdp); | ||
2821 | |||
2822 | created = B_TRUE; | 2774 | created = B_TRUE; |
2823 | 2775 | ||
2824 | *vpp = cvp; | 2776 | *ipp = cdp; |
2825 | IHOLD(cdp); | 2777 | IHOLD(cdp); |
2826 | 2778 | ||
2827 | /* | 2779 | /* |
@@ -2858,10 +2810,10 @@ std_return: | |||
2858 | if ((created || (error != 0 && dm_event_sent != 0)) && | 2810 | if ((created || (error != 0 && dm_event_sent != 0)) && |
2859 | DM_EVENT_ENABLED(dp, DM_EVENT_POSTCREATE)) { | 2811 | DM_EVENT_ENABLED(dp, DM_EVENT_POSTCREATE)) { |
2860 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE, | 2812 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE, |
2861 | dir_vp, DM_RIGHT_NULL, | 2813 | dp, DM_RIGHT_NULL, |
2862 | created ? XFS_ITOV(cdp):NULL, | 2814 | created ? cdp : NULL, |
2863 | DM_RIGHT_NULL, | 2815 | DM_RIGHT_NULL, |
2864 | dir_name, NULL, | 2816 | dir_name->name, NULL, |
2865 | mode, error, 0); | 2817 | mode, error, 0); |
2866 | } | 2818 | } |
2867 | return error; | 2819 | return error; |
@@ -2885,20 +2837,17 @@ std_return: | |||
2885 | int | 2837 | int |
2886 | xfs_rmdir( | 2838 | xfs_rmdir( |
2887 | xfs_inode_t *dp, | 2839 | xfs_inode_t *dp, |
2888 | bhv_vname_t *dentry) | 2840 | struct xfs_name *name, |
2841 | xfs_inode_t *cdp) | ||
2889 | { | 2842 | { |
2890 | bhv_vnode_t *dir_vp = XFS_ITOV(dp); | 2843 | bhv_vnode_t *dir_vp = XFS_ITOV(dp); |
2891 | char *name = VNAME(dentry); | ||
2892 | int namelen = VNAMELEN(dentry); | ||
2893 | xfs_mount_t *mp = dp->i_mount; | 2844 | xfs_mount_t *mp = dp->i_mount; |
2894 | xfs_inode_t *cdp; /* child directory */ | ||
2895 | xfs_trans_t *tp; | 2845 | xfs_trans_t *tp; |
2896 | int error; | 2846 | int error; |
2897 | xfs_bmap_free_t free_list; | 2847 | xfs_bmap_free_t free_list; |
2898 | xfs_fsblock_t first_block; | 2848 | xfs_fsblock_t first_block; |
2899 | int cancel_flags; | 2849 | int cancel_flags; |
2900 | int committed; | 2850 | int committed; |
2901 | int dm_di_mode = S_IFDIR; | ||
2902 | int last_cdp_link; | 2851 | int last_cdp_link; |
2903 | uint resblks; | 2852 | uint resblks; |
2904 | 2853 | ||
@@ -2907,24 +2856,15 @@ xfs_rmdir( | |||
2907 | if (XFS_FORCED_SHUTDOWN(mp)) | 2856 | if (XFS_FORCED_SHUTDOWN(mp)) |
2908 | return XFS_ERROR(EIO); | 2857 | return XFS_ERROR(EIO); |
2909 | 2858 | ||
2910 | if (!xfs_get_dir_entry(dentry, &cdp)) { | ||
2911 | dm_di_mode = cdp->i_d.di_mode; | ||
2912 | IRELE(cdp); | ||
2913 | } | ||
2914 | |||
2915 | if (DM_EVENT_ENABLED(dp, DM_EVENT_REMOVE)) { | 2859 | if (DM_EVENT_ENABLED(dp, DM_EVENT_REMOVE)) { |
2916 | error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, | 2860 | error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, |
2917 | dir_vp, DM_RIGHT_NULL, | 2861 | dp, DM_RIGHT_NULL, |
2918 | NULL, DM_RIGHT_NULL, | 2862 | NULL, DM_RIGHT_NULL, name->name, |
2919 | name, NULL, dm_di_mode, 0, 0); | 2863 | NULL, cdp->i_d.di_mode, 0, 0); |
2920 | if (error) | 2864 | if (error) |
2921 | return XFS_ERROR(error); | 2865 | return XFS_ERROR(error); |
2922 | } | 2866 | } |
2923 | 2867 | ||
2924 | /* Return through std_return after this point. */ | ||
2925 | |||
2926 | cdp = NULL; | ||
2927 | |||
2928 | /* | 2868 | /* |
2929 | * We need to get a reference to cdp before we get our log | 2869 | * We need to get a reference to cdp before we get our log |
2930 | * reservation. The reason for this is that we cannot call | 2870 | * reservation. The reason for this is that we cannot call |
@@ -2937,13 +2877,7 @@ xfs_rmdir( | |||
2937 | * when we call xfs_iget. Instead we get an unlocked reference | 2877 | * when we call xfs_iget. Instead we get an unlocked reference |
2938 | * to the inode before getting our log reservation. | 2878 | * to the inode before getting our log reservation. |
2939 | */ | 2879 | */ |
2940 | error = xfs_get_dir_entry(dentry, &cdp); | 2880 | IHOLD(cdp); |
2941 | if (error) { | ||
2942 | REMOVE_DEBUG_TRACE(__LINE__); | ||
2943 | goto std_return; | ||
2944 | } | ||
2945 | mp = dp->i_mount; | ||
2946 | dm_di_mode = cdp->i_d.di_mode; | ||
2947 | 2881 | ||
2948 | /* | 2882 | /* |
2949 | * Get the dquots for the inodes. | 2883 | * Get the dquots for the inodes. |
@@ -3020,7 +2954,7 @@ xfs_rmdir( | |||
3020 | goto error_return; | 2954 | goto error_return; |
3021 | } | 2955 | } |
3022 | 2956 | ||
3023 | error = xfs_dir_removename(tp, dp, name, namelen, cdp->i_ino, | 2957 | error = xfs_dir_removename(tp, dp, name, cdp->i_ino, |
3024 | &first_block, &free_list, resblks); | 2958 | &first_block, &free_list, resblks); |
3025 | if (error) | 2959 | if (error) |
3026 | goto error1; | 2960 | goto error1; |
@@ -3098,9 +3032,9 @@ xfs_rmdir( | |||
3098 | std_return: | 3032 | std_return: |
3099 | if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) { | 3033 | if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) { |
3100 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, | 3034 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, |
3101 | dir_vp, DM_RIGHT_NULL, | 3035 | dp, DM_RIGHT_NULL, |
3102 | NULL, DM_RIGHT_NULL, | 3036 | NULL, DM_RIGHT_NULL, |
3103 | name, NULL, dm_di_mode, | 3037 | name->name, NULL, cdp->i_d.di_mode, |
3104 | error, 0); | 3038 | error, 0); |
3105 | } | 3039 | } |
3106 | return error; | 3040 | return error; |
@@ -3118,13 +3052,12 @@ xfs_rmdir( | |||
3118 | int | 3052 | int |
3119 | xfs_symlink( | 3053 | xfs_symlink( |
3120 | xfs_inode_t *dp, | 3054 | xfs_inode_t *dp, |
3121 | bhv_vname_t *dentry, | 3055 | struct xfs_name *link_name, |
3122 | char *target_path, | 3056 | const char *target_path, |
3123 | mode_t mode, | 3057 | mode_t mode, |
3124 | bhv_vnode_t **vpp, | 3058 | xfs_inode_t **ipp, |
3125 | cred_t *credp) | 3059 | cred_t *credp) |
3126 | { | 3060 | { |
3127 | bhv_vnode_t *dir_vp = XFS_ITOV(dp); | ||
3128 | xfs_mount_t *mp = dp->i_mount; | 3061 | xfs_mount_t *mp = dp->i_mount; |
3129 | xfs_trans_t *tp; | 3062 | xfs_trans_t *tp; |
3130 | xfs_inode_t *ip; | 3063 | xfs_inode_t *ip; |
@@ -3140,17 +3073,15 @@ xfs_symlink( | |||
3140 | int nmaps; | 3073 | int nmaps; |
3141 | xfs_bmbt_irec_t mval[SYMLINK_MAPS]; | 3074 | xfs_bmbt_irec_t mval[SYMLINK_MAPS]; |
3142 | xfs_daddr_t d; | 3075 | xfs_daddr_t d; |
3143 | char *cur_chunk; | 3076 | const char *cur_chunk; |
3144 | int byte_cnt; | 3077 | int byte_cnt; |
3145 | int n; | 3078 | int n; |
3146 | xfs_buf_t *bp; | 3079 | xfs_buf_t *bp; |
3147 | xfs_prid_t prid; | 3080 | xfs_prid_t prid; |
3148 | struct xfs_dquot *udqp, *gdqp; | 3081 | struct xfs_dquot *udqp, *gdqp; |
3149 | uint resblks; | 3082 | uint resblks; |
3150 | char *link_name = VNAME(dentry); | ||
3151 | int link_namelen; | ||
3152 | 3083 | ||
3153 | *vpp = NULL; | 3084 | *ipp = NULL; |
3154 | error = 0; | 3085 | error = 0; |
3155 | ip = NULL; | 3086 | ip = NULL; |
3156 | tp = NULL; | 3087 | tp = NULL; |
@@ -3160,44 +3091,17 @@ xfs_symlink( | |||
3160 | if (XFS_FORCED_SHUTDOWN(mp)) | 3091 | if (XFS_FORCED_SHUTDOWN(mp)) |
3161 | return XFS_ERROR(EIO); | 3092 | return XFS_ERROR(EIO); |
3162 | 3093 | ||
3163 | link_namelen = VNAMELEN(dentry); | ||
3164 | |||
3165 | /* | 3094 | /* |
3166 | * Check component lengths of the target path name. | 3095 | * Check component lengths of the target path name. |
3167 | */ | 3096 | */ |
3168 | pathlen = strlen(target_path); | 3097 | pathlen = strlen(target_path); |
3169 | if (pathlen >= MAXPATHLEN) /* total string too long */ | 3098 | if (pathlen >= MAXPATHLEN) /* total string too long */ |
3170 | return XFS_ERROR(ENAMETOOLONG); | 3099 | return XFS_ERROR(ENAMETOOLONG); |
3171 | if (pathlen >= MAXNAMELEN) { /* is any component too long? */ | ||
3172 | int len, total; | ||
3173 | char *path; | ||
3174 | |||
3175 | for (total = 0, path = target_path; total < pathlen;) { | ||
3176 | /* | ||
3177 | * Skip any slashes. | ||
3178 | */ | ||
3179 | while(*path == '/') { | ||
3180 | total++; | ||
3181 | path++; | ||
3182 | } | ||
3183 | |||
3184 | /* | ||
3185 | * Count up to the next slash or end of path. | ||
3186 | * Error out if the component is bigger than MAXNAMELEN. | ||
3187 | */ | ||
3188 | for(len = 0; *path != '/' && total < pathlen;total++, path++) { | ||
3189 | if (++len >= MAXNAMELEN) { | ||
3190 | error = ENAMETOOLONG; | ||
3191 | return error; | ||
3192 | } | ||
3193 | } | ||
3194 | } | ||
3195 | } | ||
3196 | 3100 | ||
3197 | if (DM_EVENT_ENABLED(dp, DM_EVENT_SYMLINK)) { | 3101 | if (DM_EVENT_ENABLED(dp, DM_EVENT_SYMLINK)) { |
3198 | error = XFS_SEND_NAMESP(mp, DM_EVENT_SYMLINK, dir_vp, | 3102 | error = XFS_SEND_NAMESP(mp, DM_EVENT_SYMLINK, dp, |
3199 | DM_RIGHT_NULL, NULL, DM_RIGHT_NULL, | 3103 | DM_RIGHT_NULL, NULL, DM_RIGHT_NULL, |
3200 | link_name, target_path, 0, 0, 0); | 3104 | link_name->name, target_path, 0, 0, 0); |
3201 | if (error) | 3105 | if (error) |
3202 | return error; | 3106 | return error; |
3203 | } | 3107 | } |
@@ -3229,7 +3133,7 @@ xfs_symlink( | |||
3229 | fs_blocks = 0; | 3133 | fs_blocks = 0; |
3230 | else | 3134 | else |
3231 | fs_blocks = XFS_B_TO_FSB(mp, pathlen); | 3135 | fs_blocks = XFS_B_TO_FSB(mp, pathlen); |
3232 | resblks = XFS_SYMLINK_SPACE_RES(mp, link_namelen, fs_blocks); | 3136 | resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks); |
3233 | error = xfs_trans_reserve(tp, resblks, XFS_SYMLINK_LOG_RES(mp), 0, | 3137 | error = xfs_trans_reserve(tp, resblks, XFS_SYMLINK_LOG_RES(mp), 0, |
3234 | XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT); | 3138 | XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT); |
3235 | if (error == ENOSPC && fs_blocks == 0) { | 3139 | if (error == ENOSPC && fs_blocks == 0) { |
@@ -3263,8 +3167,8 @@ xfs_symlink( | |||
3263 | /* | 3167 | /* |
3264 | * Check for ability to enter directory entry, if no space reserved. | 3168 | * Check for ability to enter directory entry, if no space reserved. |
3265 | */ | 3169 | */ |
3266 | if (resblks == 0 && | 3170 | error = xfs_dir_canenter(tp, dp, link_name, resblks); |
3267 | (error = xfs_dir_canenter(tp, dp, link_name, link_namelen))) | 3171 | if (error) |
3268 | goto error_return; | 3172 | goto error_return; |
3269 | /* | 3173 | /* |
3270 | * Initialize the bmap freelist prior to calling either | 3174 | * Initialize the bmap freelist prior to calling either |
@@ -3289,7 +3193,7 @@ xfs_symlink( | |||
3289 | * transaction cancel unlocking dp so don't do it explicitly in the | 3193 | * transaction cancel unlocking dp so don't do it explicitly in the |
3290 | * error path. | 3194 | * error path. |
3291 | */ | 3195 | */ |
3292 | VN_HOLD(dir_vp); | 3196 | IHOLD(dp); |
3293 | xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); | 3197 | xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); |
3294 | unlock_dp_on_error = B_FALSE; | 3198 | unlock_dp_on_error = B_FALSE; |
3295 | 3199 | ||
@@ -3356,8 +3260,8 @@ xfs_symlink( | |||
3356 | /* | 3260 | /* |
3357 | * Create the directory entry for the symlink. | 3261 | * Create the directory entry for the symlink. |
3358 | */ | 3262 | */ |
3359 | error = xfs_dir_createname(tp, dp, link_name, link_namelen, ip->i_ino, | 3263 | error = xfs_dir_createname(tp, dp, link_name, ip->i_ino, |
3360 | &first_block, &free_list, resblks); | 3264 | &first_block, &free_list, resblks); |
3361 | if (error) | 3265 | if (error) |
3362 | goto error1; | 3266 | goto error1; |
3363 | xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 3267 | xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); |
@@ -3399,19 +3303,14 @@ xfs_symlink( | |||
3399 | std_return: | 3303 | std_return: |
3400 | if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTSYMLINK)) { | 3304 | if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTSYMLINK)) { |
3401 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTSYMLINK, | 3305 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTSYMLINK, |
3402 | dir_vp, DM_RIGHT_NULL, | 3306 | dp, DM_RIGHT_NULL, |
3403 | error ? NULL : XFS_ITOV(ip), | 3307 | error ? NULL : ip, |
3404 | DM_RIGHT_NULL, link_name, target_path, | 3308 | DM_RIGHT_NULL, link_name->name, |
3405 | 0, error, 0); | 3309 | target_path, 0, error, 0); |
3406 | } | 3310 | } |
3407 | 3311 | ||
3408 | if (!error) { | 3312 | if (!error) |
3409 | bhv_vnode_t *vp; | 3313 | *ipp = ip; |
3410 | |||
3411 | ASSERT(ip); | ||
3412 | vp = XFS_ITOV(ip); | ||
3413 | *vpp = vp; | ||
3414 | } | ||
3415 | return error; | 3314 | return error; |
3416 | 3315 | ||
3417 | error2: | 3316 | error2: |
@@ -3431,60 +3330,11 @@ std_return: | |||
3431 | } | 3330 | } |
3432 | 3331 | ||
3433 | int | 3332 | int |
3434 | xfs_rwlock( | ||
3435 | xfs_inode_t *ip, | ||
3436 | bhv_vrwlock_t locktype) | ||
3437 | { | ||
3438 | if (S_ISDIR(ip->i_d.di_mode)) | ||
3439 | return 1; | ||
3440 | if (locktype == VRWLOCK_WRITE) { | ||
3441 | xfs_ilock(ip, XFS_IOLOCK_EXCL); | ||
3442 | } else if (locktype == VRWLOCK_TRY_READ) { | ||
3443 | return xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED); | ||
3444 | } else if (locktype == VRWLOCK_TRY_WRITE) { | ||
3445 | return xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL); | ||
3446 | } else { | ||
3447 | ASSERT((locktype == VRWLOCK_READ) || | ||
3448 | (locktype == VRWLOCK_WRITE_DIRECT)); | ||
3449 | xfs_ilock(ip, XFS_IOLOCK_SHARED); | ||
3450 | } | ||
3451 | |||
3452 | return 1; | ||
3453 | } | ||
3454 | |||
3455 | |||
3456 | void | ||
3457 | xfs_rwunlock( | ||
3458 | xfs_inode_t *ip, | ||
3459 | bhv_vrwlock_t locktype) | ||
3460 | { | ||
3461 | if (S_ISDIR(ip->i_d.di_mode)) | ||
3462 | return; | ||
3463 | if (locktype == VRWLOCK_WRITE) { | ||
3464 | /* | ||
3465 | * In the write case, we may have added a new entry to | ||
3466 | * the reference cache. This might store a pointer to | ||
3467 | * an inode to be released in this inode. If it is there, | ||
3468 | * clear the pointer and release the inode after unlocking | ||
3469 | * this one. | ||
3470 | */ | ||
3471 | xfs_refcache_iunlock(ip, XFS_IOLOCK_EXCL); | ||
3472 | } else { | ||
3473 | ASSERT((locktype == VRWLOCK_READ) || | ||
3474 | (locktype == VRWLOCK_WRITE_DIRECT)); | ||
3475 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); | ||
3476 | } | ||
3477 | return; | ||
3478 | } | ||
3479 | |||
3480 | |||
3481 | int | ||
3482 | xfs_inode_flush( | 3333 | xfs_inode_flush( |
3483 | xfs_inode_t *ip, | 3334 | xfs_inode_t *ip, |
3484 | int flags) | 3335 | int flags) |
3485 | { | 3336 | { |
3486 | xfs_mount_t *mp = ip->i_mount; | 3337 | xfs_mount_t *mp = ip->i_mount; |
3487 | xfs_inode_log_item_t *iip = ip->i_itemp; | ||
3488 | int error = 0; | 3338 | int error = 0; |
3489 | 3339 | ||
3490 | if (XFS_FORCED_SHUTDOWN(mp)) | 3340 | if (XFS_FORCED_SHUTDOWN(mp)) |
@@ -3494,33 +3344,9 @@ xfs_inode_flush( | |||
3494 | * Bypass inodes which have already been cleaned by | 3344 | * Bypass inodes which have already been cleaned by |
3495 | * the inode flush clustering code inside xfs_iflush | 3345 | * the inode flush clustering code inside xfs_iflush |
3496 | */ | 3346 | */ |
3497 | if ((ip->i_update_core == 0) && | 3347 | if (xfs_inode_clean(ip)) |
3498 | ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) | ||
3499 | return 0; | 3348 | return 0; |
3500 | 3349 | ||
3501 | if (flags & FLUSH_LOG) { | ||
3502 | if (iip && iip->ili_last_lsn) { | ||
3503 | xlog_t *log = mp->m_log; | ||
3504 | xfs_lsn_t sync_lsn; | ||
3505 | int log_flags = XFS_LOG_FORCE; | ||
3506 | |||
3507 | spin_lock(&log->l_grant_lock); | ||
3508 | sync_lsn = log->l_last_sync_lsn; | ||
3509 | spin_unlock(&log->l_grant_lock); | ||
3510 | |||
3511 | if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) > 0)) { | ||
3512 | if (flags & FLUSH_SYNC) | ||
3513 | log_flags |= XFS_LOG_SYNC; | ||
3514 | error = xfs_log_force(mp, iip->ili_last_lsn, log_flags); | ||
3515 | if (error) | ||
3516 | return error; | ||
3517 | } | ||
3518 | |||
3519 | if (ip->i_update_core == 0) | ||
3520 | return 0; | ||
3521 | } | ||
3522 | } | ||
3523 | |||
3524 | /* | 3350 | /* |
3525 | * We make this non-blocking if the inode is contended, | 3351 | * We make this non-blocking if the inode is contended, |
3526 | * return EAGAIN to indicate to the caller that they | 3352 | * return EAGAIN to indicate to the caller that they |
@@ -3528,30 +3354,22 @@ xfs_inode_flush( | |||
3528 | * blocking on inodes inside another operation right | 3354 | * blocking on inodes inside another operation right |
3529 | * now, they get caught later by xfs_sync. | 3355 | * now, they get caught later by xfs_sync. |
3530 | */ | 3356 | */ |
3531 | if (flags & FLUSH_INODE) { | 3357 | if (flags & FLUSH_SYNC) { |
3532 | int flush_flags; | 3358 | xfs_ilock(ip, XFS_ILOCK_SHARED); |
3533 | 3359 | xfs_iflock(ip); | |
3534 | if (flags & FLUSH_SYNC) { | 3360 | } else if (xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { |
3535 | xfs_ilock(ip, XFS_ILOCK_SHARED); | 3361 | if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) { |
3536 | xfs_iflock(ip); | 3362 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
3537 | } else if (xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { | ||
3538 | if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) { | ||
3539 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | ||
3540 | return EAGAIN; | ||
3541 | } | ||
3542 | } else { | ||
3543 | return EAGAIN; | 3363 | return EAGAIN; |
3544 | } | 3364 | } |
3545 | 3365 | } else { | |
3546 | if (flags & FLUSH_SYNC) | 3366 | return EAGAIN; |
3547 | flush_flags = XFS_IFLUSH_SYNC; | ||
3548 | else | ||
3549 | flush_flags = XFS_IFLUSH_ASYNC; | ||
3550 | |||
3551 | error = xfs_iflush(ip, flush_flags); | ||
3552 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | ||
3553 | } | 3367 | } |
3554 | 3368 | ||
3369 | error = xfs_iflush(ip, (flags & FLUSH_SYNC) ? XFS_IFLUSH_SYNC | ||
3370 | : XFS_IFLUSH_ASYNC_NOBLOCK); | ||
3371 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | ||
3372 | |||
3555 | return error; | 3373 | return error; |
3556 | } | 3374 | } |
3557 | 3375 | ||
@@ -3694,12 +3512,12 @@ xfs_finish_reclaim( | |||
3694 | * We get the flush lock regardless, though, just to make sure | 3512 | * We get the flush lock regardless, though, just to make sure |
3695 | * we don't free it while it is being flushed. | 3513 | * we don't free it while it is being flushed. |
3696 | */ | 3514 | */ |
3697 | if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { | 3515 | if (!locked) { |
3698 | if (!locked) { | 3516 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
3699 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 3517 | xfs_iflock(ip); |
3700 | xfs_iflock(ip); | 3518 | } |
3701 | } | ||
3702 | 3519 | ||
3520 | if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { | ||
3703 | if (ip->i_update_core || | 3521 | if (ip->i_update_core || |
3704 | ((ip->i_itemp != NULL) && | 3522 | ((ip->i_itemp != NULL) && |
3705 | (ip->i_itemp->ili_format.ilf_fields != 0))) { | 3523 | (ip->i_itemp->ili_format.ilf_fields != 0))) { |
@@ -3719,17 +3537,11 @@ xfs_finish_reclaim( | |||
3719 | ASSERT(ip->i_update_core == 0); | 3537 | ASSERT(ip->i_update_core == 0); |
3720 | ASSERT(ip->i_itemp == NULL || | 3538 | ASSERT(ip->i_itemp == NULL || |
3721 | ip->i_itemp->ili_format.ilf_fields == 0); | 3539 | ip->i_itemp->ili_format.ilf_fields == 0); |
3722 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | ||
3723 | } else if (locked) { | ||
3724 | /* | ||
3725 | * We are not interested in doing an iflush if we're | ||
3726 | * in the process of shutting down the filesystem forcibly. | ||
3727 | * So, just reclaim the inode. | ||
3728 | */ | ||
3729 | xfs_ifunlock(ip); | ||
3730 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | ||
3731 | } | 3540 | } |
3732 | 3541 | ||
3542 | xfs_ifunlock(ip); | ||
3543 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | ||
3544 | |||
3733 | reclaim: | 3545 | reclaim: |
3734 | xfs_ireclaim(ip); | 3546 | xfs_ireclaim(ip); |
3735 | return 0; | 3547 | return 0; |
@@ -3845,9 +3657,8 @@ xfs_alloc_file_space( | |||
3845 | end_dmi_offset = offset+len; | 3657 | end_dmi_offset = offset+len; |
3846 | if (end_dmi_offset > ip->i_size) | 3658 | if (end_dmi_offset > ip->i_size) |
3847 | end_dmi_offset = ip->i_size; | 3659 | end_dmi_offset = ip->i_size; |
3848 | error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOV(ip), | 3660 | error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, offset, |
3849 | offset, end_dmi_offset - offset, | 3661 | end_dmi_offset - offset, 0, NULL); |
3850 | 0, NULL); | ||
3851 | if (error) | 3662 | if (error) |
3852 | return error; | 3663 | return error; |
3853 | } | 3664 | } |
@@ -3956,8 +3767,8 @@ dmapi_enospc_check: | |||
3956 | if (error == ENOSPC && (attr_flags & ATTR_DMI) == 0 && | 3767 | if (error == ENOSPC && (attr_flags & ATTR_DMI) == 0 && |
3957 | DM_EVENT_ENABLED(ip, DM_EVENT_NOSPACE)) { | 3768 | DM_EVENT_ENABLED(ip, DM_EVENT_NOSPACE)) { |
3958 | error = XFS_SEND_NAMESP(mp, DM_EVENT_NOSPACE, | 3769 | error = XFS_SEND_NAMESP(mp, DM_EVENT_NOSPACE, |
3959 | XFS_ITOV(ip), DM_RIGHT_NULL, | 3770 | ip, DM_RIGHT_NULL, |
3960 | XFS_ITOV(ip), DM_RIGHT_NULL, | 3771 | ip, DM_RIGHT_NULL, |
3961 | NULL, NULL, 0, 0, 0); /* Delay flag intentionally unused */ | 3772 | NULL, NULL, 0, 0, 0); /* Delay flag intentionally unused */ |
3962 | if (error == 0) | 3773 | if (error == 0) |
3963 | goto retry; /* Maybe DMAPI app. has made space */ | 3774 | goto retry; /* Maybe DMAPI app. has made space */ |
@@ -4021,7 +3832,8 @@ xfs_zero_remaining_bytes( | |||
4021 | XFS_BUF_READ(bp); | 3832 | XFS_BUF_READ(bp); |
4022 | XFS_BUF_SET_ADDR(bp, XFS_FSB_TO_DB(ip, imap.br_startblock)); | 3833 | XFS_BUF_SET_ADDR(bp, XFS_FSB_TO_DB(ip, imap.br_startblock)); |
4023 | xfsbdstrat(mp, bp); | 3834 | xfsbdstrat(mp, bp); |
4024 | if ((error = xfs_iowait(bp))) { | 3835 | error = xfs_iowait(bp); |
3836 | if (error) { | ||
4025 | xfs_ioerror_alert("xfs_zero_remaining_bytes(read)", | 3837 | xfs_ioerror_alert("xfs_zero_remaining_bytes(read)", |
4026 | mp, bp, XFS_BUF_ADDR(bp)); | 3838 | mp, bp, XFS_BUF_ADDR(bp)); |
4027 | break; | 3839 | break; |
@@ -4033,7 +3845,8 @@ xfs_zero_remaining_bytes( | |||
4033 | XFS_BUF_UNREAD(bp); | 3845 | XFS_BUF_UNREAD(bp); |
4034 | XFS_BUF_WRITE(bp); | 3846 | XFS_BUF_WRITE(bp); |
4035 | xfsbdstrat(mp, bp); | 3847 | xfsbdstrat(mp, bp); |
4036 | if ((error = xfs_iowait(bp))) { | 3848 | error = xfs_iowait(bp); |
3849 | if (error) { | ||
4037 | xfs_ioerror_alert("xfs_zero_remaining_bytes(write)", | 3850 | xfs_ioerror_alert("xfs_zero_remaining_bytes(write)", |
4038 | mp, bp, XFS_BUF_ADDR(bp)); | 3851 | mp, bp, XFS_BUF_ADDR(bp)); |
4039 | break; | 3852 | break; |
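Both xfs_zero_remaining_bytes() hunks above make the same stylistic change: the assignment buried in the if-condition is split into a plain assignment followed by a separate check, the usual kernel style. A before/after sketch, assuming only that xfs_iowait() returns an int error code:

	/* Old form: assignment inside the condition */
	if ((error = xfs_iowait(bp))) {
		/* report the I/O error and break out */
	}

	/* New form: separate assignment, then the check */
	error = xfs_iowait(bp);
	if (error) {
		/* report the I/O error and break out */
	}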
@@ -4102,7 +3915,7 @@ xfs_free_file_space( | |||
4102 | DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) { | 3915 | DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) { |
4103 | if (end_dmi_offset > ip->i_size) | 3916 | if (end_dmi_offset > ip->i_size) |
4104 | end_dmi_offset = ip->i_size; | 3917 | end_dmi_offset = ip->i_size; |
4105 | error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, vp, | 3918 | error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, |
4106 | offset, end_dmi_offset - offset, | 3919 | offset, end_dmi_offset - offset, |
4107 | AT_DELAY_FLAG(attr_flags), NULL); | 3920 | AT_DELAY_FLAG(attr_flags), NULL); |
4108 | if (error) | 3921 | if (error) |
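The xfs_vnodeops.h hunk below shows the interface side of the same cleanup: the directory operations now take struct xfs_name (a counted name) and struct xfs_inode pointers instead of the behaviour-layer bhv_vname_t/bhv_vnode_t types. The struct layout and the caller fragment below are illustrative assumptions only; the real definition of struct xfs_name lives elsewhere in the tree and is not part of this diff.

	/* Assumed layout of the counted-name type referenced by the new
	 * prototypes (illustration only):
	 */
	struct xfs_name {
		const char	*name;	/* not necessarily NUL-terminated */
		int		len;
	};

	/* Hypothetical caller using the new xfs_lookup() signature
	 * (dp is the parent directory inode, assumed to be in scope):
	 */
	struct xfs_name	name = { .name = "foo", .len = 3 };
	struct xfs_inode *ip = NULL;
	int		error;

	error = xfs_lookup(dp, &name, &ip);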
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h index 4e3970f0e5e3..24c53923dc2c 100644 --- a/fs/xfs/xfs_vnodeops.h +++ b/fs/xfs/xfs_vnodeops.h | |||
@@ -23,31 +23,32 @@ int xfs_fsync(struct xfs_inode *ip, int flag, xfs_off_t start, | |||
23 | xfs_off_t stop); | 23 | xfs_off_t stop); |
24 | int xfs_release(struct xfs_inode *ip); | 24 | int xfs_release(struct xfs_inode *ip); |
25 | int xfs_inactive(struct xfs_inode *ip); | 25 | int xfs_inactive(struct xfs_inode *ip); |
26 | int xfs_lookup(struct xfs_inode *dp, bhv_vname_t *dentry, | 26 | int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name, |
27 | bhv_vnode_t **vpp); | 27 | struct xfs_inode **ipp); |
28 | int xfs_create(struct xfs_inode *dp, bhv_vname_t *dentry, mode_t mode, | 28 | int xfs_create(struct xfs_inode *dp, struct xfs_name *name, mode_t mode, |
29 | xfs_dev_t rdev, bhv_vnode_t **vpp, struct cred *credp); | 29 | xfs_dev_t rdev, struct xfs_inode **ipp, struct cred *credp); |
30 | int xfs_remove(struct xfs_inode *dp, bhv_vname_t *dentry); | 30 | int xfs_remove(struct xfs_inode *dp, struct xfs_name *name, |
31 | int xfs_link(struct xfs_inode *tdp, bhv_vnode_t *src_vp, | 31 | struct xfs_inode *ip); |
32 | bhv_vname_t *dentry); | 32 | int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip, |
33 | int xfs_mkdir(struct xfs_inode *dp, bhv_vname_t *dentry, | 33 | struct xfs_name *target_name); |
34 | mode_t mode, bhv_vnode_t **vpp, struct cred *credp); | 34 | int xfs_mkdir(struct xfs_inode *dp, struct xfs_name *dir_name, |
35 | int xfs_rmdir(struct xfs_inode *dp, bhv_vname_t *dentry); | 35 | mode_t mode, struct xfs_inode **ipp, struct cred *credp); |
36 | int xfs_rmdir(struct xfs_inode *dp, struct xfs_name *name, | ||
37 | struct xfs_inode *cdp); | ||
36 | int xfs_readdir(struct xfs_inode *dp, void *dirent, size_t bufsize, | 38 | int xfs_readdir(struct xfs_inode *dp, void *dirent, size_t bufsize, |
37 | xfs_off_t *offset, filldir_t filldir); | 39 | xfs_off_t *offset, filldir_t filldir); |
38 | int xfs_symlink(struct xfs_inode *dp, bhv_vname_t *dentry, | 40 | int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name, |
39 | char *target_path, mode_t mode, bhv_vnode_t **vpp, | 41 | const char *target_path, mode_t mode, struct xfs_inode **ipp, |
40 | struct cred *credp); | 42 | struct cred *credp); |
41 | int xfs_rwlock(struct xfs_inode *ip, bhv_vrwlock_t locktype); | ||
42 | void xfs_rwunlock(struct xfs_inode *ip, bhv_vrwlock_t locktype); | ||
43 | int xfs_inode_flush(struct xfs_inode *ip, int flags); | 43 | int xfs_inode_flush(struct xfs_inode *ip, int flags); |
44 | int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state); | 44 | int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state); |
45 | int xfs_reclaim(struct xfs_inode *ip); | 45 | int xfs_reclaim(struct xfs_inode *ip); |
46 | int xfs_change_file_space(struct xfs_inode *ip, int cmd, | 46 | int xfs_change_file_space(struct xfs_inode *ip, int cmd, |
47 | xfs_flock64_t *bf, xfs_off_t offset, | 47 | xfs_flock64_t *bf, xfs_off_t offset, |
48 | struct cred *credp, int attr_flags); | 48 | struct cred *credp, int attr_flags); |
49 | int xfs_rename(struct xfs_inode *src_dp, bhv_vname_t *src_vname, | 49 | int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name, |
50 | bhv_vnode_t *target_dir_vp, bhv_vname_t *target_vname); | 50 | struct xfs_inode *src_ip, struct xfs_inode *target_dp, |
51 | struct xfs_name *target_name); | ||
51 | int xfs_attr_get(struct xfs_inode *ip, const char *name, char *value, | 52 | int xfs_attr_get(struct xfs_inode *ip, const char *name, char *value, |
52 | int *valuelenp, int flags, cred_t *cred); | 53 | int *valuelenp, int flags, cred_t *cred); |
53 | int xfs_attr_set(struct xfs_inode *dp, const char *name, char *value, | 54 | int xfs_attr_set(struct xfs_inode *dp, const char *name, char *value, |