author     Linus Torvalds <torvalds@linux-foundation.org>   2008-08-13 18:17:49 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-08-13 18:17:49 -0400
commit     9ea319b61613085f501a79cf8d405cb221d084f3
tree       5bf7e1b9f104a0df029d355927fa9eb398db37bb /fs
parent     3e11acd4306d558249c31cf6cac09f218f2de52e
parent     c6a7b0f8a49aa71792dd108efc535435f462bf79
Merge git://oss.sgi.com:8090/xfs/linux-2.6
* git://oss.sgi.com:8090/xfs/linux-2.6: (45 commits)
[XFS] Fix use after free in xfs_log_done().
[XFS] Make xfs_bmap_*_count_leaves void.
[XFS] Use KM_NOFS for debug trace buffers
[XFS] use KM_MAYFAIL in xfs_mountfs
[XFS] refactor xfs_mount_free
[XFS] don't call xfs_freesb from xfs_unmountfs
[XFS] xfs_unmountfs should return void
[XFS] cleanup xfs_mountfs
[XFS] move root inode IRELE into xfs_unmountfs
[XFS] stop using file_update_time
[XFS] optimize xfs_ichgtime
[XFS] update timestamp in xfs_ialloc manually
[XFS] remove the sema_t from XFS.
[XFS] replace dquot flush semaphore with a completion
[XFS] replace inode flush semaphore with a completion
[XFS] extend completions to provide XFS object flush requirements
[XFS] replace the XFS buf iodone semaphore with a completion
[XFS] clean up stale references to semaphores
[XFS] use get_unaligned_* helpers
[XFS] Fix compile failure in xfs_buf_trace()
...
Diffstat (limited to 'fs')
61 files changed, 838 insertions, 1326 deletions
diff --git a/fs/xfs/linux-2.6/sema.h b/fs/xfs/linux-2.6/sema.h
deleted file mode 100644
index 3abe7e9ceb33..000000000000
--- a/fs/xfs/linux-2.6/sema.h
+++ /dev/null
@@ -1,52 +0,0 @@
1 | /* | ||
2 | * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | #ifndef __XFS_SUPPORT_SEMA_H__ | ||
19 | #define __XFS_SUPPORT_SEMA_H__ | ||
20 | |||
21 | #include <linux/time.h> | ||
22 | #include <linux/wait.h> | ||
23 | #include <linux/semaphore.h> | ||
24 | #include <asm/atomic.h> | ||
25 | |||
26 | /* | ||
27 | * sema_t structure just maps to struct semaphore in Linux kernel. | ||
28 | */ | ||
29 | |||
30 | typedef struct semaphore sema_t; | ||
31 | |||
32 | #define initnsema(sp, val, name) sema_init(sp, val) | ||
33 | #define psema(sp, b) down(sp) | ||
34 | #define vsema(sp) up(sp) | ||
35 | #define freesema(sema) do { } while (0) | ||
36 | |||
37 | static inline int issemalocked(sema_t *sp) | ||
38 | { | ||
39 | return down_trylock(sp) || (up(sp), 0); | ||
40 | } | ||
41 | |||
42 | /* | ||
43 | * Map cpsema (try to get the sema) to down_trylock. We need to switch | ||
44 | * the return values since cpsema returns 1 (acquired) 0 (failed) and | ||
45 | * down_trylock returns the reverse 0 (acquired) 1 (failed). | ||
46 | */ | ||
47 | static inline int cpsema(sema_t *sp) | ||
48 | { | ||
49 | return down_trylock(sp) ? 0 : 1; | ||
50 | } | ||
51 | |||
52 | #endif /* __XFS_SUPPORT_SEMA_H__ */ | ||
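This merge removes the sema.h wrapper header entirely: the IRIX-style sema_t API shown above was only a thin veneer over struct semaphore, and the commits in this series replace the remaining "wait for an event" users with completions (see the buffer iodone, inode flush, and dquot flush conversions below). As a rough, hedged sketch (not code from this merge; the waiter structs and helper names are made up), the old and new idioms compare like this:

    #include <linux/semaphore.h>
    #include <linux/completion.h>

    /* Old idiom: a semaphore initialised locked and used as a wait flag. */
    struct old_waiter {
        struct semaphore done;      /* starts held; up() releases the waiter */
    };

    /* New idiom: a completion, which is explicitly a wait-for-event object. */
    struct new_waiter {
        struct completion done;     /* init_completion(): starts "not done" */
    };

    static void old_wait(struct old_waiter *w)   { down(&w->done); }
    static void old_signal(struct old_waiter *w) { up(&w->done); }

    static void new_wait(struct new_waiter *w)   { wait_for_completion(&w->done); }
    static void new_signal(struct new_waiter *w) { complete(&w->done); }

The completion form states the intent directly and matches the kernel's direction at the time of moving single-use event semaphores over to completions.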
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index fa47e43b8b41..f42f80a3b1fa 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -73,7 +73,6 @@ xfs_page_trace(
73 | unsigned long pgoff) | 73 | unsigned long pgoff) |
74 | { | 74 | { |
75 | xfs_inode_t *ip; | 75 | xfs_inode_t *ip; |
76 | bhv_vnode_t *vp = vn_from_inode(inode); | ||
77 | loff_t isize = i_size_read(inode); | 76 | loff_t isize = i_size_read(inode); |
78 | loff_t offset = page_offset(page); | 77 | loff_t offset = page_offset(page); |
79 | int delalloc = -1, unmapped = -1, unwritten = -1; | 78 | int delalloc = -1, unmapped = -1, unwritten = -1; |
@@ -81,7 +80,7 @@ xfs_page_trace( | |||
81 | if (page_has_buffers(page)) | 80 | if (page_has_buffers(page)) |
82 | xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); | 81 | xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); |
83 | 82 | ||
84 | ip = xfs_vtoi(vp); | 83 | ip = XFS_I(inode); |
85 | if (!ip->i_rwtrace) | 84 | if (!ip->i_rwtrace) |
86 | return; | 85 | return; |
87 | 86 | ||
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 9cc8f0213095..986061ae1b9b 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -58,7 +58,7 @@ xfs_buf_trace(
58 | bp, id, | 58 | bp, id, |
59 | (void *)(unsigned long)bp->b_flags, | 59 | (void *)(unsigned long)bp->b_flags, |
60 | (void *)(unsigned long)bp->b_hold.counter, | 60 | (void *)(unsigned long)bp->b_hold.counter, |
61 | (void *)(unsigned long)bp->b_sema.count.counter, | 61 | (void *)(unsigned long)bp->b_sema.count, |
62 | (void *)current, | 62 | (void *)current, |
63 | data, ra, | 63 | data, ra, |
64 | (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff), | 64 | (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff), |
@@ -253,7 +253,7 @@ _xfs_buf_initialize( | |||
253 | 253 | ||
254 | memset(bp, 0, sizeof(xfs_buf_t)); | 254 | memset(bp, 0, sizeof(xfs_buf_t)); |
255 | atomic_set(&bp->b_hold, 1); | 255 | atomic_set(&bp->b_hold, 1); |
256 | init_MUTEX_LOCKED(&bp->b_iodonesema); | 256 | init_completion(&bp->b_iowait); |
257 | INIT_LIST_HEAD(&bp->b_list); | 257 | INIT_LIST_HEAD(&bp->b_list); |
258 | INIT_LIST_HEAD(&bp->b_hash_list); | 258 | INIT_LIST_HEAD(&bp->b_hash_list); |
259 | init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */ | 259 | init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */ |
@@ -838,6 +838,7 @@ xfs_buf_rele( | |||
838 | return; | 838 | return; |
839 | } | 839 | } |
840 | 840 | ||
841 | ASSERT(atomic_read(&bp->b_hold) > 0); | ||
841 | if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) { | 842 | if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) { |
842 | if (bp->b_relse) { | 843 | if (bp->b_relse) { |
843 | atomic_inc(&bp->b_hold); | 844 | atomic_inc(&bp->b_hold); |
@@ -851,11 +852,6 @@ xfs_buf_rele( | |||
851 | spin_unlock(&hash->bh_lock); | 852 | spin_unlock(&hash->bh_lock); |
852 | xfs_buf_free(bp); | 853 | xfs_buf_free(bp); |
853 | } | 854 | } |
854 | } else { | ||
855 | /* | ||
856 | * Catch reference count leaks | ||
857 | */ | ||
858 | ASSERT(atomic_read(&bp->b_hold) >= 0); | ||
859 | } | 855 | } |
860 | } | 856 | } |
861 | 857 | ||
@@ -1037,7 +1033,7 @@ xfs_buf_ioend( | |||
1037 | xfs_buf_iodone_work(&bp->b_iodone_work); | 1033 | xfs_buf_iodone_work(&bp->b_iodone_work); |
1038 | } | 1034 | } |
1039 | } else { | 1035 | } else { |
1040 | up(&bp->b_iodonesema); | 1036 | complete(&bp->b_iowait); |
1041 | } | 1037 | } |
1042 | } | 1038 | } |
1043 | 1039 | ||
@@ -1275,7 +1271,7 @@ xfs_buf_iowait( | |||
1275 | XB_TRACE(bp, "iowait", 0); | 1271 | XB_TRACE(bp, "iowait", 0); |
1276 | if (atomic_read(&bp->b_io_remaining)) | 1272 | if (atomic_read(&bp->b_io_remaining)) |
1277 | blk_run_address_space(bp->b_target->bt_mapping); | 1273 | blk_run_address_space(bp->b_target->bt_mapping); |
1278 | down(&bp->b_iodonesema); | 1274 | wait_for_completion(&bp->b_iowait); |
1279 | XB_TRACE(bp, "iowaited", (long)bp->b_error); | 1275 | XB_TRACE(bp, "iowaited", (long)bp->b_error); |
1280 | return bp->b_error; | 1276 | return bp->b_error; |
1281 | } | 1277 | } |
@@ -1799,7 +1795,7 @@ int __init | |||
1799 | xfs_buf_init(void) | 1795 | xfs_buf_init(void) |
1800 | { | 1796 | { |
1801 | #ifdef XFS_BUF_TRACE | 1797 | #ifdef XFS_BUF_TRACE |
1802 | xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP); | 1798 | xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_NOFS); |
1803 | #endif | 1799 | #endif |
1804 | 1800 | ||
1805 | xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf", | 1801 | xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf", |
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 29d1d4adc078..fe0109956656 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -157,7 +157,7 @@ typedef struct xfs_buf {
157 | xfs_buf_iodone_t b_iodone; /* I/O completion function */ | 157 | xfs_buf_iodone_t b_iodone; /* I/O completion function */ |
158 | xfs_buf_relse_t b_relse; /* releasing function */ | 158 | xfs_buf_relse_t b_relse; /* releasing function */ |
159 | xfs_buf_bdstrat_t b_strat; /* pre-write function */ | 159 | xfs_buf_bdstrat_t b_strat; /* pre-write function */ |
160 | struct semaphore b_iodonesema; /* Semaphore for I/O waiters */ | 160 | struct completion b_iowait; /* queue for I/O waiters */ |
161 | void *b_fspriv; | 161 | void *b_fspriv; |
162 | void *b_fspriv2; | 162 | void *b_fspriv2; |
163 | void *b_fspriv3; | 163 | void *b_fspriv3; |
@@ -352,7 +352,7 @@ extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *); | |||
352 | #define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0) | 352 | #define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0) |
353 | #define XFS_BUF_VSEMA(bp) xfs_buf_unlock(bp) | 353 | #define XFS_BUF_VSEMA(bp) xfs_buf_unlock(bp) |
354 | #define XFS_BUF_PSEMA(bp,x) xfs_buf_lock(bp) | 354 | #define XFS_BUF_PSEMA(bp,x) xfs_buf_lock(bp) |
355 | #define XFS_BUF_V_IODONESEMA(bp) up(&bp->b_iodonesema); | 355 | #define XFS_BUF_FINISH_IOWAIT(bp) complete(&bp->b_iowait); |
356 | 356 | ||
357 | #define XFS_BUF_SET_TARGET(bp, target) ((bp)->b_target = (target)) | 357 | #define XFS_BUF_SET_TARGET(bp, target) ((bp)->b_target = (target)) |
358 | #define XFS_BUF_TARGET(bp) ((bp)->b_target) | 358 | #define XFS_BUF_TARGET(bp) ((bp)->b_target) |
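The xfs_buf changes above are all one pattern: the b_iodonesema semaphore that I/O waiters slept on becomes a b_iowait completion, signalled from xfs_buf_ioend() and waited on in xfs_buf_iowait(). A minimal stand-alone sketch of that pattern, with hypothetical names (my_buf and the my_buf_* helpers are illustrative, not XFS functions):

    #include <linux/completion.h>

    /* Hypothetical buffer object showing the b_iowait pattern. */
    struct my_buf {
        struct completion io_done;  /* replaces the old "I/O done" semaphore */
        int error;
    };

    static void my_buf_init(struct my_buf *bp)
    {
        bp->error = 0;
        init_completion(&bp->io_done);  /* not complete yet, like a held semaphore */
    }

    /* Runs in I/O completion context; was up(&bp->b_iodonesema). */
    static void my_buf_ioend(struct my_buf *bp, int error)
    {
        bp->error = error;
        complete(&bp->io_done);
    }

    /* Runs in the submitting thread; was down(&bp->b_iodonesema). */
    static int my_buf_iowait(struct my_buf *bp)
    {
        wait_for_completion(&bp->io_done);
        return bp->error;
    }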
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index 987fe84f7b13..24fd598af846 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -139,7 +139,7 @@ xfs_nfs_get_inode(
139 | } | 139 | } |
140 | 140 | ||
141 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 141 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
142 | return ip->i_vnode; | 142 | return VFS_I(ip); |
143 | } | 143 | } |
144 | 144 | ||
145 | STATIC struct dentry * | 145 | STATIC struct dentry * |
@@ -167,7 +167,7 @@ xfs_fs_fh_to_dentry(struct super_block *sb, struct fid *fid, | |||
167 | if (!inode) | 167 | if (!inode) |
168 | return NULL; | 168 | return NULL; |
169 | if (IS_ERR(inode)) | 169 | if (IS_ERR(inode)) |
170 | return ERR_PTR(PTR_ERR(inode)); | 170 | return ERR_CAST(inode); |
171 | result = d_alloc_anon(inode); | 171 | result = d_alloc_anon(inode); |
172 | if (!result) { | 172 | if (!result) { |
173 | iput(inode); | 173 | iput(inode); |
@@ -198,7 +198,7 @@ xfs_fs_fh_to_parent(struct super_block *sb, struct fid *fid, | |||
198 | if (!inode) | 198 | if (!inode) |
199 | return NULL; | 199 | return NULL; |
200 | if (IS_ERR(inode)) | 200 | if (IS_ERR(inode)) |
201 | return ERR_PTR(PTR_ERR(inode)); | 201 | return ERR_CAST(inode); |
202 | result = d_alloc_anon(inode); | 202 | result = d_alloc_anon(inode); |
203 | if (!result) { | 203 | if (!result) { |
204 | iput(inode); | 204 | iput(inode); |
@@ -219,9 +219,9 @@ xfs_fs_get_parent( | |||
219 | if (unlikely(error)) | 219 | if (unlikely(error)) |
220 | return ERR_PTR(-error); | 220 | return ERR_PTR(-error); |
221 | 221 | ||
222 | parent = d_alloc_anon(cip->i_vnode); | 222 | parent = d_alloc_anon(VFS_I(cip)); |
223 | if (unlikely(!parent)) { | 223 | if (unlikely(!parent)) { |
224 | iput(cip->i_vnode); | 224 | iput(VFS_I(cip)); |
225 | return ERR_PTR(-ENOMEM); | 225 | return ERR_PTR(-ENOMEM); |
226 | } | 226 | } |
227 | return parent; | 227 | return parent; |
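Besides the VFS_I() conversion, two hunks above swap ERR_PTR(PTR_ERR(inode)) for ERR_CAST(inode), which re-types an error pointer without bouncing it through a long. A tiny illustrative sketch (demo_fh_to_dentry is a made-up helper, not the XFS export code):

    #include <linux/err.h>
    #include <linux/fs.h>

    /* Return a dentry-typed error when handed an inode-typed error pointer. */
    static struct dentry *demo_fh_to_dentry(struct inode *inode)
    {
        if (IS_ERR(inode))
            return ERR_CAST(inode); /* equivalent to ERR_PTR(PTR_ERR(inode)) */
        return NULL;                /* real code would go on to allocate a dentry */
    }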
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index 1eefe61f0e10..36caa6d957df 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -31,7 +31,7 @@ xfs_tosspages(
31 | xfs_off_t last, | 31 | xfs_off_t last, |
32 | int fiopt) | 32 | int fiopt) |
33 | { | 33 | { |
34 | struct address_space *mapping = ip->i_vnode->i_mapping; | 34 | struct address_space *mapping = VFS_I(ip)->i_mapping; |
35 | 35 | ||
36 | if (mapping->nrpages) | 36 | if (mapping->nrpages) |
37 | truncate_inode_pages(mapping, first); | 37 | truncate_inode_pages(mapping, first); |
@@ -44,7 +44,7 @@ xfs_flushinval_pages( | |||
44 | xfs_off_t last, | 44 | xfs_off_t last, |
45 | int fiopt) | 45 | int fiopt) |
46 | { | 46 | { |
47 | struct address_space *mapping = ip->i_vnode->i_mapping; | 47 | struct address_space *mapping = VFS_I(ip)->i_mapping; |
48 | int ret = 0; | 48 | int ret = 0; |
49 | 49 | ||
50 | if (mapping->nrpages) { | 50 | if (mapping->nrpages) { |
@@ -64,7 +64,7 @@ xfs_flush_pages( | |||
64 | uint64_t flags, | 64 | uint64_t flags, |
65 | int fiopt) | 65 | int fiopt) |
66 | { | 66 | { |
67 | struct address_space *mapping = ip->i_vnode->i_mapping; | 67 | struct address_space *mapping = VFS_I(ip)->i_mapping; |
68 | int ret = 0; | 68 | int ret = 0; |
69 | int ret2; | 69 | int ret2; |
70 | 70 | ||
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index acb978d9d085..48799ba7e3e6 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -245,7 +245,7 @@ xfs_vget_fsop_handlereq(
245 | 245 | ||
246 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 246 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
247 | 247 | ||
248 | *inode = XFS_ITOV(ip); | 248 | *inode = VFS_I(ip); |
249 | return 0; | 249 | return 0; |
250 | } | 250 | } |
251 | 251 | ||
@@ -927,7 +927,7 @@ STATIC void | |||
927 | xfs_diflags_to_linux( | 927 | xfs_diflags_to_linux( |
928 | struct xfs_inode *ip) | 928 | struct xfs_inode *ip) |
929 | { | 929 | { |
930 | struct inode *inode = XFS_ITOV(ip); | 930 | struct inode *inode = VFS_I(ip); |
931 | unsigned int xflags = xfs_ip2xflags(ip); | 931 | unsigned int xflags = xfs_ip2xflags(ip); |
932 | 932 | ||
933 | if (xflags & XFS_XFLAG_IMMUTABLE) | 933 | if (xflags & XFS_XFLAG_IMMUTABLE) |
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index e88f51028086..91bcd979242c 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -62,7 +62,7 @@ void
62 | xfs_synchronize_atime( | 62 | xfs_synchronize_atime( |
63 | xfs_inode_t *ip) | 63 | xfs_inode_t *ip) |
64 | { | 64 | { |
65 | struct inode *inode = ip->i_vnode; | 65 | struct inode *inode = VFS_I(ip); |
66 | 66 | ||
67 | if (inode) { | 67 | if (inode) { |
68 | ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec; | 68 | ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec; |
@@ -79,7 +79,7 @@ void | |||
79 | xfs_mark_inode_dirty_sync( | 79 | xfs_mark_inode_dirty_sync( |
80 | xfs_inode_t *ip) | 80 | xfs_inode_t *ip) |
81 | { | 81 | { |
82 | struct inode *inode = ip->i_vnode; | 82 | struct inode *inode = VFS_I(ip); |
83 | 83 | ||
84 | if (inode) | 84 | if (inode) |
85 | mark_inode_dirty_sync(inode); | 85 | mark_inode_dirty_sync(inode); |
@@ -89,36 +89,31 @@ xfs_mark_inode_dirty_sync( | |||
89 | * Change the requested timestamp in the given inode. | 89 | * Change the requested timestamp in the given inode. |
90 | * We don't lock across timestamp updates, and we don't log them but | 90 | * We don't lock across timestamp updates, and we don't log them but |
91 | * we do record the fact that there is dirty information in core. | 91 | * we do record the fact that there is dirty information in core. |
92 | * | ||
93 | * NOTE -- callers MUST combine XFS_ICHGTIME_MOD or XFS_ICHGTIME_CHG | ||
94 | * with XFS_ICHGTIME_ACC to be sure that access time | ||
95 | * update will take. Calling first with XFS_ICHGTIME_ACC | ||
96 | * and then XFS_ICHGTIME_MOD may fail to modify the access | ||
97 | * timestamp if the filesystem is mounted noacctm. | ||
98 | */ | 92 | */ |
99 | void | 93 | void |
100 | xfs_ichgtime( | 94 | xfs_ichgtime( |
101 | xfs_inode_t *ip, | 95 | xfs_inode_t *ip, |
102 | int flags) | 96 | int flags) |
103 | { | 97 | { |
104 | struct inode *inode = vn_to_inode(XFS_ITOV(ip)); | 98 | struct inode *inode = VFS_I(ip); |
105 | timespec_t tv; | 99 | timespec_t tv; |
100 | int sync_it = 0; | ||
101 | |||
102 | tv = current_fs_time(inode->i_sb); | ||
106 | 103 | ||
107 | nanotime(&tv); | 104 | if ((flags & XFS_ICHGTIME_MOD) && |
108 | if (flags & XFS_ICHGTIME_MOD) { | 105 | !timespec_equal(&inode->i_mtime, &tv)) { |
109 | inode->i_mtime = tv; | 106 | inode->i_mtime = tv; |
110 | ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec; | 107 | ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec; |
111 | ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec; | 108 | ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec; |
109 | sync_it = 1; | ||
112 | } | 110 | } |
113 | if (flags & XFS_ICHGTIME_ACC) { | 111 | if ((flags & XFS_ICHGTIME_CHG) && |
114 | inode->i_atime = tv; | 112 | !timespec_equal(&inode->i_ctime, &tv)) { |
115 | ip->i_d.di_atime.t_sec = (__int32_t)tv.tv_sec; | ||
116 | ip->i_d.di_atime.t_nsec = (__int32_t)tv.tv_nsec; | ||
117 | } | ||
118 | if (flags & XFS_ICHGTIME_CHG) { | ||
119 | inode->i_ctime = tv; | 113 | inode->i_ctime = tv; |
120 | ip->i_d.di_ctime.t_sec = (__int32_t)tv.tv_sec; | 114 | ip->i_d.di_ctime.t_sec = (__int32_t)tv.tv_sec; |
121 | ip->i_d.di_ctime.t_nsec = (__int32_t)tv.tv_nsec; | 115 | ip->i_d.di_ctime.t_nsec = (__int32_t)tv.tv_nsec; |
116 | sync_it = 1; | ||
122 | } | 117 | } |
123 | 118 | ||
124 | /* | 119 | /* |
@@ -130,55 +125,11 @@ xfs_ichgtime( | |||
130 | * ensure that the compiler does not reorder the update | 125 | * ensure that the compiler does not reorder the update |
131 | * of i_update_core above the timestamp updates above. | 126 | * of i_update_core above the timestamp updates above. |
132 | */ | 127 | */ |
133 | SYNCHRONIZE(); | 128 | if (sync_it) { |
134 | ip->i_update_core = 1; | 129 | SYNCHRONIZE(); |
135 | if (!(inode->i_state & I_NEW)) | 130 | ip->i_update_core = 1; |
136 | mark_inode_dirty_sync(inode); | 131 | mark_inode_dirty_sync(inode); |
137 | } | ||
138 | |||
139 | /* | ||
140 | * Variant on the above which avoids querying the system clock | ||
141 | * in situations where we know the Linux inode timestamps have | ||
142 | * just been updated (and so we can update our inode cheaply). | ||
143 | */ | ||
144 | void | ||
145 | xfs_ichgtime_fast( | ||
146 | xfs_inode_t *ip, | ||
147 | struct inode *inode, | ||
148 | int flags) | ||
149 | { | ||
150 | timespec_t *tvp; | ||
151 | |||
152 | /* | ||
153 | * Atime updates for read() & friends are handled lazily now, and | ||
154 | * explicit updates must go through xfs_ichgtime() | ||
155 | */ | ||
156 | ASSERT((flags & XFS_ICHGTIME_ACC) == 0); | ||
157 | |||
158 | if (flags & XFS_ICHGTIME_MOD) { | ||
159 | tvp = &inode->i_mtime; | ||
160 | ip->i_d.di_mtime.t_sec = (__int32_t)tvp->tv_sec; | ||
161 | ip->i_d.di_mtime.t_nsec = (__int32_t)tvp->tv_nsec; | ||
162 | } | 132 | } |
163 | if (flags & XFS_ICHGTIME_CHG) { | ||
164 | tvp = &inode->i_ctime; | ||
165 | ip->i_d.di_ctime.t_sec = (__int32_t)tvp->tv_sec; | ||
166 | ip->i_d.di_ctime.t_nsec = (__int32_t)tvp->tv_nsec; | ||
167 | } | ||
168 | |||
169 | /* | ||
170 | * We update the i_update_core field _after_ changing | ||
171 | * the timestamps in order to coordinate properly with | ||
172 | * xfs_iflush() so that we don't lose timestamp updates. | ||
173 | * This keeps us from having to hold the inode lock | ||
174 | * while doing this. We use the SYNCHRONIZE macro to | ||
175 | * ensure that the compiler does not reorder the update | ||
176 | * of i_update_core above the timestamp updates above. | ||
177 | */ | ||
178 | SYNCHRONIZE(); | ||
179 | ip->i_update_core = 1; | ||
180 | if (!(inode->i_state & I_NEW)) | ||
181 | mark_inode_dirty_sync(inode); | ||
182 | } | 133 | } |
183 | 134 | ||
184 | /* | 135 | /* |
@@ -299,7 +250,7 @@ xfs_vn_mknod( | |||
299 | if (unlikely(error)) | 250 | if (unlikely(error)) |
300 | goto out_free_acl; | 251 | goto out_free_acl; |
301 | 252 | ||
302 | inode = ip->i_vnode; | 253 | inode = VFS_I(ip); |
303 | 254 | ||
304 | error = xfs_init_security(inode, dir); | 255 | error = xfs_init_security(inode, dir); |
305 | if (unlikely(error)) | 256 | if (unlikely(error)) |
@@ -366,7 +317,7 @@ xfs_vn_lookup( | |||
366 | return NULL; | 317 | return NULL; |
367 | } | 318 | } |
368 | 319 | ||
369 | return d_splice_alias(cip->i_vnode, dentry); | 320 | return d_splice_alias(VFS_I(cip), dentry); |
370 | } | 321 | } |
371 | 322 | ||
372 | STATIC struct dentry * | 323 | STATIC struct dentry * |
@@ -399,12 +350,12 @@ xfs_vn_ci_lookup( | |||
399 | 350 | ||
400 | /* if exact match, just splice and exit */ | 351 | /* if exact match, just splice and exit */ |
401 | if (!ci_name.name) | 352 | if (!ci_name.name) |
402 | return d_splice_alias(ip->i_vnode, dentry); | 353 | return d_splice_alias(VFS_I(ip), dentry); |
403 | 354 | ||
404 | /* else case-insensitive match... */ | 355 | /* else case-insensitive match... */ |
405 | dname.name = ci_name.name; | 356 | dname.name = ci_name.name; |
406 | dname.len = ci_name.len; | 357 | dname.len = ci_name.len; |
407 | dentry = d_add_ci(ip->i_vnode, dentry, &dname); | 358 | dentry = d_add_ci(VFS_I(ip), dentry, &dname); |
408 | kmem_free(ci_name.name); | 359 | kmem_free(ci_name.name); |
409 | return dentry; | 360 | return dentry; |
410 | } | 361 | } |
@@ -478,7 +429,7 @@ xfs_vn_symlink( | |||
478 | if (unlikely(error)) | 429 | if (unlikely(error)) |
479 | goto out; | 430 | goto out; |
480 | 431 | ||
481 | inode = cip->i_vnode; | 432 | inode = VFS_I(cip); |
482 | 433 | ||
483 | error = xfs_init_security(inode, dir); | 434 | error = xfs_init_security(inode, dir); |
484 | if (unlikely(error)) | 435 | if (unlikely(error)) |
@@ -710,7 +661,7 @@ out_error: | |||
710 | return error; | 661 | return error; |
711 | } | 662 | } |
712 | 663 | ||
713 | const struct inode_operations xfs_inode_operations = { | 664 | static const struct inode_operations xfs_inode_operations = { |
714 | .permission = xfs_vn_permission, | 665 | .permission = xfs_vn_permission, |
715 | .truncate = xfs_vn_truncate, | 666 | .truncate = xfs_vn_truncate, |
716 | .getattr = xfs_vn_getattr, | 667 | .getattr = xfs_vn_getattr, |
@@ -722,7 +673,7 @@ const struct inode_operations xfs_inode_operations = { | |||
722 | .fallocate = xfs_vn_fallocate, | 673 | .fallocate = xfs_vn_fallocate, |
723 | }; | 674 | }; |
724 | 675 | ||
725 | const struct inode_operations xfs_dir_inode_operations = { | 676 | static const struct inode_operations xfs_dir_inode_operations = { |
726 | .create = xfs_vn_create, | 677 | .create = xfs_vn_create, |
727 | .lookup = xfs_vn_lookup, | 678 | .lookup = xfs_vn_lookup, |
728 | .link = xfs_vn_link, | 679 | .link = xfs_vn_link, |
@@ -747,7 +698,7 @@ const struct inode_operations xfs_dir_inode_operations = { | |||
747 | .listxattr = xfs_vn_listxattr, | 698 | .listxattr = xfs_vn_listxattr, |
748 | }; | 699 | }; |
749 | 700 | ||
750 | const struct inode_operations xfs_dir_ci_inode_operations = { | 701 | static const struct inode_operations xfs_dir_ci_inode_operations = { |
751 | .create = xfs_vn_create, | 702 | .create = xfs_vn_create, |
752 | .lookup = xfs_vn_ci_lookup, | 703 | .lookup = xfs_vn_ci_lookup, |
753 | .link = xfs_vn_link, | 704 | .link = xfs_vn_link, |
@@ -772,7 +723,7 @@ const struct inode_operations xfs_dir_ci_inode_operations = { | |||
772 | .listxattr = xfs_vn_listxattr, | 723 | .listxattr = xfs_vn_listxattr, |
773 | }; | 724 | }; |
774 | 725 | ||
775 | const struct inode_operations xfs_symlink_inode_operations = { | 726 | static const struct inode_operations xfs_symlink_inode_operations = { |
776 | .readlink = generic_readlink, | 727 | .readlink = generic_readlink, |
777 | .follow_link = xfs_vn_follow_link, | 728 | .follow_link = xfs_vn_follow_link, |
778 | .put_link = xfs_vn_put_link, | 729 | .put_link = xfs_vn_put_link, |
@@ -784,3 +735,98 @@ const struct inode_operations xfs_symlink_inode_operations = { | |||
784 | .removexattr = generic_removexattr, | 735 | .removexattr = generic_removexattr, |
785 | .listxattr = xfs_vn_listxattr, | 736 | .listxattr = xfs_vn_listxattr, |
786 | }; | 737 | }; |
738 | |||
739 | STATIC void | ||
740 | xfs_diflags_to_iflags( | ||
741 | struct inode *inode, | ||
742 | struct xfs_inode *ip) | ||
743 | { | ||
744 | if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE) | ||
745 | inode->i_flags |= S_IMMUTABLE; | ||
746 | else | ||
747 | inode->i_flags &= ~S_IMMUTABLE; | ||
748 | if (ip->i_d.di_flags & XFS_DIFLAG_APPEND) | ||
749 | inode->i_flags |= S_APPEND; | ||
750 | else | ||
751 | inode->i_flags &= ~S_APPEND; | ||
752 | if (ip->i_d.di_flags & XFS_DIFLAG_SYNC) | ||
753 | inode->i_flags |= S_SYNC; | ||
754 | else | ||
755 | inode->i_flags &= ~S_SYNC; | ||
756 | if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME) | ||
757 | inode->i_flags |= S_NOATIME; | ||
758 | else | ||
759 | inode->i_flags &= ~S_NOATIME; | ||
760 | } | ||
761 | |||
762 | /* | ||
763 | * Initialize the Linux inode, set up the operation vectors and | ||
764 | * unlock the inode. | ||
765 | * | ||
766 | * When reading existing inodes from disk this is called directly | ||
767 | * from xfs_iget, when creating a new inode it is called from | ||
768 | * xfs_ialloc after setting up the inode. | ||
769 | */ | ||
770 | void | ||
771 | xfs_setup_inode( | ||
772 | struct xfs_inode *ip) | ||
773 | { | ||
774 | struct inode *inode = ip->i_vnode; | ||
775 | |||
776 | inode->i_mode = ip->i_d.di_mode; | ||
777 | inode->i_nlink = ip->i_d.di_nlink; | ||
778 | inode->i_uid = ip->i_d.di_uid; | ||
779 | inode->i_gid = ip->i_d.di_gid; | ||
780 | |||
781 | switch (inode->i_mode & S_IFMT) { | ||
782 | case S_IFBLK: | ||
783 | case S_IFCHR: | ||
784 | inode->i_rdev = | ||
785 | MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff, | ||
786 | sysv_minor(ip->i_df.if_u2.if_rdev)); | ||
787 | break; | ||
788 | default: | ||
789 | inode->i_rdev = 0; | ||
790 | break; | ||
791 | } | ||
792 | |||
793 | inode->i_generation = ip->i_d.di_gen; | ||
794 | i_size_write(inode, ip->i_d.di_size); | ||
795 | inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec; | ||
796 | inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec; | ||
797 | inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec; | ||
798 | inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; | ||
799 | inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec; | ||
800 | inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; | ||
801 | xfs_diflags_to_iflags(inode, ip); | ||
802 | xfs_iflags_clear(ip, XFS_IMODIFIED); | ||
803 | |||
804 | switch (inode->i_mode & S_IFMT) { | ||
805 | case S_IFREG: | ||
806 | inode->i_op = &xfs_inode_operations; | ||
807 | inode->i_fop = &xfs_file_operations; | ||
808 | inode->i_mapping->a_ops = &xfs_address_space_operations; | ||
809 | break; | ||
810 | case S_IFDIR: | ||
811 | if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb)) | ||
812 | inode->i_op = &xfs_dir_ci_inode_operations; | ||
813 | else | ||
814 | inode->i_op = &xfs_dir_inode_operations; | ||
815 | inode->i_fop = &xfs_dir_file_operations; | ||
816 | break; | ||
817 | case S_IFLNK: | ||
818 | inode->i_op = &xfs_symlink_inode_operations; | ||
819 | if (!(ip->i_df.if_flags & XFS_IFINLINE)) | ||
820 | inode->i_mapping->a_ops = &xfs_address_space_operations; | ||
821 | break; | ||
822 | default: | ||
823 | inode->i_op = &xfs_inode_operations; | ||
824 | init_special_inode(inode, inode->i_mode, inode->i_rdev); | ||
825 | break; | ||
826 | } | ||
827 | |||
828 | xfs_iflags_clear(ip, XFS_INEW); | ||
829 | barrier(); | ||
830 | |||
831 | unlock_new_inode(inode); | ||
832 | } | ||
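Two things stand out in the xfs_iops.c changes: xfs_ichgtime() now reads current_fs_time() once and only dirties the inode when a timestamp actually moved (which is what lets the write path drop file_update_time() and the xfs_ichgtime_fast() variant, see the xfs_lrw.c hunk further down), and the new xfs_setup_inode() takes over the Linux-inode initialisation that previously lived in xfs_super.c. A condensed sketch of the timestamp pattern, assuming an ordinary VFS inode and the 2.6-era helpers used here (current_fs_time, timespec_equal, mark_inode_dirty_sync); update_times is a made-up name:

    #include <linux/fs.h>
    #include <linux/time.h>

    /* Touch mtime/ctime only if they actually change, then dirty the inode once. */
    static void update_times(struct inode *inode, int touch_mtime, int touch_ctime)
    {
        struct timespec now = current_fs_time(inode->i_sb);
        int dirty = 0;

        if (touch_mtime && !timespec_equal(&inode->i_mtime, &now)) {
            inode->i_mtime = now;
            dirty = 1;
        }
        if (touch_ctime && !timespec_equal(&inode->i_ctime, &now)) {
            inode->i_ctime = now;
            dirty = 1;
        }
        if (dirty)
            mark_inode_dirty_sync(inode);
    }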
diff --git a/fs/xfs/linux-2.6/xfs_iops.h b/fs/xfs/linux-2.6/xfs_iops.h
index d97ba934a2ac..8b1a1e31dc21 100644
--- a/fs/xfs/linux-2.6/xfs_iops.h
+++ b/fs/xfs/linux-2.6/xfs_iops.h
@@ -18,10 +18,7 @@
18 | #ifndef __XFS_IOPS_H__ | 18 | #ifndef __XFS_IOPS_H__ |
19 | #define __XFS_IOPS_H__ | 19 | #define __XFS_IOPS_H__ |
20 | 20 | ||
21 | extern const struct inode_operations xfs_inode_operations; | 21 | struct xfs_inode; |
22 | extern const struct inode_operations xfs_dir_inode_operations; | ||
23 | extern const struct inode_operations xfs_dir_ci_inode_operations; | ||
24 | extern const struct inode_operations xfs_symlink_inode_operations; | ||
25 | 22 | ||
26 | extern const struct file_operations xfs_file_operations; | 23 | extern const struct file_operations xfs_file_operations; |
27 | extern const struct file_operations xfs_dir_file_operations; | 24 | extern const struct file_operations xfs_dir_file_operations; |
@@ -29,14 +26,6 @@ extern const struct file_operations xfs_invis_file_operations; | |||
29 | 26 | ||
30 | extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size); | 27 | extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size); |
31 | 28 | ||
32 | struct xfs_inode; | 29 | extern void xfs_setup_inode(struct xfs_inode *); |
33 | extern void xfs_ichgtime(struct xfs_inode *, int); | ||
34 | extern void xfs_ichgtime_fast(struct xfs_inode *, struct inode *, int); | ||
35 | |||
36 | #define xfs_vtoi(vp) \ | ||
37 | ((struct xfs_inode *)vn_to_inode(vp)->i_private) | ||
38 | |||
39 | #define XFS_I(inode) \ | ||
40 | ((struct xfs_inode *)(inode)->i_private) | ||
41 | 30 | ||
42 | #endif /* __XFS_IOPS_H__ */ | 31 | #endif /* __XFS_IOPS_H__ */ |
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 4d45d9351a6c..3b7c4ff48ba0 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -45,13 +45,13 @@
45 | #include <mrlock.h> | 45 | #include <mrlock.h> |
46 | #include <sv.h> | 46 | #include <sv.h> |
47 | #include <mutex.h> | 47 | #include <mutex.h> |
48 | #include <sema.h> | ||
49 | #include <time.h> | 48 | #include <time.h> |
50 | 49 | ||
51 | #include <support/ktrace.h> | 50 | #include <support/ktrace.h> |
52 | #include <support/debug.h> | 51 | #include <support/debug.h> |
53 | #include <support/uuid.h> | 52 | #include <support/uuid.h> |
54 | 53 | ||
54 | #include <linux/semaphore.h> | ||
55 | #include <linux/mm.h> | 55 | #include <linux/mm.h> |
56 | #include <linux/kernel.h> | 56 | #include <linux/kernel.h> |
57 | #include <linux/blkdev.h> | 57 | #include <linux/blkdev.h> |
@@ -180,7 +180,7 @@ | |||
180 | #define xfs_sort(a,n,s,fn) sort(a,n,s,fn,NULL) | 180 | #define xfs_sort(a,n,s,fn) sort(a,n,s,fn,NULL) |
181 | #define xfs_stack_trace() dump_stack() | 181 | #define xfs_stack_trace() dump_stack() |
182 | #define xfs_itruncate_data(ip, off) \ | 182 | #define xfs_itruncate_data(ip, off) \ |
183 | (-vmtruncate(vn_to_inode(XFS_ITOV(ip)), (off))) | 183 | (-vmtruncate(VFS_I(ip), (off))) |
184 | 184 | ||
185 | 185 | ||
186 | /* Move the kernel do_div definition off to one side */ | 186 | /* Move the kernel do_div definition off to one side */ |
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 82333b3e118e..1957e5357d04 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -137,7 +137,7 @@ xfs_iozero(
137 | struct address_space *mapping; | 137 | struct address_space *mapping; |
138 | int status; | 138 | int status; |
139 | 139 | ||
140 | mapping = ip->i_vnode->i_mapping; | 140 | mapping = VFS_I(ip)->i_mapping; |
141 | do { | 141 | do { |
142 | unsigned offset, bytes; | 142 | unsigned offset, bytes; |
143 | void *fsdata; | 143 | void *fsdata; |
@@ -674,9 +674,7 @@ start: | |||
674 | */ | 674 | */ |
675 | if (likely(!(ioflags & IO_INVIS) && | 675 | if (likely(!(ioflags & IO_INVIS) && |
676 | !mnt_want_write(file->f_path.mnt))) { | 676 | !mnt_want_write(file->f_path.mnt))) { |
677 | file_update_time(file); | 677 | xfs_ichgtime(xip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); |
678 | xfs_ichgtime_fast(xip, inode, | ||
679 | XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | ||
680 | mnt_drop_write(file->f_path.mnt); | 678 | mnt_drop_write(file->f_path.mnt); |
681 | } | 679 | } |
682 | 680 | ||
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 30ae96397e31..73c65f19e549 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -581,118 +581,6 @@ xfs_max_file_offset(
581 | return (((__uint64_t)pagefactor) << bitshift) - 1; | 581 | return (((__uint64_t)pagefactor) << bitshift) - 1; |
582 | } | 582 | } |
583 | 583 | ||
584 | STATIC_INLINE void | ||
585 | xfs_set_inodeops( | ||
586 | struct inode *inode) | ||
587 | { | ||
588 | switch (inode->i_mode & S_IFMT) { | ||
589 | case S_IFREG: | ||
590 | inode->i_op = &xfs_inode_operations; | ||
591 | inode->i_fop = &xfs_file_operations; | ||
592 | inode->i_mapping->a_ops = &xfs_address_space_operations; | ||
593 | break; | ||
594 | case S_IFDIR: | ||
595 | if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb)) | ||
596 | inode->i_op = &xfs_dir_ci_inode_operations; | ||
597 | else | ||
598 | inode->i_op = &xfs_dir_inode_operations; | ||
599 | inode->i_fop = &xfs_dir_file_operations; | ||
600 | break; | ||
601 | case S_IFLNK: | ||
602 | inode->i_op = &xfs_symlink_inode_operations; | ||
603 | if (!(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE)) | ||
604 | inode->i_mapping->a_ops = &xfs_address_space_operations; | ||
605 | break; | ||
606 | default: | ||
607 | inode->i_op = &xfs_inode_operations; | ||
608 | init_special_inode(inode, inode->i_mode, inode->i_rdev); | ||
609 | break; | ||
610 | } | ||
611 | } | ||
612 | |||
613 | STATIC_INLINE void | ||
614 | xfs_revalidate_inode( | ||
615 | xfs_mount_t *mp, | ||
616 | bhv_vnode_t *vp, | ||
617 | xfs_inode_t *ip) | ||
618 | { | ||
619 | struct inode *inode = vn_to_inode(vp); | ||
620 | |||
621 | inode->i_mode = ip->i_d.di_mode; | ||
622 | inode->i_nlink = ip->i_d.di_nlink; | ||
623 | inode->i_uid = ip->i_d.di_uid; | ||
624 | inode->i_gid = ip->i_d.di_gid; | ||
625 | |||
626 | switch (inode->i_mode & S_IFMT) { | ||
627 | case S_IFBLK: | ||
628 | case S_IFCHR: | ||
629 | inode->i_rdev = | ||
630 | MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff, | ||
631 | sysv_minor(ip->i_df.if_u2.if_rdev)); | ||
632 | break; | ||
633 | default: | ||
634 | inode->i_rdev = 0; | ||
635 | break; | ||
636 | } | ||
637 | |||
638 | inode->i_generation = ip->i_d.di_gen; | ||
639 | i_size_write(inode, ip->i_d.di_size); | ||
640 | inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec; | ||
641 | inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec; | ||
642 | inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec; | ||
643 | inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; | ||
644 | inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec; | ||
645 | inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; | ||
646 | if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE) | ||
647 | inode->i_flags |= S_IMMUTABLE; | ||
648 | else | ||
649 | inode->i_flags &= ~S_IMMUTABLE; | ||
650 | if (ip->i_d.di_flags & XFS_DIFLAG_APPEND) | ||
651 | inode->i_flags |= S_APPEND; | ||
652 | else | ||
653 | inode->i_flags &= ~S_APPEND; | ||
654 | if (ip->i_d.di_flags & XFS_DIFLAG_SYNC) | ||
655 | inode->i_flags |= S_SYNC; | ||
656 | else | ||
657 | inode->i_flags &= ~S_SYNC; | ||
658 | if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME) | ||
659 | inode->i_flags |= S_NOATIME; | ||
660 | else | ||
661 | inode->i_flags &= ~S_NOATIME; | ||
662 | xfs_iflags_clear(ip, XFS_IMODIFIED); | ||
663 | } | ||
664 | |||
665 | void | ||
666 | xfs_initialize_vnode( | ||
667 | struct xfs_mount *mp, | ||
668 | bhv_vnode_t *vp, | ||
669 | struct xfs_inode *ip) | ||
670 | { | ||
671 | struct inode *inode = vn_to_inode(vp); | ||
672 | |||
673 | if (!ip->i_vnode) { | ||
674 | ip->i_vnode = vp; | ||
675 | inode->i_private = ip; | ||
676 | } | ||
677 | |||
678 | /* | ||
679 | * We need to set the ops vectors, and unlock the inode, but if | ||
680 | * we have been called during the new inode create process, it is | ||
681 | * too early to fill in the Linux inode. We will get called a | ||
682 | * second time once the inode is properly set up, and then we can | ||
683 | * finish our work. | ||
684 | */ | ||
685 | if (ip->i_d.di_mode != 0 && (inode->i_state & I_NEW)) { | ||
686 | xfs_revalidate_inode(mp, vp, ip); | ||
687 | xfs_set_inodeops(inode); | ||
688 | |||
689 | xfs_iflags_clear(ip, XFS_INEW); | ||
690 | barrier(); | ||
691 | |||
692 | unlock_new_inode(inode); | ||
693 | } | ||
694 | } | ||
695 | |||
696 | int | 584 | int |
697 | xfs_blkdev_get( | 585 | xfs_blkdev_get( |
698 | xfs_mount_t *mp, | 586 | xfs_mount_t *mp, |
@@ -982,26 +870,21 @@ STATIC struct inode * | |||
982 | xfs_fs_alloc_inode( | 870 | xfs_fs_alloc_inode( |
983 | struct super_block *sb) | 871 | struct super_block *sb) |
984 | { | 872 | { |
985 | bhv_vnode_t *vp; | 873 | return kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP); |
986 | |||
987 | vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP); | ||
988 | if (unlikely(!vp)) | ||
989 | return NULL; | ||
990 | return vn_to_inode(vp); | ||
991 | } | 874 | } |
992 | 875 | ||
993 | STATIC void | 876 | STATIC void |
994 | xfs_fs_destroy_inode( | 877 | xfs_fs_destroy_inode( |
995 | struct inode *inode) | 878 | struct inode *inode) |
996 | { | 879 | { |
997 | kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode)); | 880 | kmem_zone_free(xfs_vnode_zone, inode); |
998 | } | 881 | } |
999 | 882 | ||
1000 | STATIC void | 883 | STATIC void |
1001 | xfs_fs_inode_init_once( | 884 | xfs_fs_inode_init_once( |
1002 | void *vnode) | 885 | void *vnode) |
1003 | { | 886 | { |
1004 | inode_init_once(vn_to_inode((bhv_vnode_t *)vnode)); | 887 | inode_init_once((struct inode *)vnode); |
1005 | } | 888 | } |
1006 | 889 | ||
1007 | /* | 890 | /* |
@@ -1106,7 +989,7 @@ void | |||
1106 | xfs_flush_inode( | 989 | xfs_flush_inode( |
1107 | xfs_inode_t *ip) | 990 | xfs_inode_t *ip) |
1108 | { | 991 | { |
1109 | struct inode *inode = ip->i_vnode; | 992 | struct inode *inode = VFS_I(ip); |
1110 | 993 | ||
1111 | igrab(inode); | 994 | igrab(inode); |
1112 | xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work); | 995 | xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work); |
@@ -1131,7 +1014,7 @@ void | |||
1131 | xfs_flush_device( | 1014 | xfs_flush_device( |
1132 | xfs_inode_t *ip) | 1015 | xfs_inode_t *ip) |
1133 | { | 1016 | { |
1134 | struct inode *inode = vn_to_inode(XFS_ITOV(ip)); | 1017 | struct inode *inode = VFS_I(ip); |
1135 | 1018 | ||
1136 | igrab(inode); | 1019 | igrab(inode); |
1137 | xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work); | 1020 | xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work); |
@@ -1201,6 +1084,15 @@ xfssyncd( | |||
1201 | } | 1084 | } |
1202 | 1085 | ||
1203 | STATIC void | 1086 | STATIC void |
1087 | xfs_free_fsname( | ||
1088 | struct xfs_mount *mp) | ||
1089 | { | ||
1090 | kfree(mp->m_fsname); | ||
1091 | kfree(mp->m_rtname); | ||
1092 | kfree(mp->m_logname); | ||
1093 | } | ||
1094 | |||
1095 | STATIC void | ||
1204 | xfs_fs_put_super( | 1096 | xfs_fs_put_super( |
1205 | struct super_block *sb) | 1097 | struct super_block *sb) |
1206 | { | 1098 | { |
@@ -1239,8 +1131,6 @@ xfs_fs_put_super( | |||
1239 | error = xfs_unmount_flush(mp, 0); | 1131 | error = xfs_unmount_flush(mp, 0); |
1240 | WARN_ON(error); | 1132 | WARN_ON(error); |
1241 | 1133 | ||
1242 | IRELE(rip); | ||
1243 | |||
1244 | /* | 1134 | /* |
1245 | * If we're forcing a shutdown, typically because of a media error, | 1135 | * If we're forcing a shutdown, typically because of a media error, |
1246 | * we want to make sure we invalidate dirty pages that belong to | 1136 | * we want to make sure we invalidate dirty pages that belong to |
@@ -1257,10 +1147,12 @@ xfs_fs_put_super( | |||
1257 | } | 1147 | } |
1258 | 1148 | ||
1259 | xfs_unmountfs(mp); | 1149 | xfs_unmountfs(mp); |
1150 | xfs_freesb(mp); | ||
1260 | xfs_icsb_destroy_counters(mp); | 1151 | xfs_icsb_destroy_counters(mp); |
1261 | xfs_close_devices(mp); | 1152 | xfs_close_devices(mp); |
1262 | xfs_qmops_put(mp); | 1153 | xfs_qmops_put(mp); |
1263 | xfs_dmops_put(mp); | 1154 | xfs_dmops_put(mp); |
1155 | xfs_free_fsname(mp); | ||
1264 | kfree(mp); | 1156 | kfree(mp); |
1265 | } | 1157 | } |
1266 | 1158 | ||
@@ -1517,6 +1409,8 @@ xfs_start_flags( | |||
1517 | struct xfs_mount_args *ap, | 1409 | struct xfs_mount_args *ap, |
1518 | struct xfs_mount *mp) | 1410 | struct xfs_mount *mp) |
1519 | { | 1411 | { |
1412 | int error; | ||
1413 | |||
1520 | /* Values are in BBs */ | 1414 | /* Values are in BBs */ |
1521 | if ((ap->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) { | 1415 | if ((ap->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) { |
1522 | /* | 1416 | /* |
@@ -1549,17 +1443,27 @@ xfs_start_flags( | |||
1549 | ap->logbufsize); | 1443 | ap->logbufsize); |
1550 | return XFS_ERROR(EINVAL); | 1444 | return XFS_ERROR(EINVAL); |
1551 | } | 1445 | } |
1446 | |||
1447 | error = ENOMEM; | ||
1448 | |||
1552 | mp->m_logbsize = ap->logbufsize; | 1449 | mp->m_logbsize = ap->logbufsize; |
1553 | mp->m_fsname_len = strlen(ap->fsname) + 1; | 1450 | mp->m_fsname_len = strlen(ap->fsname) + 1; |
1554 | mp->m_fsname = kmem_alloc(mp->m_fsname_len, KM_SLEEP); | 1451 | |
1555 | strcpy(mp->m_fsname, ap->fsname); | 1452 | mp->m_fsname = kstrdup(ap->fsname, GFP_KERNEL); |
1453 | if (!mp->m_fsname) | ||
1454 | goto out; | ||
1455 | |||
1556 | if (ap->rtname[0]) { | 1456 | if (ap->rtname[0]) { |
1557 | mp->m_rtname = kmem_alloc(strlen(ap->rtname) + 1, KM_SLEEP); | 1457 | mp->m_rtname = kstrdup(ap->rtname, GFP_KERNEL); |
1558 | strcpy(mp->m_rtname, ap->rtname); | 1458 | if (!mp->m_rtname) |
1459 | goto out_free_fsname; | ||
1460 | |||
1559 | } | 1461 | } |
1462 | |||
1560 | if (ap->logname[0]) { | 1463 | if (ap->logname[0]) { |
1561 | mp->m_logname = kmem_alloc(strlen(ap->logname) + 1, KM_SLEEP); | 1464 | mp->m_logname = kstrdup(ap->logname, GFP_KERNEL); |
1562 | strcpy(mp->m_logname, ap->logname); | 1465 | if (!mp->m_logname) |
1466 | goto out_free_rtname; | ||
1563 | } | 1467 | } |
1564 | 1468 | ||
1565 | if (ap->flags & XFSMNT_WSYNC) | 1469 | if (ap->flags & XFSMNT_WSYNC) |
@@ -1632,6 +1536,14 @@ xfs_start_flags( | |||
1632 | if (ap->flags & XFSMNT_DMAPI) | 1536 | if (ap->flags & XFSMNT_DMAPI) |
1633 | mp->m_flags |= XFS_MOUNT_DMAPI; | 1537 | mp->m_flags |= XFS_MOUNT_DMAPI; |
1634 | return 0; | 1538 | return 0; |
1539 | |||
1540 | |||
1541 | out_free_rtname: | ||
1542 | kfree(mp->m_rtname); | ||
1543 | out_free_fsname: | ||
1544 | kfree(mp->m_fsname); | ||
1545 | out: | ||
1546 | return error; | ||
1635 | } | 1547 | } |
1636 | 1548 | ||
1637 | /* | 1549 | /* |
@@ -1792,10 +1704,10 @@ xfs_fs_fill_super( | |||
1792 | */ | 1704 | */ |
1793 | error = xfs_start_flags(args, mp); | 1705 | error = xfs_start_flags(args, mp); |
1794 | if (error) | 1706 | if (error) |
1795 | goto out_destroy_counters; | 1707 | goto out_free_fsname; |
1796 | error = xfs_readsb(mp, flags); | 1708 | error = xfs_readsb(mp, flags); |
1797 | if (error) | 1709 | if (error) |
1798 | goto out_destroy_counters; | 1710 | goto out_free_fsname; |
1799 | error = xfs_finish_flags(args, mp); | 1711 | error = xfs_finish_flags(args, mp); |
1800 | if (error) | 1712 | if (error) |
1801 | goto out_free_sb; | 1713 | goto out_free_sb; |
@@ -1811,7 +1723,7 @@ xfs_fs_fill_super( | |||
1811 | if (error) | 1723 | if (error) |
1812 | goto out_free_sb; | 1724 | goto out_free_sb; |
1813 | 1725 | ||
1814 | error = xfs_mountfs(mp, flags); | 1726 | error = xfs_mountfs(mp); |
1815 | if (error) | 1727 | if (error) |
1816 | goto out_filestream_unmount; | 1728 | goto out_filestream_unmount; |
1817 | 1729 | ||
@@ -1825,7 +1737,7 @@ xfs_fs_fill_super( | |||
1825 | sb->s_time_gran = 1; | 1737 | sb->s_time_gran = 1; |
1826 | set_posix_acl_flag(sb); | 1738 | set_posix_acl_flag(sb); |
1827 | 1739 | ||
1828 | root = igrab(mp->m_rootip->i_vnode); | 1740 | root = igrab(VFS_I(mp->m_rootip)); |
1829 | if (!root) { | 1741 | if (!root) { |
1830 | error = ENOENT; | 1742 | error = ENOENT; |
1831 | goto fail_unmount; | 1743 | goto fail_unmount; |
@@ -1857,7 +1769,8 @@ xfs_fs_fill_super( | |||
1857 | xfs_filestream_unmount(mp); | 1769 | xfs_filestream_unmount(mp); |
1858 | out_free_sb: | 1770 | out_free_sb: |
1859 | xfs_freesb(mp); | 1771 | xfs_freesb(mp); |
1860 | out_destroy_counters: | 1772 | out_free_fsname: |
1773 | xfs_free_fsname(mp); | ||
1861 | xfs_icsb_destroy_counters(mp); | 1774 | xfs_icsb_destroy_counters(mp); |
1862 | xfs_close_devices(mp); | 1775 | xfs_close_devices(mp); |
1863 | out_put_qmops: | 1776 | out_put_qmops: |
@@ -1890,10 +1803,8 @@ xfs_fs_fill_super( | |||
1890 | error = xfs_unmount_flush(mp, 0); | 1803 | error = xfs_unmount_flush(mp, 0); |
1891 | WARN_ON(error); | 1804 | WARN_ON(error); |
1892 | 1805 | ||
1893 | IRELE(mp->m_rootip); | ||
1894 | |||
1895 | xfs_unmountfs(mp); | 1806 | xfs_unmountfs(mp); |
1896 | goto out_destroy_counters; | 1807 | goto out_free_sb; |
1897 | } | 1808 | } |
1898 | 1809 | ||
1899 | STATIC int | 1810 | STATIC int |
@@ -2014,7 +1925,7 @@ xfs_free_trace_bufs(void) | |||
2014 | STATIC int __init | 1925 | STATIC int __init |
2015 | xfs_init_zones(void) | 1926 | xfs_init_zones(void) |
2016 | { | 1927 | { |
2017 | xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode", | 1928 | xfs_vnode_zone = kmem_zone_init_flags(sizeof(struct inode), "xfs_vnode", |
2018 | KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | | 1929 | KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | |
2019 | KM_ZONE_SPREAD, | 1930 | KM_ZONE_SPREAD, |
2020 | xfs_fs_inode_init_once); | 1931 | xfs_fs_inode_init_once); |
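The xfs_super.c changes above fold xfs_initialize_vnode()/xfs_revalidate_inode() into the new xfs_setup_inode(), and they also make xfs_start_flags() tolerate allocation failure: the KM_SLEEP kmem_alloc()+strcpy() pairs for the fs/rt/log names become kstrdup() calls with a goto unwind, released again by xfs_free_fsname(). The unwind skeleton, reduced to illustrative names (demo_mount and demo_set_names are not XFS identifiers):

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    struct demo_mount {
        char *fsname;
        char *rtname;
        char *logname;
    };

    static int demo_set_names(struct demo_mount *mp, const char *fs,
                              const char *rt, const char *log)
    {
        mp->fsname = mp->rtname = mp->logname = NULL;

        mp->fsname = kstrdup(fs, GFP_KERNEL);
        if (!mp->fsname)
            goto out;

        if (rt && rt[0]) {
            mp->rtname = kstrdup(rt, GFP_KERNEL);
            if (!mp->rtname)
                goto out_free_fsname;
        }
        if (log && log[0]) {
            mp->logname = kstrdup(log, GFP_KERNEL);
            if (!mp->logname)
                goto out_free_rtname;
        }
        return 0;

    out_free_rtname:
        kfree(mp->rtname);      /* kfree(NULL) is a no-op, so this unwinds safely */
    out_free_fsname:
        kfree(mp->fsname);
    out:
        return -ENOMEM;
    }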
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index b7d13da01bd6..fe2ef4e6a0f9 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -101,9 +101,6 @@ struct block_device;
101 | 101 | ||
102 | extern __uint64_t xfs_max_file_offset(unsigned int); | 102 | extern __uint64_t xfs_max_file_offset(unsigned int); |
103 | 103 | ||
104 | extern void xfs_initialize_vnode(struct xfs_mount *mp, bhv_vnode_t *vp, | ||
105 | struct xfs_inode *ip); | ||
106 | |||
107 | extern void xfs_flush_inode(struct xfs_inode *); | 104 | extern void xfs_flush_inode(struct xfs_inode *); |
108 | extern void xfs_flush_device(struct xfs_inode *); | 105 | extern void xfs_flush_device(struct xfs_inode *); |
109 | 106 | ||
diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c
index 25488b6d9881..b52528bbbfff 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.c
+++ b/fs/xfs/linux-2.6/xfs_vnode.c
@@ -33,7 +33,7 @@
33 | 33 | ||
34 | 34 | ||
35 | /* | 35 | /* |
36 | * Dedicated vnode inactive/reclaim sync semaphores. | 36 | * Dedicated vnode inactive/reclaim sync wait queues. |
37 | * Prime number of hash buckets since address is used as the key. | 37 | * Prime number of hash buckets since address is used as the key. |
38 | */ | 38 | */ |
39 | #define NVSYNC 37 | 39 | #define NVSYNC 37 |
@@ -82,24 +82,6 @@ vn_ioerror( | |||
82 | xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ, f, l); | 82 | xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ, f, l); |
83 | } | 83 | } |
84 | 84 | ||
85 | |||
86 | /* | ||
87 | * Add a reference to a referenced vnode. | ||
88 | */ | ||
89 | bhv_vnode_t * | ||
90 | vn_hold( | ||
91 | bhv_vnode_t *vp) | ||
92 | { | ||
93 | struct inode *inode; | ||
94 | |||
95 | XFS_STATS_INC(vn_hold); | ||
96 | |||
97 | inode = igrab(vn_to_inode(vp)); | ||
98 | ASSERT(inode); | ||
99 | |||
100 | return vp; | ||
101 | } | ||
102 | |||
103 | #ifdef XFS_INODE_TRACE | 85 | #ifdef XFS_INODE_TRACE |
104 | 86 | ||
105 | /* | 87 | /* |
@@ -108,7 +90,7 @@ vn_hold( | |||
108 | */ | 90 | */ |
109 | static inline int xfs_icount(struct xfs_inode *ip) | 91 | static inline int xfs_icount(struct xfs_inode *ip) |
110 | { | 92 | { |
111 | bhv_vnode_t *vp = XFS_ITOV_NULL(ip); | 93 | struct inode *vp = VFS_I(ip); |
112 | 94 | ||
113 | if (vp) | 95 | if (vp) |
114 | return vn_count(vp); | 96 | return vn_count(vp); |
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index 41ca2cec5d31..683ce16210ff 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -22,20 +22,6 @@ struct file;
22 | struct xfs_iomap; | 22 | struct xfs_iomap; |
23 | struct attrlist_cursor_kern; | 23 | struct attrlist_cursor_kern; |
24 | 24 | ||
25 | typedef struct inode bhv_vnode_t; | ||
26 | |||
27 | /* | ||
28 | * Vnode to Linux inode mapping. | ||
29 | */ | ||
30 | static inline bhv_vnode_t *vn_from_inode(struct inode *inode) | ||
31 | { | ||
32 | return inode; | ||
33 | } | ||
34 | static inline struct inode *vn_to_inode(bhv_vnode_t *vnode) | ||
35 | { | ||
36 | return vnode; | ||
37 | } | ||
38 | |||
39 | /* | 25 | /* |
40 | * Return values for xfs_inactive. A return value of | 26 | * Return values for xfs_inactive. A return value of |
41 | * VN_INACTIVE_NOCACHE implies that the file system behavior | 27 | * VN_INACTIVE_NOCACHE implies that the file system behavior |
@@ -76,57 +62,52 @@ extern void vn_iowait(struct xfs_inode *ip); | |||
76 | extern void vn_iowake(struct xfs_inode *ip); | 62 | extern void vn_iowake(struct xfs_inode *ip); |
77 | extern void vn_ioerror(struct xfs_inode *ip, int error, char *f, int l); | 63 | extern void vn_ioerror(struct xfs_inode *ip, int error, char *f, int l); |
78 | 64 | ||
79 | static inline int vn_count(bhv_vnode_t *vp) | 65 | static inline int vn_count(struct inode *vp) |
80 | { | 66 | { |
81 | return atomic_read(&vn_to_inode(vp)->i_count); | 67 | return atomic_read(&vp->i_count); |
82 | } | 68 | } |
83 | 69 | ||
84 | /* | 70 | #define IHOLD(ip) \ |
85 | * Vnode reference counting functions (and macros for compatibility). | 71 | do { \ |
86 | */ | 72 | ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \ |
87 | extern bhv_vnode_t *vn_hold(bhv_vnode_t *); | 73 | atomic_inc(&(VFS_I(ip)->i_count)); \ |
74 | xfs_itrace_hold((ip), __FILE__, __LINE__, (inst_t *)__return_address); \ | ||
75 | } while (0) | ||
88 | 76 | ||
89 | #if defined(XFS_INODE_TRACE) | 77 | #define IRELE(ip) \ |
90 | #define VN_HOLD(vp) \ | 78 | do { \ |
91 | ((void)vn_hold(vp), \ | 79 | xfs_itrace_rele((ip), __FILE__, __LINE__, (inst_t *)__return_address); \ |
92 | xfs_itrace_hold(xfs_vtoi(vp), __FILE__, __LINE__, (inst_t *)__return_address)) | 80 | iput(VFS_I(ip)); \ |
93 | #define VN_RELE(vp) \ | 81 | } while (0) |
94 | (xfs_itrace_rele(xfs_vtoi(vp), __FILE__, __LINE__, (inst_t *)__return_address), \ | ||
95 | iput(vn_to_inode(vp))) | ||
96 | #else | ||
97 | #define VN_HOLD(vp) ((void)vn_hold(vp)) | ||
98 | #define VN_RELE(vp) (iput(vn_to_inode(vp))) | ||
99 | #endif | ||
100 | 82 | ||
101 | static inline bhv_vnode_t *vn_grab(bhv_vnode_t *vp) | 83 | static inline struct inode *vn_grab(struct inode *vp) |
102 | { | 84 | { |
103 | struct inode *inode = igrab(vn_to_inode(vp)); | 85 | return igrab(vp); |
104 | return inode ? vn_from_inode(inode) : NULL; | ||
105 | } | 86 | } |
106 | 87 | ||
107 | /* | 88 | /* |
108 | * Dealing with bad inodes | 89 | * Dealing with bad inodes |
109 | */ | 90 | */ |
110 | static inline int VN_BAD(bhv_vnode_t *vp) | 91 | static inline int VN_BAD(struct inode *vp) |
111 | { | 92 | { |
112 | return is_bad_inode(vn_to_inode(vp)); | 93 | return is_bad_inode(vp); |
113 | } | 94 | } |
114 | 95 | ||
115 | /* | 96 | /* |
116 | * Extracting atime values in various formats | 97 | * Extracting atime values in various formats |
117 | */ | 98 | */ |
118 | static inline void vn_atime_to_bstime(bhv_vnode_t *vp, xfs_bstime_t *bs_atime) | 99 | static inline void vn_atime_to_bstime(struct inode *vp, xfs_bstime_t *bs_atime) |
119 | { | 100 | { |
120 | bs_atime->tv_sec = vp->i_atime.tv_sec; | 101 | bs_atime->tv_sec = vp->i_atime.tv_sec; |
121 | bs_atime->tv_nsec = vp->i_atime.tv_nsec; | 102 | bs_atime->tv_nsec = vp->i_atime.tv_nsec; |
122 | } | 103 | } |
123 | 104 | ||
124 | static inline void vn_atime_to_timespec(bhv_vnode_t *vp, struct timespec *ts) | 105 | static inline void vn_atime_to_timespec(struct inode *vp, struct timespec *ts) |
125 | { | 106 | { |
126 | *ts = vp->i_atime; | 107 | *ts = vp->i_atime; |
127 | } | 108 | } |
128 | 109 | ||
129 | static inline void vn_atime_to_time_t(bhv_vnode_t *vp, time_t *tt) | 110 | static inline void vn_atime_to_time_t(struct inode *vp, time_t *tt) |
130 | { | 111 | { |
131 | *tt = vp->i_atime.tv_sec; | 112 | *tt = vp->i_atime.tv_sec; |
132 | } | 113 | } |
@@ -134,9 +115,9 @@ static inline void vn_atime_to_time_t(bhv_vnode_t *vp, time_t *tt) | |||
134 | /* | 115 | /* |
135 | * Some useful predicates. | 116 | * Some useful predicates. |
136 | */ | 117 | */ |
137 | #define VN_MAPPED(vp) mapping_mapped(vn_to_inode(vp)->i_mapping) | 118 | #define VN_MAPPED(vp) mapping_mapped(vp->i_mapping) |
138 | #define VN_CACHED(vp) (vn_to_inode(vp)->i_mapping->nrpages) | 119 | #define VN_CACHED(vp) (vp->i_mapping->nrpages) |
139 | #define VN_DIRTY(vp) mapping_tagged(vn_to_inode(vp)->i_mapping, \ | 120 | #define VN_DIRTY(vp) mapping_tagged(vp->i_mapping, \ |
140 | PAGECACHE_TAG_DIRTY) | 121 | PAGECACHE_TAG_DIRTY) |
141 | 122 | ||
142 | 123 | ||
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index fc9f3fb39b7b..f2705f2fd43c 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -101,11 +101,18 @@ xfs_qm_dqinit(
101 | if (brandnewdquot) { | 101 | if (brandnewdquot) { |
102 | dqp->dq_flnext = dqp->dq_flprev = dqp; | 102 | dqp->dq_flnext = dqp->dq_flprev = dqp; |
103 | mutex_init(&dqp->q_qlock); | 103 | mutex_init(&dqp->q_qlock); |
104 | initnsema(&dqp->q_flock, 1, "fdq"); | ||
105 | sv_init(&dqp->q_pinwait, SV_DEFAULT, "pdq"); | 104 | sv_init(&dqp->q_pinwait, SV_DEFAULT, "pdq"); |
106 | 105 | ||
106 | /* | ||
107 | * Because we want to use a counting completion, complete | ||
108 | * the flush completion once to allow a single access to | ||
109 | * the flush completion without blocking. | ||
110 | */ | ||
111 | init_completion(&dqp->q_flush); | ||
112 | complete(&dqp->q_flush); | ||
113 | |||
107 | #ifdef XFS_DQUOT_TRACE | 114 | #ifdef XFS_DQUOT_TRACE |
108 | dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_SLEEP); | 115 | dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_NOFS); |
109 | xfs_dqtrace_entry(dqp, "DQINIT"); | 116 | xfs_dqtrace_entry(dqp, "DQINIT"); |
110 | #endif | 117 | #endif |
111 | } else { | 118 | } else { |
@@ -150,7 +157,6 @@ xfs_qm_dqdestroy( | |||
150 | ASSERT(! XFS_DQ_IS_ON_FREELIST(dqp)); | 157 | ASSERT(! XFS_DQ_IS_ON_FREELIST(dqp)); |
151 | 158 | ||
152 | mutex_destroy(&dqp->q_qlock); | 159 | mutex_destroy(&dqp->q_qlock); |
153 | freesema(&dqp->q_flock); | ||
154 | sv_destroy(&dqp->q_pinwait); | 160 | sv_destroy(&dqp->q_pinwait); |
155 | 161 | ||
156 | #ifdef XFS_DQUOT_TRACE | 162 | #ifdef XFS_DQUOT_TRACE |
@@ -431,7 +437,7 @@ xfs_qm_dqalloc( | |||
431 | * when it unlocks the inode. Since we want to keep the quota | 437 | * when it unlocks the inode. Since we want to keep the quota |
432 | * inode around, we bump the vnode ref count now. | 438 | * inode around, we bump the vnode ref count now. |
433 | */ | 439 | */ |
434 | VN_HOLD(XFS_ITOV(quotip)); | 440 | IHOLD(quotip); |
435 | 441 | ||
436 | xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL); | 442 | xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL); |
437 | nmaps = 1; | 443 | nmaps = 1; |
@@ -1211,7 +1217,7 @@ xfs_qm_dqflush( | |||
1211 | int error; | 1217 | int error; |
1212 | 1218 | ||
1213 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); | 1219 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); |
1214 | ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp)); | 1220 | ASSERT(!completion_done(&dqp->q_flush)); |
1215 | xfs_dqtrace_entry(dqp, "DQFLUSH"); | 1221 | xfs_dqtrace_entry(dqp, "DQFLUSH"); |
1216 | 1222 | ||
1217 | /* | 1223 | /* |
@@ -1348,34 +1354,18 @@ xfs_qm_dqflush_done( | |||
1348 | xfs_dqfunlock(dqp); | 1354 | xfs_dqfunlock(dqp); |
1349 | } | 1355 | } |
1350 | 1356 | ||
1351 | |||
1352 | int | ||
1353 | xfs_qm_dqflock_nowait( | ||
1354 | xfs_dquot_t *dqp) | ||
1355 | { | ||
1356 | int locked; | ||
1357 | |||
1358 | locked = cpsema(&((dqp)->q_flock)); | ||
1359 | |||
1360 | /* XXX ifdef these out */ | ||
1361 | if (locked) | ||
1362 | (dqp)->dq_flags |= XFS_DQ_FLOCKED; | ||
1363 | return (locked); | ||
1364 | } | ||
1365 | |||
1366 | |||
1367 | int | 1357 | int |
1368 | xfs_qm_dqlock_nowait( | 1358 | xfs_qm_dqlock_nowait( |
1369 | xfs_dquot_t *dqp) | 1359 | xfs_dquot_t *dqp) |
1370 | { | 1360 | { |
1371 | return (mutex_trylock(&((dqp)->q_qlock))); | 1361 | return mutex_trylock(&dqp->q_qlock); |
1372 | } | 1362 | } |
1373 | 1363 | ||
1374 | void | 1364 | void |
1375 | xfs_dqlock( | 1365 | xfs_dqlock( |
1376 | xfs_dquot_t *dqp) | 1366 | xfs_dquot_t *dqp) |
1377 | { | 1367 | { |
1378 | mutex_lock(&(dqp->q_qlock)); | 1368 | mutex_lock(&dqp->q_qlock); |
1379 | } | 1369 | } |
1380 | 1370 | ||
1381 | void | 1371 | void |
@@ -1468,7 +1458,7 @@ xfs_qm_dqpurge( | |||
1468 | * if we're turning off quotas. Basically, we need this flush | 1458 | * if we're turning off quotas. Basically, we need this flush |
1469 | * lock, and are willing to block on it. | 1459 | * lock, and are willing to block on it. |
1470 | */ | 1460 | */ |
1471 | if (! xfs_qm_dqflock_nowait(dqp)) { | 1461 | if (!xfs_dqflock_nowait(dqp)) { |
1472 | /* | 1462 | /* |
1473 | * Block on the flush lock after nudging dquot buffer, | 1463 | * Block on the flush lock after nudging dquot buffer, |
1474 | * if it is incore. | 1464 | * if it is incore. |
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h index f7393bba4e95..8958d0faf8d3 100644 --- a/fs/xfs/quota/xfs_dquot.h +++ b/fs/xfs/quota/xfs_dquot.h | |||
@@ -82,7 +82,7 @@ typedef struct xfs_dquot { | |||
82 | xfs_qcnt_t q_res_icount; /* total inos allocd+reserved */ | 82 | xfs_qcnt_t q_res_icount; /* total inos allocd+reserved */ |
83 | xfs_qcnt_t q_res_rtbcount;/* total realtime blks used+reserved */ | 83 | xfs_qcnt_t q_res_rtbcount;/* total realtime blks used+reserved */ |
84 | mutex_t q_qlock; /* quota lock */ | 84 | mutex_t q_qlock; /* quota lock */ |
85 | sema_t q_flock; /* flush lock */ | 85 | struct completion q_flush; /* flush completion queue */ |
86 | uint q_pincount; /* pin count for this dquot */ | 86 | uint q_pincount; /* pin count for this dquot */ |
87 | sv_t q_pinwait; /* sync var for pinning */ | 87 | sv_t q_pinwait; /* sync var for pinning */ |
88 | #ifdef XFS_DQUOT_TRACE | 88 | #ifdef XFS_DQUOT_TRACE |
@@ -113,17 +113,25 @@ XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp) | |||
113 | 113 | ||
114 | 114 | ||
115 | /* | 115 | /* |
116 | * The following three routines simply manage the q_flock | 116 | * Manage the q_flush completion queue embedded in the dquot. This completion |
117 | * semaphore embedded in the dquot. This semaphore synchronizes | 117 | * queue synchronizes processes attempting to flush the in-core dquot back to |
118 | * processes attempting to flush the in-core dquot back to disk. | 118 | * disk. |
119 | */ | 119 | */ |
120 | #define xfs_dqflock(dqp) { psema(&((dqp)->q_flock), PINOD | PRECALC);\ | 120 | static inline void xfs_dqflock(xfs_dquot_t *dqp) |
121 | (dqp)->dq_flags |= XFS_DQ_FLOCKED; } | 121 | { |
122 | #define xfs_dqfunlock(dqp) { ASSERT(issemalocked(&((dqp)->q_flock))); \ | 122 | wait_for_completion(&dqp->q_flush); |
123 | vsema(&((dqp)->q_flock)); \ | 123 | } |
124 | (dqp)->dq_flags &= ~(XFS_DQ_FLOCKED); } | 124 | |
125 | static inline int xfs_dqflock_nowait(xfs_dquot_t *dqp) | ||
126 | { | ||
127 | return try_wait_for_completion(&dqp->q_flush); | ||
128 | } | ||
129 | |||
130 | static inline void xfs_dqfunlock(xfs_dquot_t *dqp) | ||
131 | { | ||
132 | complete(&dqp->q_flush); | ||
133 | } | ||
125 | 134 | ||
126 | #define XFS_DQ_IS_FLUSH_LOCKED(dqp) (issemalocked(&((dqp)->q_flock))) | ||
127 | #define XFS_DQ_IS_ON_FREELIST(dqp) ((dqp)->dq_flnext != (dqp)) | 135 | #define XFS_DQ_IS_ON_FREELIST(dqp) ((dqp)->dq_flnext != (dqp)) |
128 | #define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY) | 136 | #define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY) |
129 | #define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER) | 137 | #define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER) |
@@ -167,7 +175,6 @@ extern int xfs_qm_dqflush(xfs_dquot_t *, uint); | |||
167 | extern int xfs_qm_dqpurge(xfs_dquot_t *); | 175 | extern int xfs_qm_dqpurge(xfs_dquot_t *); |
168 | extern void xfs_qm_dqunpin_wait(xfs_dquot_t *); | 176 | extern void xfs_qm_dqunpin_wait(xfs_dquot_t *); |
169 | extern int xfs_qm_dqlock_nowait(xfs_dquot_t *); | 177 | extern int xfs_qm_dqlock_nowait(xfs_dquot_t *); |
170 | extern int xfs_qm_dqflock_nowait(xfs_dquot_t *); | ||
171 | extern void xfs_qm_dqflock_pushbuf_wait(xfs_dquot_t *dqp); | 178 | extern void xfs_qm_dqflock_pushbuf_wait(xfs_dquot_t *dqp); |
172 | extern void xfs_qm_adjust_dqtimers(xfs_mount_t *, | 179 | extern void xfs_qm_adjust_dqtimers(xfs_mount_t *, |
173 | xfs_disk_dquot_t *); | 180 | xfs_disk_dquot_t *); |
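Taken together with the inline helpers defined in this hunk, the call sites converted elsewhere in this patch (AIL push, reclaim, quotacheck flush) follow the same shape. A hedged sketch of that lifecycle, assuming the xfs_dquot definitions above are in scope; the function name is illustrative.

/*
 * Hedged sketch of the flush-lock lifecycle on a dquot, condensed from
 * the converted call sites; assumes xfs_dquot.h (with the inlines above).
 */
STATIC void
example_dquot_flush_cycle(
	xfs_dquot_t	*dqp)
{
	/* Non-blocking attempt, as in the AIL push and reclaim paths. */
	if (!xfs_dqflock_nowait(dqp))
		return;				/* already being flushed */

	/* Held: matches ASSERT(!completion_done(&dqp->q_flush)) in dqflush. */
	ASSERT(!completion_done(&dqp->q_flush));

	/* ... write the dquot back to its buffer ... */

	/* Normally released from xfs_qm_dqflush_done() when the I/O completes. */
	xfs_dqfunlock(dqp);
}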
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c index 08d2fc89e6a1..f028644caa5e 100644 --- a/fs/xfs/quota/xfs_dquot_item.c +++ b/fs/xfs/quota/xfs_dquot_item.c | |||
@@ -151,7 +151,7 @@ xfs_qm_dquot_logitem_push( | |||
151 | dqp = logitem->qli_dquot; | 151 | dqp = logitem->qli_dquot; |
152 | 152 | ||
153 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); | 153 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); |
154 | ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp)); | 154 | ASSERT(!completion_done(&dqp->q_flush)); |
155 | 155 | ||
156 | /* | 156 | /* |
157 | * Since we were able to lock the dquot's flush lock and | 157 | * Since we were able to lock the dquot's flush lock and |
@@ -245,7 +245,7 @@ xfs_qm_dquot_logitem_pushbuf( | |||
245 | * inode flush completed and the inode was taken off the AIL. | 245 | * inode flush completed and the inode was taken off the AIL. |
246 | * So, just get out. | 246 | * So, just get out. |
247 | */ | 247 | */ |
248 | if (!issemalocked(&(dqp->q_flock)) || | 248 | if (completion_done(&dqp->q_flush) || |
249 | ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) { | 249 | ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) { |
250 | qip->qli_pushbuf_flag = 0; | 250 | qip->qli_pushbuf_flag = 0; |
251 | xfs_dqunlock(dqp); | 251 | xfs_dqunlock(dqp); |
@@ -258,7 +258,7 @@ xfs_qm_dquot_logitem_pushbuf( | |||
258 | if (bp != NULL) { | 258 | if (bp != NULL) { |
259 | if (XFS_BUF_ISDELAYWRITE(bp)) { | 259 | if (XFS_BUF_ISDELAYWRITE(bp)) { |
260 | dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) && | 260 | dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) && |
261 | issemalocked(&(dqp->q_flock))); | 261 | !completion_done(&dqp->q_flush)); |
262 | qip->qli_pushbuf_flag = 0; | 262 | qip->qli_pushbuf_flag = 0; |
263 | xfs_dqunlock(dqp); | 263 | xfs_dqunlock(dqp); |
264 | 264 | ||
@@ -317,7 +317,7 @@ xfs_qm_dquot_logitem_trylock( | |||
317 | return (XFS_ITEM_LOCKED); | 317 | return (XFS_ITEM_LOCKED); |
318 | 318 | ||
319 | retval = XFS_ITEM_SUCCESS; | 319 | retval = XFS_ITEM_SUCCESS; |
320 | if (! xfs_qm_dqflock_nowait(dqp)) { | 320 | if (!xfs_dqflock_nowait(dqp)) { |
321 | /* | 321 | /* |
322 | * The dquot is already being flushed. It may have been | 322 | * The dquot is already being flushed. It may have been |
323 | * flushed delayed write, however, and we don't want to | 323 | * flushed delayed write, however, and we don't want to |
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 021934a3d456..df0ffef9775a 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c | |||
@@ -310,8 +310,7 @@ xfs_qm_unmount_quotadestroy( | |||
310 | */ | 310 | */ |
311 | void | 311 | void |
312 | xfs_qm_mount_quotas( | 312 | xfs_qm_mount_quotas( |
313 | xfs_mount_t *mp, | 313 | xfs_mount_t *mp) |
314 | int mfsi_flags) | ||
315 | { | 314 | { |
316 | int error = 0; | 315 | int error = 0; |
317 | uint sbf; | 316 | uint sbf; |
@@ -346,8 +345,7 @@ xfs_qm_mount_quotas( | |||
346 | /* | 345 | /* |
347 | * If any of the quotas are not consistent, do a quotacheck. | 346 | * If any of the quotas are not consistent, do a quotacheck. |
348 | */ | 347 | */ |
349 | if (XFS_QM_NEED_QUOTACHECK(mp) && | 348 | if (XFS_QM_NEED_QUOTACHECK(mp)) { |
350 | !(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) { | ||
351 | error = xfs_qm_quotacheck(mp); | 349 | error = xfs_qm_quotacheck(mp); |
352 | if (error) { | 350 | if (error) { |
353 | /* Quotacheck failed and disabled quotas. */ | 351 | /* Quotacheck failed and disabled quotas. */ |
@@ -484,7 +482,7 @@ again: | |||
484 | xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY"); | 482 | xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY"); |
485 | /* XXX a sentinel would be better */ | 483 | /* XXX a sentinel would be better */ |
486 | recl = XFS_QI_MPLRECLAIMS(mp); | 484 | recl = XFS_QI_MPLRECLAIMS(mp); |
487 | if (! xfs_qm_dqflock_nowait(dqp)) { | 485 | if (!xfs_dqflock_nowait(dqp)) { |
488 | /* | 486 | /* |
489 | * If we can't grab the flush lock then check | 487 | * If we can't grab the flush lock then check |
490 | * to see if the dquot has been flushed delayed | 488 | * to see if the dquot has been flushed delayed |
@@ -1062,7 +1060,7 @@ xfs_qm_sync( | |||
1062 | 1060 | ||
1063 | /* XXX a sentinel would be better */ | 1061 | /* XXX a sentinel would be better */ |
1064 | recl = XFS_QI_MPLRECLAIMS(mp); | 1062 | recl = XFS_QI_MPLRECLAIMS(mp); |
1065 | if (! xfs_qm_dqflock_nowait(dqp)) { | 1063 | if (!xfs_dqflock_nowait(dqp)) { |
1066 | if (nowait) { | 1064 | if (nowait) { |
1067 | xfs_dqunlock(dqp); | 1065 | xfs_dqunlock(dqp); |
1068 | continue; | 1066 | continue; |
@@ -2079,7 +2077,7 @@ xfs_qm_shake_freelist( | |||
2079 | * Try to grab the flush lock. If this dquot is in the process of | 2077 | * Try to grab the flush lock. If this dquot is in the process of |
2080 | * getting flushed to disk, we don't want to reclaim it. | 2078 | * getting flushed to disk, we don't want to reclaim it. |
2081 | */ | 2079 | */ |
2082 | if (! xfs_qm_dqflock_nowait(dqp)) { | 2080 | if (!xfs_dqflock_nowait(dqp)) { |
2083 | xfs_dqunlock(dqp); | 2081 | xfs_dqunlock(dqp); |
2084 | dqp = dqp->dq_flnext; | 2082 | dqp = dqp->dq_flnext; |
2085 | continue; | 2083 | continue; |
@@ -2257,7 +2255,7 @@ xfs_qm_dqreclaim_one(void) | |||
2257 | * Try to grab the flush lock. If this dquot is in the process of | 2255 | * Try to grab the flush lock. If this dquot is in the process of |
2258 | * getting flushed to disk, we don't want to reclaim it. | 2256 | * getting flushed to disk, we don't want to reclaim it. |
2259 | */ | 2257 | */ |
2260 | if (! xfs_qm_dqflock_nowait(dqp)) { | 2258 | if (!xfs_dqflock_nowait(dqp)) { |
2261 | xfs_dqunlock(dqp); | 2259 | xfs_dqunlock(dqp); |
2262 | continue; | 2260 | continue; |
2263 | } | 2261 | } |
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h index cd2300e374af..44f25349e478 100644 --- a/fs/xfs/quota/xfs_qm.h +++ b/fs/xfs/quota/xfs_qm.h | |||
@@ -165,7 +165,7 @@ typedef struct xfs_dquot_acct { | |||
165 | #define XFS_QM_RELE(xqm) ((xqm)->qm_nrefs--) | 165 | #define XFS_QM_RELE(xqm) ((xqm)->qm_nrefs--) |
166 | 166 | ||
167 | extern void xfs_qm_destroy_quotainfo(xfs_mount_t *); | 167 | extern void xfs_qm_destroy_quotainfo(xfs_mount_t *); |
168 | extern void xfs_qm_mount_quotas(xfs_mount_t *, int); | 168 | extern void xfs_qm_mount_quotas(xfs_mount_t *); |
169 | extern int xfs_qm_quotacheck(xfs_mount_t *); | 169 | extern int xfs_qm_quotacheck(xfs_mount_t *); |
170 | extern void xfs_qm_unmount_quotadestroy(xfs_mount_t *); | 170 | extern void xfs_qm_unmount_quotadestroy(xfs_mount_t *); |
171 | extern int xfs_qm_unmount_quotas(xfs_mount_t *); | 171 | extern int xfs_qm_unmount_quotas(xfs_mount_t *); |
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c index f4f6c4c861d7..eea2e60b456b 100644 --- a/fs/xfs/quota/xfs_qm_bhv.c +++ b/fs/xfs/quota/xfs_qm_bhv.c | |||
@@ -162,7 +162,7 @@ xfs_qm_newmount( | |||
162 | * mounting, and get on with the boring life | 162 | * mounting, and get on with the boring life |
163 | * without disk quotas. | 163 | * without disk quotas. |
164 | */ | 164 | */ |
165 | xfs_qm_mount_quotas(mp, 0); | 165 | xfs_qm_mount_quotas(mp); |
166 | } else { | 166 | } else { |
167 | /* | 167 | /* |
168 | * Clear the quota flags, but remember them. This | 168 | * Clear the quota flags, but remember them. This |
@@ -184,13 +184,12 @@ STATIC int | |||
184 | xfs_qm_endmount( | 184 | xfs_qm_endmount( |
185 | xfs_mount_t *mp, | 185 | xfs_mount_t *mp, |
186 | uint needquotamount, | 186 | uint needquotamount, |
187 | uint quotaflags, | 187 | uint quotaflags) |
188 | int mfsi_flags) | ||
189 | { | 188 | { |
190 | if (needquotamount) { | 189 | if (needquotamount) { |
191 | ASSERT(mp->m_qflags == 0); | 190 | ASSERT(mp->m_qflags == 0); |
192 | mp->m_qflags = quotaflags; | 191 | mp->m_qflags = quotaflags; |
193 | xfs_qm_mount_quotas(mp, mfsi_flags); | 192 | xfs_qm_mount_quotas(mp); |
194 | } | 193 | } |
195 | 194 | ||
196 | #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) | 195 | #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) |
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index adfb8723f65a..1a3b803dfa55 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c | |||
@@ -1034,7 +1034,7 @@ xfs_qm_dqrele_all_inodes( | |||
1034 | { | 1034 | { |
1035 | xfs_inode_t *ip, *topino; | 1035 | xfs_inode_t *ip, *topino; |
1036 | uint ireclaims; | 1036 | uint ireclaims; |
1037 | bhv_vnode_t *vp; | 1037 | struct inode *vp; |
1038 | boolean_t vnode_refd; | 1038 | boolean_t vnode_refd; |
1039 | 1039 | ||
1040 | ASSERT(mp->m_quotainfo); | 1040 | ASSERT(mp->m_quotainfo); |
@@ -1059,7 +1059,7 @@ again: | |||
1059 | ip = ip->i_mnext; | 1059 | ip = ip->i_mnext; |
1060 | continue; | 1060 | continue; |
1061 | } | 1061 | } |
1062 | vp = XFS_ITOV_NULL(ip); | 1062 | vp = VFS_I(ip); |
1063 | if (!vp) { | 1063 | if (!vp) { |
1064 | ASSERT(ip->i_udquot == NULL); | 1064 | ASSERT(ip->i_udquot == NULL); |
1065 | ASSERT(ip->i_gdquot == NULL); | 1065 | ASSERT(ip->i_gdquot == NULL); |
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c index 3e4648ad9cfc..b2f639a1416f 100644 --- a/fs/xfs/xfs_acl.c +++ b/fs/xfs/xfs_acl.c | |||
@@ -37,15 +37,15 @@ | |||
37 | #include <linux/capability.h> | 37 | #include <linux/capability.h> |
38 | #include <linux/posix_acl_xattr.h> | 38 | #include <linux/posix_acl_xattr.h> |
39 | 39 | ||
40 | STATIC int xfs_acl_setmode(bhv_vnode_t *, xfs_acl_t *, int *); | 40 | STATIC int xfs_acl_setmode(struct inode *, xfs_acl_t *, int *); |
41 | STATIC void xfs_acl_filter_mode(mode_t, xfs_acl_t *); | 41 | STATIC void xfs_acl_filter_mode(mode_t, xfs_acl_t *); |
42 | STATIC void xfs_acl_get_endian(xfs_acl_t *); | 42 | STATIC void xfs_acl_get_endian(xfs_acl_t *); |
43 | STATIC int xfs_acl_access(uid_t, gid_t, xfs_acl_t *, mode_t, cred_t *); | 43 | STATIC int xfs_acl_access(uid_t, gid_t, xfs_acl_t *, mode_t, cred_t *); |
44 | STATIC int xfs_acl_invalid(xfs_acl_t *); | 44 | STATIC int xfs_acl_invalid(xfs_acl_t *); |
45 | STATIC void xfs_acl_sync_mode(mode_t, xfs_acl_t *); | 45 | STATIC void xfs_acl_sync_mode(mode_t, xfs_acl_t *); |
46 | STATIC void xfs_acl_get_attr(bhv_vnode_t *, xfs_acl_t *, int, int, int *); | 46 | STATIC void xfs_acl_get_attr(struct inode *, xfs_acl_t *, int, int, int *); |
47 | STATIC void xfs_acl_set_attr(bhv_vnode_t *, xfs_acl_t *, int, int *); | 47 | STATIC void xfs_acl_set_attr(struct inode *, xfs_acl_t *, int, int *); |
48 | STATIC int xfs_acl_allow_set(bhv_vnode_t *, int); | 48 | STATIC int xfs_acl_allow_set(struct inode *, int); |
49 | 49 | ||
50 | kmem_zone_t *xfs_acl_zone; | 50 | kmem_zone_t *xfs_acl_zone; |
51 | 51 | ||
@@ -55,7 +55,7 @@ kmem_zone_t *xfs_acl_zone; | |||
55 | */ | 55 | */ |
56 | int | 56 | int |
57 | xfs_acl_vhasacl_access( | 57 | xfs_acl_vhasacl_access( |
58 | bhv_vnode_t *vp) | 58 | struct inode *vp) |
59 | { | 59 | { |
60 | int error; | 60 | int error; |
61 | 61 | ||
@@ -68,7 +68,7 @@ xfs_acl_vhasacl_access( | |||
68 | */ | 68 | */ |
69 | int | 69 | int |
70 | xfs_acl_vhasacl_default( | 70 | xfs_acl_vhasacl_default( |
71 | bhv_vnode_t *vp) | 71 | struct inode *vp) |
72 | { | 72 | { |
73 | int error; | 73 | int error; |
74 | 74 | ||
@@ -207,7 +207,7 @@ posix_acl_xfs_to_xattr( | |||
207 | 207 | ||
208 | int | 208 | int |
209 | xfs_acl_vget( | 209 | xfs_acl_vget( |
210 | bhv_vnode_t *vp, | 210 | struct inode *vp, |
211 | void *acl, | 211 | void *acl, |
212 | size_t size, | 212 | size_t size, |
213 | int kind) | 213 | int kind) |
@@ -217,7 +217,6 @@ xfs_acl_vget( | |||
217 | posix_acl_xattr_header *ext_acl = acl; | 217 | posix_acl_xattr_header *ext_acl = acl; |
218 | int flags = 0; | 218 | int flags = 0; |
219 | 219 | ||
220 | VN_HOLD(vp); | ||
221 | if(size) { | 220 | if(size) { |
222 | if (!(_ACL_ALLOC(xfs_acl))) { | 221 | if (!(_ACL_ALLOC(xfs_acl))) { |
223 | error = ENOMEM; | 222 | error = ENOMEM; |
@@ -239,11 +238,10 @@ xfs_acl_vget( | |||
239 | goto out; | 238 | goto out; |
240 | } | 239 | } |
241 | if (kind == _ACL_TYPE_ACCESS) | 240 | if (kind == _ACL_TYPE_ACCESS) |
242 | xfs_acl_sync_mode(xfs_vtoi(vp)->i_d.di_mode, xfs_acl); | 241 | xfs_acl_sync_mode(XFS_I(vp)->i_d.di_mode, xfs_acl); |
243 | error = -posix_acl_xfs_to_xattr(xfs_acl, ext_acl, size); | 242 | error = -posix_acl_xfs_to_xattr(xfs_acl, ext_acl, size); |
244 | } | 243 | } |
245 | out: | 244 | out: |
246 | VN_RELE(vp); | ||
247 | if(xfs_acl) | 245 | if(xfs_acl) |
248 | _ACL_FREE(xfs_acl); | 246 | _ACL_FREE(xfs_acl); |
249 | return -error; | 247 | return -error; |
@@ -251,28 +249,26 @@ out: | |||
251 | 249 | ||
252 | int | 250 | int |
253 | xfs_acl_vremove( | 251 | xfs_acl_vremove( |
254 | bhv_vnode_t *vp, | 252 | struct inode *vp, |
255 | int kind) | 253 | int kind) |
256 | { | 254 | { |
257 | int error; | 255 | int error; |
258 | 256 | ||
259 | VN_HOLD(vp); | ||
260 | error = xfs_acl_allow_set(vp, kind); | 257 | error = xfs_acl_allow_set(vp, kind); |
261 | if (!error) { | 258 | if (!error) { |
262 | error = xfs_attr_remove(xfs_vtoi(vp), | 259 | error = xfs_attr_remove(XFS_I(vp), |
263 | kind == _ACL_TYPE_DEFAULT? | 260 | kind == _ACL_TYPE_DEFAULT? |
264 | SGI_ACL_DEFAULT: SGI_ACL_FILE, | 261 | SGI_ACL_DEFAULT: SGI_ACL_FILE, |
265 | ATTR_ROOT); | 262 | ATTR_ROOT); |
266 | if (error == ENOATTR) | 263 | if (error == ENOATTR) |
267 | error = 0; /* 'scool */ | 264 | error = 0; /* 'scool */ |
268 | } | 265 | } |
269 | VN_RELE(vp); | ||
270 | return -error; | 266 | return -error; |
271 | } | 267 | } |
272 | 268 | ||
273 | int | 269 | int |
274 | xfs_acl_vset( | 270 | xfs_acl_vset( |
275 | bhv_vnode_t *vp, | 271 | struct inode *vp, |
276 | void *acl, | 272 | void *acl, |
277 | size_t size, | 273 | size_t size, |
278 | int kind) | 274 | int kind) |
@@ -298,7 +294,6 @@ xfs_acl_vset( | |||
298 | return 0; | 294 | return 0; |
299 | } | 295 | } |
300 | 296 | ||
301 | VN_HOLD(vp); | ||
302 | error = xfs_acl_allow_set(vp, kind); | 297 | error = xfs_acl_allow_set(vp, kind); |
303 | 298 | ||
304 | /* Incoming ACL exists, set file mode based on its value */ | 299 | /* Incoming ACL exists, set file mode based on its value */ |
@@ -321,7 +316,6 @@ xfs_acl_vset( | |||
321 | } | 316 | } |
322 | 317 | ||
323 | out: | 318 | out: |
324 | VN_RELE(vp); | ||
325 | _ACL_FREE(xfs_acl); | 319 | _ACL_FREE(xfs_acl); |
326 | return -error; | 320 | return -error; |
327 | } | 321 | } |
@@ -363,7 +357,7 @@ xfs_acl_iaccess( | |||
363 | 357 | ||
364 | STATIC int | 358 | STATIC int |
365 | xfs_acl_allow_set( | 359 | xfs_acl_allow_set( |
366 | bhv_vnode_t *vp, | 360 | struct inode *vp, |
367 | int kind) | 361 | int kind) |
368 | { | 362 | { |
369 | if (vp->i_flags & (S_IMMUTABLE|S_APPEND)) | 363 | if (vp->i_flags & (S_IMMUTABLE|S_APPEND)) |
@@ -372,7 +366,7 @@ xfs_acl_allow_set( | |||
372 | return ENOTDIR; | 366 | return ENOTDIR; |
373 | if (vp->i_sb->s_flags & MS_RDONLY) | 367 | if (vp->i_sb->s_flags & MS_RDONLY) |
374 | return EROFS; | 368 | return EROFS; |
375 | if (xfs_vtoi(vp)->i_d.di_uid != current->fsuid && !capable(CAP_FOWNER)) | 369 | if (XFS_I(vp)->i_d.di_uid != current->fsuid && !capable(CAP_FOWNER)) |
376 | return EPERM; | 370 | return EPERM; |
377 | return 0; | 371 | return 0; |
378 | } | 372 | } |
@@ -566,7 +560,7 @@ xfs_acl_get_endian( | |||
566 | */ | 560 | */ |
567 | STATIC void | 561 | STATIC void |
568 | xfs_acl_get_attr( | 562 | xfs_acl_get_attr( |
569 | bhv_vnode_t *vp, | 563 | struct inode *vp, |
570 | xfs_acl_t *aclp, | 564 | xfs_acl_t *aclp, |
571 | int kind, | 565 | int kind, |
572 | int flags, | 566 | int flags, |
@@ -576,7 +570,7 @@ xfs_acl_get_attr( | |||
576 | 570 | ||
577 | ASSERT((flags & ATTR_KERNOVAL) ? (aclp == NULL) : 1); | 571 | ASSERT((flags & ATTR_KERNOVAL) ? (aclp == NULL) : 1); |
578 | flags |= ATTR_ROOT; | 572 | flags |= ATTR_ROOT; |
579 | *error = xfs_attr_get(xfs_vtoi(vp), | 573 | *error = xfs_attr_get(XFS_I(vp), |
580 | kind == _ACL_TYPE_ACCESS ? | 574 | kind == _ACL_TYPE_ACCESS ? |
581 | SGI_ACL_FILE : SGI_ACL_DEFAULT, | 575 | SGI_ACL_FILE : SGI_ACL_DEFAULT, |
582 | (char *)aclp, &len, flags); | 576 | (char *)aclp, &len, flags); |
@@ -590,7 +584,7 @@ xfs_acl_get_attr( | |||
590 | */ | 584 | */ |
591 | STATIC void | 585 | STATIC void |
592 | xfs_acl_set_attr( | 586 | xfs_acl_set_attr( |
593 | bhv_vnode_t *vp, | 587 | struct inode *vp, |
594 | xfs_acl_t *aclp, | 588 | xfs_acl_t *aclp, |
595 | int kind, | 589 | int kind, |
596 | int *error) | 590 | int *error) |
@@ -615,7 +609,7 @@ xfs_acl_set_attr( | |||
615 | INT_SET(newace->ae_perm, ARCH_CONVERT, ace->ae_perm); | 609 | INT_SET(newace->ae_perm, ARCH_CONVERT, ace->ae_perm); |
616 | } | 610 | } |
617 | INT_SET(newacl->acl_cnt, ARCH_CONVERT, aclp->acl_cnt); | 611 | INT_SET(newacl->acl_cnt, ARCH_CONVERT, aclp->acl_cnt); |
618 | *error = xfs_attr_set(xfs_vtoi(vp), | 612 | *error = xfs_attr_set(XFS_I(vp), |
619 | kind == _ACL_TYPE_ACCESS ? | 613 | kind == _ACL_TYPE_ACCESS ? |
620 | SGI_ACL_FILE: SGI_ACL_DEFAULT, | 614 | SGI_ACL_FILE: SGI_ACL_DEFAULT, |
621 | (char *)newacl, len, ATTR_ROOT); | 615 | (char *)newacl, len, ATTR_ROOT); |
@@ -624,7 +618,7 @@ xfs_acl_set_attr( | |||
624 | 618 | ||
625 | int | 619 | int |
626 | xfs_acl_vtoacl( | 620 | xfs_acl_vtoacl( |
627 | bhv_vnode_t *vp, | 621 | struct inode *vp, |
628 | xfs_acl_t *access_acl, | 622 | xfs_acl_t *access_acl, |
629 | xfs_acl_t *default_acl) | 623 | xfs_acl_t *default_acl) |
630 | { | 624 | { |
@@ -639,7 +633,7 @@ xfs_acl_vtoacl( | |||
639 | if (error) | 633 | if (error) |
640 | access_acl->acl_cnt = XFS_ACL_NOT_PRESENT; | 634 | access_acl->acl_cnt = XFS_ACL_NOT_PRESENT; |
641 | else /* We have a good ACL and the file mode, synchronize. */ | 635 | else /* We have a good ACL and the file mode, synchronize. */ |
642 | xfs_acl_sync_mode(xfs_vtoi(vp)->i_d.di_mode, access_acl); | 636 | xfs_acl_sync_mode(XFS_I(vp)->i_d.di_mode, access_acl); |
643 | } | 637 | } |
644 | 638 | ||
645 | if (default_acl) { | 639 | if (default_acl) { |
@@ -656,7 +650,7 @@ xfs_acl_vtoacl( | |||
656 | */ | 650 | */ |
657 | int | 651 | int |
658 | xfs_acl_inherit( | 652 | xfs_acl_inherit( |
659 | bhv_vnode_t *vp, | 653 | struct inode *vp, |
660 | mode_t mode, | 654 | mode_t mode, |
661 | xfs_acl_t *pdaclp) | 655 | xfs_acl_t *pdaclp) |
662 | { | 656 | { |
@@ -715,7 +709,7 @@ out_error: | |||
715 | */ | 709 | */ |
716 | STATIC int | 710 | STATIC int |
717 | xfs_acl_setmode( | 711 | xfs_acl_setmode( |
718 | bhv_vnode_t *vp, | 712 | struct inode *vp, |
719 | xfs_acl_t *acl, | 713 | xfs_acl_t *acl, |
720 | int *basicperms) | 714 | int *basicperms) |
721 | { | 715 | { |
@@ -734,7 +728,7 @@ xfs_acl_setmode( | |||
734 | * mode. The m:: bits take precedence over the g:: bits. | 728 | * mode. The m:: bits take precedence over the g:: bits. |
735 | */ | 729 | */ |
736 | iattr.ia_valid = ATTR_MODE; | 730 | iattr.ia_valid = ATTR_MODE; |
737 | iattr.ia_mode = xfs_vtoi(vp)->i_d.di_mode; | 731 | iattr.ia_mode = XFS_I(vp)->i_d.di_mode; |
738 | iattr.ia_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO); | 732 | iattr.ia_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO); |
739 | ap = acl->acl_entry; | 733 | ap = acl->acl_entry; |
740 | for (i = 0; i < acl->acl_cnt; ++i) { | 734 | for (i = 0; i < acl->acl_cnt; ++i) { |
@@ -764,7 +758,7 @@ xfs_acl_setmode( | |||
764 | if (gap && nomask) | 758 | if (gap && nomask) |
765 | iattr.ia_mode |= gap->ae_perm << 3; | 759 | iattr.ia_mode |= gap->ae_perm << 3; |
766 | 760 | ||
767 | return xfs_setattr(xfs_vtoi(vp), &iattr, 0, sys_cred); | 761 | return xfs_setattr(XFS_I(vp), &iattr, 0, sys_cred); |
768 | } | 762 | } |
769 | 763 | ||
770 | /* | 764 | /* |
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h index 323ee94cf831..a4e293b93efa 100644 --- a/fs/xfs/xfs_acl.h +++ b/fs/xfs/xfs_acl.h | |||
@@ -59,14 +59,14 @@ extern struct kmem_zone *xfs_acl_zone; | |||
59 | (zone) = kmem_zone_init(sizeof(xfs_acl_t), (name)) | 59 | (zone) = kmem_zone_init(sizeof(xfs_acl_t), (name)) |
60 | #define xfs_acl_zone_destroy(zone) kmem_zone_destroy(zone) | 60 | #define xfs_acl_zone_destroy(zone) kmem_zone_destroy(zone) |
61 | 61 | ||
62 | extern int xfs_acl_inherit(bhv_vnode_t *, mode_t mode, xfs_acl_t *); | 62 | extern int xfs_acl_inherit(struct inode *, mode_t mode, xfs_acl_t *); |
63 | extern int xfs_acl_iaccess(struct xfs_inode *, mode_t, cred_t *); | 63 | extern int xfs_acl_iaccess(struct xfs_inode *, mode_t, cred_t *); |
64 | extern int xfs_acl_vtoacl(bhv_vnode_t *, xfs_acl_t *, xfs_acl_t *); | 64 | extern int xfs_acl_vtoacl(struct inode *, xfs_acl_t *, xfs_acl_t *); |
65 | extern int xfs_acl_vhasacl_access(bhv_vnode_t *); | 65 | extern int xfs_acl_vhasacl_access(struct inode *); |
66 | extern int xfs_acl_vhasacl_default(bhv_vnode_t *); | 66 | extern int xfs_acl_vhasacl_default(struct inode *); |
67 | extern int xfs_acl_vset(bhv_vnode_t *, void *, size_t, int); | 67 | extern int xfs_acl_vset(struct inode *, void *, size_t, int); |
68 | extern int xfs_acl_vget(bhv_vnode_t *, void *, size_t, int); | 68 | extern int xfs_acl_vget(struct inode *, void *, size_t, int); |
69 | extern int xfs_acl_vremove(bhv_vnode_t *, int); | 69 | extern int xfs_acl_vremove(struct inode *, int); |
70 | 70 | ||
71 | #define _ACL_PERM_INVALID(perm) ((perm) & ~(ACL_READ|ACL_WRITE|ACL_EXECUTE)) | 71 | #define _ACL_PERM_INVALID(perm) ((perm) & ~(ACL_READ|ACL_WRITE|ACL_EXECUTE)) |
72 | 72 | ||
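The ACL interfaces above now take struct inode * directly and reach the XFS inode via XFS_I() rather than the old bhv_vnode_t wrappers. As an aside, a common way such accessors are built is an embedded VFS inode plus container_of(); the sketch below is illustrative only, with hypothetical names, and is not the actual XFS definition (which is not shown in this hunk).

#include <linux/fs.h>
#include <linux/kernel.h>

/* Hypothetical layout: the VFS inode embedded in an fs-private inode. */
struct example_fs_inode {
	/* ... fs-private fields ... */
	struct inode	vfs_inode;
};

/* VFS inode -> fs inode, the role a helper like XFS_I() plays. */
static inline struct example_fs_inode *EXFS_I(struct inode *inode)
{
	return container_of(inode, struct example_fs_inode, vfs_inode);
}

/* fs inode -> VFS inode, the role a helper like VFS_I() plays. */
static inline struct inode *EXFS_VFS_I(struct example_fs_inode *ip)
{
	return &ip->vfs_inode;
}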
diff --git a/fs/xfs/xfs_arch.h b/fs/xfs/xfs_arch.h index f9472a2076d4..0b3b5efe848c 100644 --- a/fs/xfs/xfs_arch.h +++ b/fs/xfs/xfs_arch.h | |||
@@ -92,16 +92,6 @@ | |||
92 | ((__u8*)(pointer))[1] = (((value) ) & 0xff); \ | 92 | ((__u8*)(pointer))[1] = (((value) ) & 0xff); \ |
93 | } | 93 | } |
94 | 94 | ||
95 | /* define generic INT_ macros */ | ||
96 | |||
97 | #define INT_GET(reference,arch) \ | ||
98 | (((arch) == ARCH_NOCONVERT) \ | ||
99 | ? \ | ||
100 | (reference) \ | ||
101 | : \ | ||
102 | INT_SWAP((reference),(reference)) \ | ||
103 | ) | ||
104 | |||
105 | /* does not return a value */ | 95 | /* does not return a value */ |
106 | #define INT_SET(reference,arch,valueref) \ | 96 | #define INT_SET(reference,arch,valueref) \ |
107 | (__builtin_constant_p(valueref) ? \ | 97 | (__builtin_constant_p(valueref) ? \ |
@@ -112,64 +102,6 @@ | |||
112 | ) \ | 102 | ) \ |
113 | ) | 103 | ) |
114 | 104 | ||
115 | /* does not return a value */ | ||
116 | #define INT_MOD_EXPR(reference,arch,code) \ | ||
117 | (((arch) == ARCH_NOCONVERT) \ | ||
118 | ? \ | ||
119 | (void)((reference) code) \ | ||
120 | : \ | ||
121 | (void)( \ | ||
122 | (reference) = INT_GET((reference),arch) , \ | ||
123 | ((reference) code), \ | ||
124 | INT_SET(reference, arch, reference) \ | ||
125 | ) \ | ||
126 | ) | ||
127 | |||
128 | /* does not return a value */ | ||
129 | #define INT_MOD(reference,arch,delta) \ | ||
130 | (void)( \ | ||
131 | INT_MOD_EXPR(reference,arch,+=(delta)) \ | ||
132 | ) | ||
133 | |||
134 | /* | ||
135 | * INT_COPY - copy a value between two locations with the | ||
136 | * _same architecture_ but _potentially different sizes_ | ||
137 | * | ||
138 | * if the types of the two parameters are equal or they are | ||
139 | * in native architecture, a simple copy is done | ||
140 | * | ||
141 | * otherwise, architecture conversions are done | ||
142 | * | ||
143 | */ | ||
144 | |||
145 | /* does not return a value */ | ||
146 | #define INT_COPY(dst,src,arch) \ | ||
147 | ( \ | ||
148 | ((sizeof(dst) == sizeof(src)) || ((arch) == ARCH_NOCONVERT)) \ | ||
149 | ? \ | ||
150 | (void)((dst) = (src)) \ | ||
151 | : \ | ||
152 | INT_SET(dst, arch, INT_GET(src, arch)) \ | ||
153 | ) | ||
154 | |||
155 | /* | ||
156 | * INT_XLATE - copy a value in either direction between two locations | ||
157 | * with different architectures | ||
158 | * | ||
159 | * dir < 0 - copy from memory to buffer (native to arch) | ||
160 | * dir > 0 - copy from buffer to memory (arch to native) | ||
161 | */ | ||
162 | |||
163 | /* does not return a value */ | ||
164 | #define INT_XLATE(buf,mem,dir,arch) {\ | ||
165 | ASSERT(dir); \ | ||
166 | if (dir>0) { \ | ||
167 | (mem)=INT_GET(buf, arch); \ | ||
168 | } else { \ | ||
169 | INT_SET(buf, arch, mem); \ | ||
170 | } \ | ||
171 | } | ||
172 | |||
173 | /* | 105 | /* |
174 | * In directories inode numbers are stored as unaligned arrays of unsigned | 106 | * In directories inode numbers are stored as unaligned arrays of unsigned |
175 | * 8bit integers on disk. | 107 | * 8bit integers on disk. |
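The INT_GET/INT_MOD/INT_COPY/INT_XLATE macros removed above wrapped conditional byte swapping behind an arch argument. Their call sites are converted elsewhere in this series to the standard kernel endian and unaligned-access helpers; a hedged sketch of that open-coded style follows, with hypothetical structure and function names.

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

/* Hypothetical on-disk record with a big-endian counter. */
struct example_disk_rec {
	__be32	d_count;
};

/* What INT_GET(rec->d_count, ARCH_CONVERT) becomes: a plain conversion. */
static inline u32 example_get_count(const struct example_disk_rec *rec)
{
	return be32_to_cpu(rec->d_count);
}

/* What INT_MOD(rec->d_count, ARCH_CONVERT, delta) becomes: read-modify-write. */
static inline void example_mod_count(struct example_disk_rec *rec, int delta)
{
	rec->d_count = cpu_to_be32(be32_to_cpu(rec->d_count) + delta);
}

/* Unaligned on-disk data, such as directory inode numbers, uses get_unaligned_*. */
static inline u64 example_get_unaligned64(const void *p)
{
	return get_unaligned_be64(p);
}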
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index 78de80e3caa2..f7cdc28aff41 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c | |||
@@ -194,6 +194,46 @@ xfs_attr_get( | |||
194 | return(error); | 194 | return(error); |
195 | } | 195 | } |
196 | 196 | ||
197 | /* | ||

198 | * Calculate how many blocks we need for the new attribute. | ||
199 | */ | ||
200 | int | ||
201 | xfs_attr_calc_size( | ||
202 | struct xfs_inode *ip, | ||
203 | int namelen, | ||
204 | int valuelen, | ||
205 | int *local) | ||
206 | { | ||
207 | struct xfs_mount *mp = ip->i_mount; | ||
208 | int size; | ||
209 | int nblks; | ||
210 | |||
211 | /* | ||
212 | * Determine space new attribute will use, and if it would be | ||
213 | * "local" or "remote" (note: local != inline). | ||
214 | */ | ||
215 | size = xfs_attr_leaf_newentsize(namelen, valuelen, | ||
216 | mp->m_sb.sb_blocksize, local); | ||
217 | |||
218 | nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK); | ||
219 | if (*local) { | ||
220 | if (size > (mp->m_sb.sb_blocksize >> 1)) { | ||
221 | /* Double split possible */ | ||
222 | nblks *= 2; | ||
223 | } | ||
224 | } else { | ||
225 | /* | ||
226 | * Out of line attribute, cannot double split, but | ||
227 | * make room for the attribute value itself. | ||
228 | */ | ||
229 | uint dblocks = XFS_B_TO_FSB(mp, valuelen); | ||
230 | nblks += dblocks; | ||
231 | nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK); | ||
232 | } | ||
233 | |||
234 | return nblks; | ||
235 | } | ||
236 | |||
197 | STATIC int | 237 | STATIC int |
198 | xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name, | 238 | xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name, |
199 | char *value, int valuelen, int flags) | 239 | char *value, int valuelen, int flags) |
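The new xfs_attr_calc_size() is consumed by xfs_attr_set_int() in the hunks that follow, where its result sizes both the block and the log reservation. A condensed, hedged sketch of that call sequence, assuming the usual XFS headers are in scope; locking, quota reservation and error unwinding are omitted.

/*
 * Hedged sketch, condensed from the xfs_attr_set_int() hunks below.
 */
STATIC int
example_attr_set_reserve(
	struct xfs_inode	*dp,
	struct xfs_name		*name,
	int			valuelen,
	xfs_da_args_t		*args)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			local;

	/* Blocks needed for the new attribute, local or remote. */
	args->total = xfs_attr_calc_size(dp, name->len, valuelen, &local);

	/* The same figure sizes both the block and the log reservation. */
	return xfs_trans_reserve(args->trans, args->total,
				 XFS_ATTRSET_LOG_RES(mp, args->total), 0,
				 XFS_TRANS_PERM_LOG_RES,
				 XFS_ATTRSET_LOG_COUNT);
}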
@@ -202,10 +242,9 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name, | |||
202 | xfs_fsblock_t firstblock; | 242 | xfs_fsblock_t firstblock; |
203 | xfs_bmap_free_t flist; | 243 | xfs_bmap_free_t flist; |
204 | int error, err2, committed; | 244 | int error, err2, committed; |
205 | int local, size; | ||
206 | uint nblks; | ||
207 | xfs_mount_t *mp = dp->i_mount; | 245 | xfs_mount_t *mp = dp->i_mount; |
208 | int rsvd = (flags & ATTR_ROOT) != 0; | 246 | int rsvd = (flags & ATTR_ROOT) != 0; |
247 | int local; | ||
209 | 248 | ||
210 | /* | 249 | /* |
211 | * Attach the dquots to the inode. | 250 | * Attach the dquots to the inode. |
@@ -241,30 +280,8 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name, | |||
241 | args.whichfork = XFS_ATTR_FORK; | 280 | args.whichfork = XFS_ATTR_FORK; |
242 | args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT; | 281 | args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT; |
243 | 282 | ||
244 | /* | ||
245 | * Determine space new attribute will use, and if it would be | ||
246 | * "local" or "remote" (note: local != inline). | ||
247 | */ | ||
248 | size = xfs_attr_leaf_newentsize(name->len, valuelen, | ||
249 | mp->m_sb.sb_blocksize, &local); | ||
250 | |||
251 | nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK); | ||
252 | if (local) { | ||
253 | if (size > (mp->m_sb.sb_blocksize >> 1)) { | ||
254 | /* Double split possible */ | ||
255 | nblks <<= 1; | ||
256 | } | ||
257 | } else { | ||
258 | uint dblocks = XFS_B_TO_FSB(mp, valuelen); | ||
259 | /* Out of line attribute, cannot double split, but make | ||
260 | * room for the attribute value itself. | ||
261 | */ | ||
262 | nblks += dblocks; | ||
263 | nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK); | ||
264 | } | ||
265 | |||
266 | /* Size is now blocks for attribute data */ | 283 | /* Size is now blocks for attribute data */ |
267 | args.total = nblks; | 284 | args.total = xfs_attr_calc_size(dp, name->len, valuelen, &local); |
268 | 285 | ||
269 | /* | 286 | /* |
270 | * Start our first transaction of the day. | 287 | * Start our first transaction of the day. |
@@ -286,18 +303,17 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name, | |||
286 | if (rsvd) | 303 | if (rsvd) |
287 | args.trans->t_flags |= XFS_TRANS_RESERVE; | 304 | args.trans->t_flags |= XFS_TRANS_RESERVE; |
288 | 305 | ||
289 | if ((error = xfs_trans_reserve(args.trans, (uint) nblks, | 306 | if ((error = xfs_trans_reserve(args.trans, args.total, |
290 | XFS_ATTRSET_LOG_RES(mp, nblks), | 307 | XFS_ATTRSET_LOG_RES(mp, args.total), 0, |
291 | 0, XFS_TRANS_PERM_LOG_RES, | 308 | XFS_TRANS_PERM_LOG_RES, XFS_ATTRSET_LOG_COUNT))) { |
292 | XFS_ATTRSET_LOG_COUNT))) { | ||
293 | xfs_trans_cancel(args.trans, 0); | 309 | xfs_trans_cancel(args.trans, 0); |
294 | return(error); | 310 | return(error); |
295 | } | 311 | } |
296 | xfs_ilock(dp, XFS_ILOCK_EXCL); | 312 | xfs_ilock(dp, XFS_ILOCK_EXCL); |
297 | 313 | ||
298 | error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, args.trans, dp, nblks, 0, | 314 | error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, args.trans, dp, args.total, 0, |
299 | rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : | 315 | rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : |
300 | XFS_QMOPT_RES_REGBLKS); | 316 | XFS_QMOPT_RES_REGBLKS); |
301 | if (error) { | 317 | if (error) { |
302 | xfs_iunlock(dp, XFS_ILOCK_EXCL); | 318 | xfs_iunlock(dp, XFS_ILOCK_EXCL); |
303 | xfs_trans_cancel(args.trans, XFS_TRANS_RELEASE_LOG_RES); | 319 | xfs_trans_cancel(args.trans, XFS_TRANS_RELEASE_LOG_RES); |
@@ -384,7 +400,9 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name, | |||
384 | * Commit the leaf transformation. We'll need another (linked) | 400 | * Commit the leaf transformation. We'll need another (linked) |
385 | * transaction to add the new attribute to the leaf. | 401 | * transaction to add the new attribute to the leaf. |
386 | */ | 402 | */ |
387 | if ((error = xfs_attr_rolltrans(&args.trans, dp))) | 403 | |
404 | error = xfs_trans_roll(&args.trans, dp); | ||
405 | if (error) | ||
388 | goto out; | 406 | goto out; |
389 | 407 | ||
390 | } | 408 | } |
@@ -964,7 +982,8 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) | |||
964 | * Commit the current trans (including the inode) and start | 982 | * Commit the current trans (including the inode) and start |
965 | * a new one. | 983 | * a new one. |
966 | */ | 984 | */ |
967 | if ((error = xfs_attr_rolltrans(&args->trans, dp))) | 985 | error = xfs_trans_roll(&args->trans, dp); |
986 | if (error) | ||
968 | return (error); | 987 | return (error); |
969 | 988 | ||
970 | /* | 989 | /* |
@@ -978,7 +997,8 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) | |||
978 | * Commit the transaction that added the attr name so that | 997 | * Commit the transaction that added the attr name so that |
979 | * later routines can manage their own transactions. | 998 | * later routines can manage their own transactions. |
980 | */ | 999 | */ |
981 | if ((error = xfs_attr_rolltrans(&args->trans, dp))) | 1000 | error = xfs_trans_roll(&args->trans, dp); |
1001 | if (error) | ||
982 | return (error); | 1002 | return (error); |
983 | 1003 | ||
984 | /* | 1004 | /* |
@@ -1067,7 +1087,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) | |||
1067 | /* | 1087 | /* |
1068 | * Commit the remove and start the next trans in series. | 1088 | * Commit the remove and start the next trans in series. |
1069 | */ | 1089 | */ |
1070 | error = xfs_attr_rolltrans(&args->trans, dp); | 1090 | error = xfs_trans_roll(&args->trans, dp); |
1071 | 1091 | ||
1072 | } else if (args->rmtblkno > 0) { | 1092 | } else if (args->rmtblkno > 0) { |
1073 | /* | 1093 | /* |
@@ -1298,7 +1318,8 @@ restart: | |||
1298 | * Commit the node conversion and start the next | 1318 | * Commit the node conversion and start the next |
1299 | * trans in the chain. | 1319 | * trans in the chain. |
1300 | */ | 1320 | */ |
1301 | if ((error = xfs_attr_rolltrans(&args->trans, dp))) | 1321 | error = xfs_trans_roll(&args->trans, dp); |
1322 | if (error) | ||
1302 | goto out; | 1323 | goto out; |
1303 | 1324 | ||
1304 | goto restart; | 1325 | goto restart; |
@@ -1349,7 +1370,8 @@ restart: | |||
1349 | * Commit the leaf addition or btree split and start the next | 1370 | * Commit the leaf addition or btree split and start the next |
1350 | * trans in the chain. | 1371 | * trans in the chain. |
1351 | */ | 1372 | */ |
1352 | if ((error = xfs_attr_rolltrans(&args->trans, dp))) | 1373 | error = xfs_trans_roll(&args->trans, dp); |
1374 | if (error) | ||
1353 | goto out; | 1375 | goto out; |
1354 | 1376 | ||
1355 | /* | 1377 | /* |
@@ -1449,7 +1471,8 @@ restart: | |||
1449 | /* | 1471 | /* |
1450 | * Commit and start the next trans in the chain. | 1472 | * Commit and start the next trans in the chain. |
1451 | */ | 1473 | */ |
1452 | if ((error = xfs_attr_rolltrans(&args->trans, dp))) | 1474 | error = xfs_trans_roll(&args->trans, dp); |
1475 | if (error) | ||
1453 | goto out; | 1476 | goto out; |
1454 | 1477 | ||
1455 | } else if (args->rmtblkno > 0) { | 1478 | } else if (args->rmtblkno > 0) { |
@@ -1581,7 +1604,8 @@ xfs_attr_node_removename(xfs_da_args_t *args) | |||
1581 | /* | 1604 | /* |
1582 | * Commit the Btree join operation and start a new trans. | 1605 | * Commit the Btree join operation and start a new trans. |
1583 | */ | 1606 | */ |
1584 | if ((error = xfs_attr_rolltrans(&args->trans, dp))) | 1607 | error = xfs_trans_roll(&args->trans, dp); |
1608 | if (error) | ||
1585 | goto out; | 1609 | goto out; |
1586 | } | 1610 | } |
1587 | 1611 | ||
@@ -2082,7 +2106,8 @@ xfs_attr_rmtval_set(xfs_da_args_t *args) | |||
2082 | /* | 2106 | /* |
2083 | * Start the next trans in the chain. | 2107 | * Start the next trans in the chain. |
2084 | */ | 2108 | */ |
2085 | if ((error = xfs_attr_rolltrans(&args->trans, dp))) | 2109 | error = xfs_trans_roll(&args->trans, dp); |
2110 | if (error) | ||
2086 | return (error); | 2111 | return (error); |
2087 | } | 2112 | } |
2088 | 2113 | ||
@@ -2232,7 +2257,8 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args) | |||
2232 | /* | 2257 | /* |
2233 | * Close out trans and start the next one in the chain. | 2258 | * Close out trans and start the next one in the chain. |
2234 | */ | 2259 | */ |
2235 | if ((error = xfs_attr_rolltrans(&args->trans, args->dp))) | 2260 | error = xfs_trans_roll(&args->trans, args->dp); |
2261 | if (error) | ||
2236 | return (error); | 2262 | return (error); |
2237 | } | 2263 | } |
2238 | return(0); | 2264 | return(0); |
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h index 8b2d31c19e4d..fb3b2a68b9b9 100644 --- a/fs/xfs/xfs_attr.h +++ b/fs/xfs/xfs_attr.h | |||
@@ -129,6 +129,7 @@ typedef struct xfs_attr_list_context { | |||
129 | /* | 129 | /* |
130 | * Overall external interface routines. | 130 | * Overall external interface routines. |
131 | */ | 131 | */ |
132 | int xfs_attr_calc_size(struct xfs_inode *, int, int, int *); | ||
132 | int xfs_attr_inactive(struct xfs_inode *dp); | 133 | int xfs_attr_inactive(struct xfs_inode *dp); |
133 | int xfs_attr_fetch(struct xfs_inode *, struct xfs_name *, char *, int *, int); | 134 | int xfs_attr_fetch(struct xfs_inode *, struct xfs_name *, char *, int *, int); |
134 | int xfs_attr_rmtval_get(struct xfs_da_args *args); | 135 | int xfs_attr_rmtval_get(struct xfs_da_args *args); |
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c index 23ef5d7c87e1..79da6b2ea99e 100644 --- a/fs/xfs/xfs_attr_leaf.c +++ b/fs/xfs/xfs_attr_leaf.c | |||
@@ -2498,9 +2498,7 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args) | |||
2498 | /* | 2498 | /* |
2499 | * Commit the flag value change and start the next trans in series. | 2499 | * Commit the flag value change and start the next trans in series. |
2500 | */ | 2500 | */ |
2501 | error = xfs_attr_rolltrans(&args->trans, args->dp); | 2501 | return xfs_trans_roll(&args->trans, args->dp); |
2502 | |||
2503 | return(error); | ||
2504 | } | 2502 | } |
2505 | 2503 | ||
2506 | /* | 2504 | /* |
@@ -2547,9 +2545,7 @@ xfs_attr_leaf_setflag(xfs_da_args_t *args) | |||
2547 | /* | 2545 | /* |
2548 | * Commit the flag value change and start the next trans in series. | 2546 | * Commit the flag value change and start the next trans in series. |
2549 | */ | 2547 | */ |
2550 | error = xfs_attr_rolltrans(&args->trans, args->dp); | 2548 | return xfs_trans_roll(&args->trans, args->dp); |
2551 | |||
2552 | return(error); | ||
2553 | } | 2549 | } |
2554 | 2550 | ||
2555 | /* | 2551 | /* |
@@ -2665,7 +2661,7 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args) | |||
2665 | /* | 2661 | /* |
2666 | * Commit the flag value change and start the next trans in series. | 2662 | * Commit the flag value change and start the next trans in series. |
2667 | */ | 2663 | */ |
2668 | error = xfs_attr_rolltrans(&args->trans, args->dp); | 2664 | error = xfs_trans_roll(&args->trans, args->dp); |
2669 | 2665 | ||
2670 | return(error); | 2666 | return(error); |
2671 | } | 2667 | } |
@@ -2723,7 +2719,7 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp) | |||
2723 | /* | 2719 | /* |
2724 | * Commit the invalidate and start the next transaction. | 2720 | * Commit the invalidate and start the next transaction. |
2725 | */ | 2721 | */ |
2726 | error = xfs_attr_rolltrans(trans, dp); | 2722 | error = xfs_trans_roll(trans, dp); |
2727 | 2723 | ||
2728 | return (error); | 2724 | return (error); |
2729 | } | 2725 | } |
@@ -2825,7 +2821,8 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp, | |||
2825 | /* | 2821 | /* |
2826 | * Atomically commit the whole invalidate stuff. | 2822 | * Atomically commit the whole invalidate stuff. |
2827 | */ | 2823 | */ |
2828 | if ((error = xfs_attr_rolltrans(trans, dp))) | 2824 | error = xfs_trans_roll(trans, dp); |
2825 | if (error) | ||
2829 | return (error); | 2826 | return (error); |
2830 | } | 2827 | } |
2831 | 2828 | ||
@@ -2964,7 +2961,8 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp, | |||
2964 | /* | 2961 | /* |
2965 | * Roll to next transaction. | 2962 | * Roll to next transaction. |
2966 | */ | 2963 | */ |
2967 | if ((error = xfs_attr_rolltrans(trans, dp))) | 2964 | error = xfs_trans_roll(trans, dp); |
2965 | if (error) | ||
2968 | return (error); | 2966 | return (error); |
2969 | } | 2967 | } |
2970 | 2968 | ||
@@ -2974,60 +2972,3 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp, | |||
2974 | 2972 | ||
2975 | return(0); | 2973 | return(0); |
2976 | } | 2974 | } |
2977 | |||
2978 | |||
2979 | /* | ||
2980 | * Roll from one trans in the sequence of PERMANENT transactions to the next. | ||
2981 | */ | ||
2982 | int | ||
2983 | xfs_attr_rolltrans(xfs_trans_t **transp, xfs_inode_t *dp) | ||
2984 | { | ||
2985 | xfs_trans_t *trans; | ||
2986 | unsigned int logres, count; | ||
2987 | int error; | ||
2988 | |||
2989 | /* | ||
2990 | * Ensure that the inode is always logged. | ||
2991 | */ | ||
2992 | trans = *transp; | ||
2993 | xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE); | ||
2994 | |||
2995 | /* | ||
2996 | * Copy the critical parameters from one trans to the next. | ||
2997 | */ | ||
2998 | logres = trans->t_log_res; | ||
2999 | count = trans->t_log_count; | ||
3000 | *transp = xfs_trans_dup(trans); | ||
3001 | |||
3002 | /* | ||
3003 | * Commit the current transaction. | ||
3004 | * If this commit failed, then it'd just unlock those items that | ||
3005 | * are not marked ihold. That also means that a filesystem shutdown | ||
3006 | * is in progress. The caller takes the responsibility to cancel | ||
3007 | * the duplicate transaction that gets returned. | ||
3008 | */ | ||
3009 | if ((error = xfs_trans_commit(trans, 0))) | ||
3010 | return (error); | ||
3011 | |||
3012 | trans = *transp; | ||
3013 | |||
3014 | /* | ||
3015 | * Reserve space in the log for the next transaction. | ||
3016 | * This also pushes items in the "AIL", the list of logged items, | ||
3017 | * out to disk if they are taking up space at the tail of the log | ||
3018 | * that we want to use. This requires that either nothing be locked | ||
3019 | * across this call, or that anything that is locked be logged in | ||
3020 | * the prior and the next transactions. | ||
3021 | */ | ||
3022 | error = xfs_trans_reserve(trans, 0, logres, 0, | ||
3023 | XFS_TRANS_PERM_LOG_RES, count); | ||
3024 | /* | ||
3025 | * Ensure that the inode is in the new transaction and locked. | ||
3026 | */ | ||
3027 | if (!error) { | ||
3028 | xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL); | ||
3029 | xfs_trans_ihold(trans, dp); | ||
3030 | } | ||
3031 | return (error); | ||
3032 | |||
3033 | } | ||
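The helper removed above is replaced throughout this series by xfs_trans_roll(), which, per the converted call sites, performs the same commit-and-continue step: log the inode, duplicate and commit the current permanent transaction, re-reserve log space, and rejoin the inode. A hedged caller-side sketch, assuming the usual XFS headers; the function name is illustrative.

/*
 * Hedged caller-side sketch of the replacement pattern.
 */
STATIC int
example_multi_step_op(
	xfs_trans_t	**trans,
	xfs_inode_t	*dp)
{
	int		error;

	/* ... perform one step of the operation under *trans ... */

	/*
	 * Commit this step and continue in a fresh linked transaction;
	 * on success the inode is logged, rejoined and held, which is
	 * what the removed xfs_attr_rolltrans() did by hand above.
	 */
	error = xfs_trans_roll(trans, dp);
	if (error)
		return error;

	/* ... the next step runs under the new *trans ... */
	return 0;
}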
diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h index 5ecf437b7825..83e9af417ca2 100644 --- a/fs/xfs/xfs_attr_leaf.h +++ b/fs/xfs/xfs_attr_leaf.h | |||
@@ -274,6 +274,4 @@ int xfs_attr_leaf_order(struct xfs_dabuf *leaf1_bp, | |||
274 | struct xfs_dabuf *leaf2_bp); | 274 | struct xfs_dabuf *leaf2_bp); |
275 | int xfs_attr_leaf_newentsize(int namelen, int valuelen, int blocksize, | 275 | int xfs_attr_leaf_newentsize(int namelen, int valuelen, int blocksize, |
276 | int *local); | 276 | int *local); |
277 | int xfs_attr_rolltrans(struct xfs_trans **transp, struct xfs_inode *dp); | ||
278 | |||
279 | #endif /* __XFS_ATTR_LEAF_H__ */ | 277 | #endif /* __XFS_ATTR_LEAF_H__ */ |
diff --git a/fs/xfs/xfs_bit.c b/fs/xfs/xfs_bit.c index fab0b6d5a41b..48228848f5ae 100644 --- a/fs/xfs/xfs_bit.c +++ b/fs/xfs/xfs_bit.c | |||
@@ -25,109 +25,6 @@ | |||
25 | * XFS bit manipulation routines, used in non-realtime code. | 25 | * XFS bit manipulation routines, used in non-realtime code. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #ifndef HAVE_ARCH_HIGHBIT | ||
29 | /* | ||
30 | * Index of high bit number in byte, -1 for none set, 0..7 otherwise. | ||
31 | */ | ||
32 | static const char xfs_highbit[256] = { | ||
33 | -1, 0, 1, 1, 2, 2, 2, 2, /* 00 .. 07 */ | ||
34 | 3, 3, 3, 3, 3, 3, 3, 3, /* 08 .. 0f */ | ||
35 | 4, 4, 4, 4, 4, 4, 4, 4, /* 10 .. 17 */ | ||
36 | 4, 4, 4, 4, 4, 4, 4, 4, /* 18 .. 1f */ | ||
37 | 5, 5, 5, 5, 5, 5, 5, 5, /* 20 .. 27 */ | ||
38 | 5, 5, 5, 5, 5, 5, 5, 5, /* 28 .. 2f */ | ||
39 | 5, 5, 5, 5, 5, 5, 5, 5, /* 30 .. 37 */ | ||
40 | 5, 5, 5, 5, 5, 5, 5, 5, /* 38 .. 3f */ | ||
41 | 6, 6, 6, 6, 6, 6, 6, 6, /* 40 .. 47 */ | ||
42 | 6, 6, 6, 6, 6, 6, 6, 6, /* 48 .. 4f */ | ||
43 | 6, 6, 6, 6, 6, 6, 6, 6, /* 50 .. 57 */ | ||
44 | 6, 6, 6, 6, 6, 6, 6, 6, /* 58 .. 5f */ | ||
45 | 6, 6, 6, 6, 6, 6, 6, 6, /* 60 .. 67 */ | ||
46 | 6, 6, 6, 6, 6, 6, 6, 6, /* 68 .. 6f */ | ||
47 | 6, 6, 6, 6, 6, 6, 6, 6, /* 70 .. 77 */ | ||
48 | 6, 6, 6, 6, 6, 6, 6, 6, /* 78 .. 7f */ | ||
49 | 7, 7, 7, 7, 7, 7, 7, 7, /* 80 .. 87 */ | ||
50 | 7, 7, 7, 7, 7, 7, 7, 7, /* 88 .. 8f */ | ||
51 | 7, 7, 7, 7, 7, 7, 7, 7, /* 90 .. 97 */ | ||
52 | 7, 7, 7, 7, 7, 7, 7, 7, /* 98 .. 9f */ | ||
53 | 7, 7, 7, 7, 7, 7, 7, 7, /* a0 .. a7 */ | ||
54 | 7, 7, 7, 7, 7, 7, 7, 7, /* a8 .. af */ | ||
55 | 7, 7, 7, 7, 7, 7, 7, 7, /* b0 .. b7 */ | ||
56 | 7, 7, 7, 7, 7, 7, 7, 7, /* b8 .. bf */ | ||
57 | 7, 7, 7, 7, 7, 7, 7, 7, /* c0 .. c7 */ | ||
58 | 7, 7, 7, 7, 7, 7, 7, 7, /* c8 .. cf */ | ||
59 | 7, 7, 7, 7, 7, 7, 7, 7, /* d0 .. d7 */ | ||
60 | 7, 7, 7, 7, 7, 7, 7, 7, /* d8 .. df */ | ||
61 | 7, 7, 7, 7, 7, 7, 7, 7, /* e0 .. e7 */ | ||
62 | 7, 7, 7, 7, 7, 7, 7, 7, /* e8 .. ef */ | ||
63 | 7, 7, 7, 7, 7, 7, 7, 7, /* f0 .. f7 */ | ||
64 | 7, 7, 7, 7, 7, 7, 7, 7, /* f8 .. ff */ | ||
65 | }; | ||
66 | #endif | ||
67 | |||
68 | /* | ||
69 | * xfs_highbit32: get high bit set out of 32-bit argument, -1 if none set. | ||
70 | */ | ||
71 | inline int | ||
72 | xfs_highbit32( | ||
73 | __uint32_t v) | ||
74 | { | ||
75 | #ifdef HAVE_ARCH_HIGHBIT | ||
76 | return highbit32(v); | ||
77 | #else | ||
78 | int i; | ||
79 | |||
80 | if (v & 0xffff0000) | ||
81 | if (v & 0xff000000) | ||
82 | i = 24; | ||
83 | else | ||
84 | i = 16; | ||
85 | else if (v & 0x0000ffff) | ||
86 | if (v & 0x0000ff00) | ||
87 | i = 8; | ||
88 | else | ||
89 | i = 0; | ||
90 | else | ||
91 | return -1; | ||
92 | return i + xfs_highbit[(v >> i) & 0xff]; | ||
93 | #endif | ||
94 | } | ||
95 | |||
96 | /* | ||
97 | * xfs_lowbit64: get low bit set out of 64-bit argument, -1 if none set. | ||
98 | */ | ||
99 | int | ||
100 | xfs_lowbit64( | ||
101 | __uint64_t v) | ||
102 | { | ||
103 | __uint32_t w = (__uint32_t)v; | ||
104 | int n = 0; | ||
105 | |||
106 | if (w) { /* lower bits */ | ||
107 | n = ffs(w); | ||
108 | } else { /* upper bits */ | ||
109 | w = (__uint32_t)(v >> 32); | ||
110 | if (w && (n = ffs(w))) | ||
111 | n += 32; | ||
112 | } | ||
113 | return n - 1; | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | * xfs_highbit64: get high bit set out of 64-bit argument, -1 if none set. | ||
118 | */ | ||
119 | int | ||
120 | xfs_highbit64( | ||
121 | __uint64_t v) | ||
122 | { | ||
123 | __uint32_t h = (__uint32_t)(v >> 32); | ||
124 | |||
125 | if (h) | ||
126 | return xfs_highbit32(h) + 32; | ||
127 | return xfs_highbit32((__uint32_t)v); | ||
128 | } | ||
129 | |||
130 | |||
131 | /* | 28 | /* |
132 | * Return whether bitmap is empty. | 29 | * Return whether bitmap is empty. |
133 | * Size is number of words in the bitmap, which is padded to word boundary | 30 | * Size is number of words in the bitmap, which is padded to word boundary |
diff --git a/fs/xfs/xfs_bit.h b/fs/xfs/xfs_bit.h index 082641a9782c..8e0e463dae2d 100644 --- a/fs/xfs/xfs_bit.h +++ b/fs/xfs/xfs_bit.h | |||
@@ -47,13 +47,39 @@ static inline __uint64_t xfs_mask64lo(int n) | |||
47 | } | 47 | } |
48 | 48 | ||
49 | /* Get high bit set out of 32-bit argument, -1 if none set */ | 49 | /* Get high bit set out of 32-bit argument, -1 if none set */ |
50 | extern int xfs_highbit32(__uint32_t v); | 50 | static inline int xfs_highbit32(__uint32_t v) |
51 | { | ||
52 | return fls(v) - 1; | ||
53 | } | ||
54 | |||
55 | /* Get high bit set out of 64-bit argument, -1 if none set */ | ||
56 | static inline int xfs_highbit64(__uint64_t v) | ||
57 | { | ||
58 | return fls64(v) - 1; | ||
59 | } | ||
60 | |||
61 | /* Get low bit set out of 32-bit argument, -1 if none set */ | ||
62 | static inline int xfs_lowbit32(__uint32_t v) | ||
63 | { | ||
64 | unsigned long t = v; | ||
65 | return (v) ? find_first_bit(&t, 32) : -1; | ||
66 | } | ||
51 | 67 | ||
52 | /* Get low bit set out of 64-bit argument, -1 if none set */ | 68 | /* Get low bit set out of 64-bit argument, -1 if none set */ |
53 | extern int xfs_lowbit64(__uint64_t v); | 69 | static inline int xfs_lowbit64(__uint64_t v) |
70 | { | ||
71 | __uint32_t w = (__uint32_t)v; | ||
72 | int n = 0; | ||
54 | 73 | ||
55 | /* Get high bit set out of 64-bit argument, -1 if none set */ | 74 | if (w) { /* lower bits */ |
56 | extern int xfs_highbit64(__uint64_t); | 75 | n = ffs(w); |
76 | } else { /* upper bits */ | ||
77 | w = (__uint32_t)(v >> 32); | ||
78 | if (w && (n = ffs(w))) | ||
79 | n += 32; | ||
80 | } | ||
81 | return n - 1; | ||
82 | } | ||
57 | 83 | ||
58 | /* Return whether bitmap is empty (1 == empty) */ | 84 | /* Return whether bitmap is empty (1 == empty) */ |
59 | extern int xfs_bitmap_empty(uint *map, uint size); | 85 | extern int xfs_bitmap_empty(uint *map, uint size); |
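The bit helpers are now trivial wrappers around fls(), fls64(), ffs() and find_first_bit(), which is why the table-driven C implementations in the xfs_bit.c hunk above can go. A small illustrative sanity check of the new inlines, assuming xfs_bit.h is in scope; the function name is hypothetical.

/*
 * fls()/fls64()/ffs() return 1-based bit positions and 0 for a zero
 * argument, so subtracting one yields the documented -1 for "no bit set".
 */
#include <linux/kernel.h>
#include <linux/bitops.h>

static void example_xfs_bit_checks(void)
{
	BUG_ON(xfs_highbit32(0) != -1);			/* fls(0) == 0 */
	BUG_ON(xfs_highbit32(1) != 0);			/* fls(1) == 1 */
	BUG_ON(xfs_highbit32(0x80000000) != 31);
	BUG_ON(xfs_highbit64(1ULL << 40) != 40);
	BUG_ON(xfs_lowbit32(0) != -1);			/* find_first_bit path */
	BUG_ON(xfs_lowbit64(0x90ULL) != 4);		/* ffs(0x90) == 5 */
}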
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 3c4beb3a4326..a1aab9275d5a 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c | |||
@@ -384,14 +384,14 @@ xfs_bmap_count_tree( | |||
384 | int levelin, | 384 | int levelin, |
385 | int *count); | 385 | int *count); |
386 | 386 | ||
387 | STATIC int | 387 | STATIC void |
388 | xfs_bmap_count_leaves( | 388 | xfs_bmap_count_leaves( |
389 | xfs_ifork_t *ifp, | 389 | xfs_ifork_t *ifp, |
390 | xfs_extnum_t idx, | 390 | xfs_extnum_t idx, |
391 | int numrecs, | 391 | int numrecs, |
392 | int *count); | 392 | int *count); |
393 | 393 | ||
394 | STATIC int | 394 | STATIC void |
395 | xfs_bmap_disk_count_leaves( | 395 | xfs_bmap_disk_count_leaves( |
396 | xfs_extnum_t idx, | 396 | xfs_extnum_t idx, |
397 | xfs_bmbt_block_t *block, | 397 | xfs_bmbt_block_t *block, |
@@ -4000,7 +4000,7 @@ xfs_bmap_add_attrfork( | |||
4000 | ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; | 4000 | ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; |
4001 | } | 4001 | } |
4002 | ASSERT(ip->i_d.di_anextents == 0); | 4002 | ASSERT(ip->i_d.di_anextents == 0); |
4003 | VN_HOLD(XFS_ITOV(ip)); | 4003 | IHOLD(ip); |
4004 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 4004 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); |
4005 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 4005 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
4006 | switch (ip->i_d.di_format) { | 4006 | switch (ip->i_d.di_format) { |
@@ -6096,7 +6096,7 @@ xfs_bmap_get_bp( | |||
6096 | tp = cur->bc_tp; | 6096 | tp = cur->bc_tp; |
6097 | licp = &tp->t_items; | 6097 | licp = &tp->t_items; |
6098 | while (!bp && licp != NULL) { | 6098 | while (!bp && licp != NULL) { |
6099 | if (XFS_LIC_ARE_ALL_FREE(licp)) { | 6099 | if (xfs_lic_are_all_free(licp)) { |
6100 | licp = licp->lic_next; | 6100 | licp = licp->lic_next; |
6101 | continue; | 6101 | continue; |
6102 | } | 6102 | } |
@@ -6106,11 +6106,11 @@ xfs_bmap_get_bp( | |||
6106 | xfs_buf_log_item_t *bip; | 6106 | xfs_buf_log_item_t *bip; |
6107 | xfs_buf_t *lbp; | 6107 | xfs_buf_t *lbp; |
6108 | 6108 | ||
6109 | if (XFS_LIC_ISFREE(licp, i)) { | 6109 | if (xfs_lic_isfree(licp, i)) { |
6110 | continue; | 6110 | continue; |
6111 | } | 6111 | } |
6112 | 6112 | ||
6113 | lidp = XFS_LIC_SLOT(licp, i); | 6113 | lidp = xfs_lic_slot(licp, i); |
6114 | lip = lidp->lid_item; | 6114 | lip = lidp->lid_item; |
6115 | if (lip->li_type != XFS_LI_BUF) | 6115 | if (lip->li_type != XFS_LI_BUF) |
6116 | continue; | 6116 | continue; |
@@ -6367,13 +6367,9 @@ xfs_bmap_count_blocks( | |||
6367 | mp = ip->i_mount; | 6367 | mp = ip->i_mount; |
6368 | ifp = XFS_IFORK_PTR(ip, whichfork); | 6368 | ifp = XFS_IFORK_PTR(ip, whichfork); |
6369 | if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) { | 6369 | if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) { |
6370 | if (unlikely(xfs_bmap_count_leaves(ifp, 0, | 6370 | xfs_bmap_count_leaves(ifp, 0, |
6371 | ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t), | 6371 | ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t), |
6372 | count) < 0)) { | 6372 | count); |
6373 | XFS_ERROR_REPORT("xfs_bmap_count_blocks(1)", | ||
6374 | XFS_ERRLEVEL_LOW, mp); | ||
6375 | return XFS_ERROR(EFSCORRUPTED); | ||
6376 | } | ||
6377 | return 0; | 6373 | return 0; |
6378 | } | 6374 | } |
6379 | 6375 | ||
@@ -6454,13 +6450,7 @@ xfs_bmap_count_tree( | |||
6454 | for (;;) { | 6450 | for (;;) { |
6455 | nextbno = be64_to_cpu(block->bb_rightsib); | 6451 | nextbno = be64_to_cpu(block->bb_rightsib); |
6456 | numrecs = be16_to_cpu(block->bb_numrecs); | 6452 | numrecs = be16_to_cpu(block->bb_numrecs); |
6457 | if (unlikely(xfs_bmap_disk_count_leaves(0, | 6453 | xfs_bmap_disk_count_leaves(0, block, numrecs, count); |
6458 | block, numrecs, count) < 0)) { | ||
6459 | xfs_trans_brelse(tp, bp); | ||
6460 | XFS_ERROR_REPORT("xfs_bmap_count_tree(2)", | ||
6461 | XFS_ERRLEVEL_LOW, mp); | ||
6462 | return XFS_ERROR(EFSCORRUPTED); | ||
6463 | } | ||
6464 | xfs_trans_brelse(tp, bp); | 6454 | xfs_trans_brelse(tp, bp); |
6465 | if (nextbno == NULLFSBLOCK) | 6455 | if (nextbno == NULLFSBLOCK) |
6466 | break; | 6456 | break; |
@@ -6478,7 +6468,7 @@ xfs_bmap_count_tree( | |||
6478 | /* | 6468 | /* |
6479 | * Count leaf blocks given a range of extent records. | 6469 | * Count leaf blocks given a range of extent records. |
6480 | */ | 6470 | */ |
6481 | STATIC int | 6471 | STATIC void |
6482 | xfs_bmap_count_leaves( | 6472 | xfs_bmap_count_leaves( |
6483 | xfs_ifork_t *ifp, | 6473 | xfs_ifork_t *ifp, |
6484 | xfs_extnum_t idx, | 6474 | xfs_extnum_t idx, |
@@ -6491,14 +6481,13 @@ xfs_bmap_count_leaves( | |||
6491 | xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b); | 6481 | xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b); |
6492 | *count += xfs_bmbt_get_blockcount(frp); | 6482 | *count += xfs_bmbt_get_blockcount(frp); |
6493 | } | 6483 | } |
6494 | return 0; | ||
6495 | } | 6484 | } |
6496 | 6485 | ||
6497 | /* | 6486 | /* |
6498 | * Count leaf blocks given a range of extent records originally | 6487 | * Count leaf blocks given a range of extent records originally |
6499 | * in btree format. | 6488 | * in btree format. |
6500 | */ | 6489 | */ |
6501 | STATIC int | 6490 | STATIC void |
6502 | xfs_bmap_disk_count_leaves( | 6491 | xfs_bmap_disk_count_leaves( |
6503 | xfs_extnum_t idx, | 6492 | xfs_extnum_t idx, |
6504 | xfs_bmbt_block_t *block, | 6493 | xfs_bmbt_block_t *block, |
@@ -6512,5 +6501,4 @@ xfs_bmap_disk_count_leaves( | |||
6512 | frp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, idx + b); | 6501 | frp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, idx + b); |
6513 | *count += xfs_bmbt_disk_get_blockcount(frp); | 6502 | *count += xfs_bmbt_disk_get_blockcount(frp); |
6514 | } | 6503 | } |
6515 | return 0; | ||
6516 | } | 6504 | } |
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index aeb87ca69fcc..cc593a84c345 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c | |||
@@ -46,38 +46,11 @@ kmem_zone_t *xfs_btree_cur_zone; | |||
46 | /* | 46 | /* |
47 | * Btree magic numbers. | 47 | * Btree magic numbers. |
48 | */ | 48 | */ |
49 | const __uint32_t xfs_magics[XFS_BTNUM_MAX] = | 49 | const __uint32_t xfs_magics[XFS_BTNUM_MAX] = { |
50 | { | ||
51 | XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC | 50 | XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC |
52 | }; | 51 | }; |
53 | 52 | ||
54 | /* | 53 | /* |
55 | * Prototypes for internal routines. | ||
56 | */ | ||
57 | |||
58 | /* | ||
59 | * Checking routine: return maxrecs for the block. | ||
60 | */ | ||
61 | STATIC int /* number of records fitting in block */ | ||
62 | xfs_btree_maxrecs( | ||
63 | xfs_btree_cur_t *cur, /* btree cursor */ | ||
64 | xfs_btree_block_t *block);/* generic btree block pointer */ | ||
65 | |||
66 | /* | ||
67 | * Internal routines. | ||
68 | */ | ||
69 | |||
70 | /* | ||
71 | * Retrieve the block pointer from the cursor at the given level. | ||
72 | * This may be a bmap btree root or from a buffer. | ||
73 | */ | ||
74 | STATIC xfs_btree_block_t * /* generic btree block pointer */ | ||
75 | xfs_btree_get_block( | ||
76 | xfs_btree_cur_t *cur, /* btree cursor */ | ||
77 | int level, /* level in btree */ | ||
78 | struct xfs_buf **bpp); /* buffer containing the block */ | ||
79 | |||
80 | /* | ||
81 | * Checking routine: return maxrecs for the block. | 54 | * Checking routine: return maxrecs for the block. |
82 | */ | 55 | */ |
83 | STATIC int /* number of records fitting in block */ | 56 | STATIC int /* number of records fitting in block */ |
@@ -457,35 +430,6 @@ xfs_btree_dup_cursor( | |||
457 | } | 430 | } |
458 | 431 | ||
459 | /* | 432 | /* |
460 | * Change the cursor to point to the first record at the given level. | ||
461 | * Other levels are unaffected. | ||
462 | */ | ||
463 | int /* success=1, failure=0 */ | ||
464 | xfs_btree_firstrec( | ||
465 | xfs_btree_cur_t *cur, /* btree cursor */ | ||
466 | int level) /* level to change */ | ||
467 | { | ||
468 | xfs_btree_block_t *block; /* generic btree block pointer */ | ||
469 | xfs_buf_t *bp; /* buffer containing block */ | ||
470 | |||
471 | /* | ||
472 | * Get the block pointer for this level. | ||
473 | */ | ||
474 | block = xfs_btree_get_block(cur, level, &bp); | ||
475 | xfs_btree_check_block(cur, block, level, bp); | ||
476 | /* | ||
477 | * It's empty, there is no such record. | ||
478 | */ | ||
479 | if (!block->bb_h.bb_numrecs) | ||
480 | return 0; | ||
481 | /* | ||
482 | * Set the ptr value to 1, that's the first record/key. | ||
483 | */ | ||
484 | cur->bc_ptrs[level] = 1; | ||
485 | return 1; | ||
486 | } | ||
487 | |||
488 | /* | ||
489 | * Retrieve the block pointer from the cursor at the given level. | 433 | * Retrieve the block pointer from the cursor at the given level. |
490 | * This may be a bmap btree root or from a buffer. | 434 | * This may be a bmap btree root or from a buffer. |
491 | */ | 435 | */ |
@@ -626,6 +570,13 @@ xfs_btree_init_cursor( | |||
626 | cur->bc_private.a.agbp = agbp; | 570 | cur->bc_private.a.agbp = agbp; |
627 | cur->bc_private.a.agno = agno; | 571 | cur->bc_private.a.agno = agno; |
628 | break; | 572 | break; |
573 | case XFS_BTNUM_INO: | ||
574 | /* | ||
575 | * Inode allocation btree fields. | ||
576 | */ | ||
577 | cur->bc_private.a.agbp = agbp; | ||
578 | cur->bc_private.a.agno = agno; | ||
579 | break; | ||
629 | case XFS_BTNUM_BMAP: | 580 | case XFS_BTNUM_BMAP: |
630 | /* | 581 | /* |
631 | * Bmap btree fields. | 582 | * Bmap btree fields. |
@@ -638,13 +589,6 @@ xfs_btree_init_cursor( | |||
638 | cur->bc_private.b.flags = 0; | 589 | cur->bc_private.b.flags = 0; |
639 | cur->bc_private.b.whichfork = whichfork; | 590 | cur->bc_private.b.whichfork = whichfork; |
640 | break; | 591 | break; |
641 | case XFS_BTNUM_INO: | ||
642 | /* | ||
643 | * Inode allocation btree fields. | ||
644 | */ | ||
645 | cur->bc_private.i.agbp = agbp; | ||
646 | cur->bc_private.i.agno = agno; | ||
647 | break; | ||
648 | default: | 592 | default: |
649 | ASSERT(0); | 593 | ASSERT(0); |
650 | } | 594 | } |
@@ -671,6 +615,35 @@ xfs_btree_islastblock( | |||
671 | } | 615 | } |
672 | 616 | ||
673 | /* | 617 | /* |
618 | * Change the cursor to point to the first record at the given level. | ||
619 | * Other levels are unaffected. | ||
620 | */ | ||
621 | int /* success=1, failure=0 */ | ||
622 | xfs_btree_firstrec( | ||
623 | xfs_btree_cur_t *cur, /* btree cursor */ | ||
624 | int level) /* level to change */ | ||
625 | { | ||
626 | xfs_btree_block_t *block; /* generic btree block pointer */ | ||
627 | xfs_buf_t *bp; /* buffer containing block */ | ||
628 | |||
629 | /* | ||
630 | * Get the block pointer for this level. | ||
631 | */ | ||
632 | block = xfs_btree_get_block(cur, level, &bp); | ||
633 | xfs_btree_check_block(cur, block, level, bp); | ||
634 | /* | ||
635 | * It's empty, there is no such record. | ||
636 | */ | ||
637 | if (!block->bb_h.bb_numrecs) | ||
638 | return 0; | ||
639 | /* | ||
640 | * Set the ptr value to 1, that's the first record/key. | ||
641 | */ | ||
642 | cur->bc_ptrs[level] = 1; | ||
643 | return 1; | ||
644 | } | ||
645 | |||
646 | /* | ||
674 | * Change the cursor to point to the last record in the current block | 647 | * Change the cursor to point to the last record in the current block |
675 | * at the given level. Other levels are unaffected. | 648 | * at the given level. Other levels are unaffected. |
676 | */ | 649 | */ |
@@ -890,12 +863,12 @@ xfs_btree_readahead_core( | |||
890 | case XFS_BTNUM_INO: | 863 | case XFS_BTNUM_INO: |
891 | i = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[lev]); | 864 | i = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[lev]); |
892 | if ((lr & XFS_BTCUR_LEFTRA) && be32_to_cpu(i->bb_leftsib) != NULLAGBLOCK) { | 865 | if ((lr & XFS_BTCUR_LEFTRA) && be32_to_cpu(i->bb_leftsib) != NULLAGBLOCK) { |
893 | xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno, | 866 | xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, |
894 | be32_to_cpu(i->bb_leftsib), 1); | 867 | be32_to_cpu(i->bb_leftsib), 1); |
895 | rval++; | 868 | rval++; |
896 | } | 869 | } |
897 | if ((lr & XFS_BTCUR_RIGHTRA) && be32_to_cpu(i->bb_rightsib) != NULLAGBLOCK) { | 870 | if ((lr & XFS_BTCUR_RIGHTRA) && be32_to_cpu(i->bb_rightsib) != NULLAGBLOCK) { |
898 | xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno, | 871 | xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, |
899 | be32_to_cpu(i->bb_rightsib), 1); | 872 | be32_to_cpu(i->bb_rightsib), 1); |
900 | rval++; | 873 | rval++; |
901 | } | 874 | } |
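The readahead hunk above, together with the xfs_btree.h change that follows, folds the inode btree into the same cursor-private struct as the by-block and by-count allocation btrees. A minimal sketch of what that buys callers, assuming only the fields visible in these hunks (the helper name is hypothetical):

/*
 * Illustrative only: BNO, CNT and now INO btrees share bc_private.a,
 * so per-AG code can read the AG number and AGF/AGI buffer without
 * switching on cur->bc_btnum.
 */
static xfs_agnumber_t
example_cur_agno(
	xfs_btree_cur_t	*cur)	/* btree cursor for any per-AG btree */
{
	ASSERT(cur->bc_btnum == XFS_BTNUM_BNO ||
	       cur->bc_btnum == XFS_BTNUM_CNT ||
	       cur->bc_btnum == XFS_BTNUM_INO);
	return cur->bc_private.a.agno;
}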
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 7440b78f9cec..1f528a2a3754 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h | |||
@@ -158,8 +158,8 @@ typedef struct xfs_btree_cur | |||
158 | __uint8_t bc_blocklog; /* log2(blocksize) of btree blocks */ | 158 | __uint8_t bc_blocklog; /* log2(blocksize) of btree blocks */ |
159 | xfs_btnum_t bc_btnum; /* identifies which btree type */ | 159 | xfs_btnum_t bc_btnum; /* identifies which btree type */ |
160 | union { | 160 | union { |
161 | struct { /* needed for BNO, CNT */ | 161 | struct { /* needed for BNO, CNT, INO */ |
162 | struct xfs_buf *agbp; /* agf buffer pointer */ | 162 | struct xfs_buf *agbp; /* agf/agi buffer pointer */ |
163 | xfs_agnumber_t agno; /* ag number */ | 163 | xfs_agnumber_t agno; /* ag number */ |
164 | } a; | 164 | } a; |
165 | struct { /* needed for BMAP */ | 165 | struct { /* needed for BMAP */ |
@@ -172,10 +172,6 @@ typedef struct xfs_btree_cur | |||
172 | char flags; /* flags */ | 172 | char flags; /* flags */ |
173 | #define XFS_BTCUR_BPRV_WASDEL 1 /* was delayed */ | 173 | #define XFS_BTCUR_BPRV_WASDEL 1 /* was delayed */ |
174 | } b; | 174 | } b; |
175 | struct { /* needed for INO */ | ||
176 | struct xfs_buf *agbp; /* agi buffer pointer */ | ||
177 | xfs_agnumber_t agno; /* ag number */ | ||
178 | } i; | ||
179 | } bc_private; /* per-btree type data */ | 175 | } bc_private; /* per-btree type data */ |
180 | } xfs_btree_cur_t; | 176 | } xfs_btree_cur_t; |
181 | 177 | ||
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index d86ca2c03a70..608c30c3f76b 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c | |||
@@ -737,7 +737,7 @@ xfs_buf_item_init( | |||
737 | bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp)); | 737 | bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp)); |
738 | bip->bli_format.blf_map_size = map_size; | 738 | bip->bli_format.blf_map_size = map_size; |
739 | #ifdef XFS_BLI_TRACE | 739 | #ifdef XFS_BLI_TRACE |
740 | bip->bli_trace = ktrace_alloc(XFS_BLI_TRACE_SIZE, KM_SLEEP); | 740 | bip->bli_trace = ktrace_alloc(XFS_BLI_TRACE_SIZE, KM_NOFS); |
741 | #endif | 741 | #endif |
742 | 742 | ||
743 | #ifdef XFS_TRANS_DEBUG | 743 | #ifdef XFS_TRANS_DEBUG |
@@ -1056,7 +1056,7 @@ xfs_buf_iodone_callbacks( | |||
1056 | anyway. */ | 1056 | anyway. */ |
1057 | XFS_BUF_SET_BRELSE_FUNC(bp,xfs_buf_error_relse); | 1057 | XFS_BUF_SET_BRELSE_FUNC(bp,xfs_buf_error_relse); |
1058 | XFS_BUF_DONE(bp); | 1058 | XFS_BUF_DONE(bp); |
1059 | XFS_BUF_V_IODONESEMA(bp); | 1059 | XFS_BUF_FINISH_IOWAIT(bp); |
1060 | } | 1060 | } |
1061 | return; | 1061 | return; |
1062 | } | 1062 | } |
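The trace-buffer allocation above moves from KM_SLEEP to KM_NOFS, a pattern repeated throughout this merge (xfs_filestream.c, xfs_iread, the extent-list helpers in xfs_inode.c). A hedged one-liner showing the rule; the helper is hypothetical and the size argument is arbitrary:

/*
 * Sketch only. KM_SLEEP may recurse into filesystem reclaim while
 * waiting for memory, which can deadlock if the caller already holds
 * inode or buffer locks; KM_NOFS still sleeps but forbids that
 * recursion.
 */
static void *
example_alloc_under_fs_locks(size_t nbytes)
{
	return kmem_alloc(nbytes, KM_NOFS);	/* not KM_SLEEP here */
}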
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c index 2211e885ef24..760f4c5b5160 100644 --- a/fs/xfs/xfs_dfrag.c +++ b/fs/xfs/xfs_dfrag.c | |||
@@ -128,10 +128,8 @@ xfs_swap_extents( | |||
128 | xfs_swapext_t *sxp) | 128 | xfs_swapext_t *sxp) |
129 | { | 129 | { |
130 | xfs_mount_t *mp; | 130 | xfs_mount_t *mp; |
131 | xfs_inode_t *ips[2]; | ||
132 | xfs_trans_t *tp; | 131 | xfs_trans_t *tp; |
133 | xfs_bstat_t *sbp = &sxp->sx_stat; | 132 | xfs_bstat_t *sbp = &sxp->sx_stat; |
134 | bhv_vnode_t *vp, *tvp; | ||
135 | xfs_ifork_t *tempifp, *ifp, *tifp; | 133 | xfs_ifork_t *tempifp, *ifp, *tifp; |
136 | int ilf_fields, tilf_fields; | 134 | int ilf_fields, tilf_fields; |
137 | static uint lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL; | 135 | static uint lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL; |
@@ -150,19 +148,8 @@ xfs_swap_extents( | |||
150 | } | 148 | } |
151 | 149 | ||
152 | sbp = &sxp->sx_stat; | 150 | sbp = &sxp->sx_stat; |
153 | vp = XFS_ITOV(ip); | ||
154 | tvp = XFS_ITOV(tip); | ||
155 | |||
156 | /* Lock in i_ino order */ | ||
157 | if (ip->i_ino < tip->i_ino) { | ||
158 | ips[0] = ip; | ||
159 | ips[1] = tip; | ||
160 | } else { | ||
161 | ips[0] = tip; | ||
162 | ips[1] = ip; | ||
163 | } | ||
164 | 151 | ||
165 | xfs_lock_inodes(ips, 2, lock_flags); | 152 | xfs_lock_two_inodes(ip, tip, lock_flags); |
166 | locked = 1; | 153 | locked = 1; |
167 | 154 | ||
168 | /* Verify that both files have the same format */ | 155 | /* Verify that both files have the same format */ |
@@ -184,7 +171,7 @@ xfs_swap_extents( | |||
184 | goto error0; | 171 | goto error0; |
185 | } | 172 | } |
186 | 173 | ||
187 | if (VN_CACHED(tvp) != 0) { | 174 | if (VN_CACHED(VFS_I(tip)) != 0) { |
188 | xfs_inval_cached_trace(tip, 0, -1, 0, -1); | 175 | xfs_inval_cached_trace(tip, 0, -1, 0, -1); |
189 | error = xfs_flushinval_pages(tip, 0, -1, | 176 | error = xfs_flushinval_pages(tip, 0, -1, |
190 | FI_REMAPF_LOCKED); | 177 | FI_REMAPF_LOCKED); |
@@ -193,7 +180,7 @@ xfs_swap_extents( | |||
193 | } | 180 | } |
194 | 181 | ||
195 | /* Verify O_DIRECT for ftmp */ | 182 | /* Verify O_DIRECT for ftmp */ |
196 | if (VN_CACHED(tvp) != 0) { | 183 | if (VN_CACHED(VFS_I(tip)) != 0) { |
197 | error = XFS_ERROR(EINVAL); | 184 | error = XFS_ERROR(EINVAL); |
198 | goto error0; | 185 | goto error0; |
199 | } | 186 | } |
@@ -237,7 +224,7 @@ xfs_swap_extents( | |||
237 | * vop_read (or write in the case of autogrow) they block on the iolock | 224 | * vop_read (or write in the case of autogrow) they block on the iolock |
238 | * until we have switched the extents. | 225 | * until we have switched the extents. |
239 | */ | 226 | */ |
240 | if (VN_MAPPED(vp)) { | 227 | if (VN_MAPPED(VFS_I(ip))) { |
241 | error = XFS_ERROR(EBUSY); | 228 | error = XFS_ERROR(EBUSY); |
242 | goto error0; | 229 | goto error0; |
243 | } | 230 | } |
@@ -265,7 +252,7 @@ xfs_swap_extents( | |||
265 | locked = 0; | 252 | locked = 0; |
266 | goto error0; | 253 | goto error0; |
267 | } | 254 | } |
268 | xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL); | 255 | xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL); |
269 | 256 | ||
270 | /* | 257 | /* |
271 | * Count the number of extended attribute blocks | 258 | * Count the number of extended attribute blocks |
@@ -350,15 +337,11 @@ xfs_swap_extents( | |||
350 | break; | 337 | break; |
351 | } | 338 | } |
352 | 339 | ||
353 | /* | ||
354 | * Increment vnode ref counts since xfs_trans_commit & | ||
355 | * xfs_trans_cancel will both unlock the inodes and | ||
356 | * decrement the associated ref counts. | ||
357 | */ | ||
358 | VN_HOLD(vp); | ||
359 | VN_HOLD(tvp); | ||
360 | 340 | ||
341 | IHOLD(ip); | ||
361 | xfs_trans_ijoin(tp, ip, lock_flags); | 342 | xfs_trans_ijoin(tp, ip, lock_flags); |
343 | |||
344 | IHOLD(tip); | ||
362 | xfs_trans_ijoin(tp, tip, lock_flags); | 345 | xfs_trans_ijoin(tp, tip, lock_flags); |
363 | 346 | ||
364 | xfs_trans_log_inode(tp, ip, ilf_fields); | 347 | xfs_trans_log_inode(tp, ip, ilf_fields); |
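xfs_swap_extents() no longer builds an ips[] array and calls xfs_lock_inodes(); it uses the new xfs_lock_two_inodes() declared in the xfs_inode.h hunk further down. Its real body is not part of this diff; the following is only a sketch of the ordering rule the removed lines encoded, under a hypothetical name:

/*
 * Sketch: lock two inodes in ascending i_ino order so concurrent
 * callers cannot deadlock against each other. Not the tree's
 * xfs_lock_two_inodes() implementation.
 */
static void
example_lock_two_inodes(
	xfs_inode_t	*ip0,
	xfs_inode_t	*ip1,
	uint		lock_mode)
{
	xfs_inode_t	*temp;

	ASSERT(ip0->i_ino != ip1->i_ino);
	if (ip0->i_ino > ip1->i_ino) {
		temp = ip0;
		ip0 = ip1;
		ip1 = temp;
	}
	xfs_ilock(ip0, lock_mode);
	xfs_ilock(ip1, lock_mode);
}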
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c index f66756cfb5e8..f227ecd1a294 100644 --- a/fs/xfs/xfs_error.c +++ b/fs/xfs/xfs_error.c | |||
@@ -58,9 +58,6 @@ xfs_error_trap(int e) | |||
58 | } | 58 | } |
59 | return e; | 59 | return e; |
60 | } | 60 | } |
61 | #endif | ||
62 | |||
63 | #if (defined(DEBUG) || defined(INDUCE_IO_ERROR)) | ||
64 | 61 | ||
65 | int xfs_etest[XFS_NUM_INJECT_ERROR]; | 62 | int xfs_etest[XFS_NUM_INJECT_ERROR]; |
66 | int64_t xfs_etest_fsid[XFS_NUM_INJECT_ERROR]; | 63 | int64_t xfs_etest_fsid[XFS_NUM_INJECT_ERROR]; |
@@ -154,7 +151,7 @@ xfs_errortag_clearall(xfs_mount_t *mp, int loud) | |||
154 | 151 | ||
155 | return 0; | 152 | return 0; |
156 | } | 153 | } |
157 | #endif /* DEBUG || INDUCE_IO_ERROR */ | 154 | #endif /* DEBUG */ |
158 | 155 | ||
159 | static void | 156 | static void |
160 | xfs_fs_vcmn_err(int level, xfs_mount_t *mp, char *fmt, va_list ap) | 157 | xfs_fs_vcmn_err(int level, xfs_mount_t *mp, char *fmt, va_list ap) |
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h index d8559d132efa..11543f10b0c6 100644 --- a/fs/xfs/xfs_error.h +++ b/fs/xfs/xfs_error.h | |||
@@ -125,22 +125,14 @@ extern void xfs_corruption_error(char *tag, int level, struct xfs_mount *mp, | |||
125 | #define XFS_RANDOM_DIOWRITE_IOERR (XFS_RANDOM_DEFAULT/10) | 125 | #define XFS_RANDOM_DIOWRITE_IOERR (XFS_RANDOM_DEFAULT/10) |
126 | #define XFS_RANDOM_BMAPIFORMAT XFS_RANDOM_DEFAULT | 126 | #define XFS_RANDOM_BMAPIFORMAT XFS_RANDOM_DEFAULT |
127 | 127 | ||
128 | #if (defined(DEBUG) || defined(INDUCE_IO_ERROR)) | 128 | #ifdef DEBUG |
129 | extern int xfs_error_test(int, int *, char *, int, char *, unsigned long); | 129 | extern int xfs_error_test(int, int *, char *, int, char *, unsigned long); |
130 | 130 | ||
131 | #define XFS_NUM_INJECT_ERROR 10 | 131 | #define XFS_NUM_INJECT_ERROR 10 |
132 | |||
133 | #ifdef __ANSI_CPP__ | ||
134 | #define XFS_TEST_ERROR(expr, mp, tag, rf) \ | ||
135 | ((expr) || \ | ||
136 | xfs_error_test((tag), (mp)->m_fixedfsid, #expr, __LINE__, __FILE__, \ | ||
137 | (rf))) | ||
138 | #else | ||
139 | #define XFS_TEST_ERROR(expr, mp, tag, rf) \ | 132 | #define XFS_TEST_ERROR(expr, mp, tag, rf) \ |
140 | ((expr) || \ | 133 | ((expr) || \ |
141 | xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \ | 134 | xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \ |
142 | (rf))) | 135 | (rf))) |
143 | #endif /* __ANSI_CPP__ */ | ||
144 | 136 | ||
145 | extern int xfs_errortag_add(int error_tag, xfs_mount_t *mp); | 137 | extern int xfs_errortag_add(int error_tag, xfs_mount_t *mp); |
146 | extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud); | 138 | extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud); |
@@ -148,7 +140,7 @@ extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud); | |||
148 | #define XFS_TEST_ERROR(expr, mp, tag, rf) (expr) | 140 | #define XFS_TEST_ERROR(expr, mp, tag, rf) (expr) |
149 | #define xfs_errortag_add(tag, mp) (ENOSYS) | 141 | #define xfs_errortag_add(tag, mp) (ENOSYS) |
150 | #define xfs_errortag_clearall(mp, loud) (ENOSYS) | 142 | #define xfs_errortag_clearall(mp, loud) (ENOSYS) |
151 | #endif /* (DEBUG || INDUCE_IO_ERROR) */ | 143 | #endif /* DEBUG */ |
152 | 144 | ||
153 | /* | 145 | /* |
154 | * XFS panic tags -- allow a call to xfs_cmn_err() be turned into | 146 | * XFS panic tags -- allow a call to xfs_cmn_err() be turned into |
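With the __ANSI_CPP__ branch and INDUCE_IO_ERROR gone, error injection is a plain DEBUG facility. A hedged sketch of a typical call site; the error tag is assumed to pair with the XFS_RANDOM_BMAPIFORMAT constant visible above, and the surrounding check is illustrative:

/*
 * Illustrative call site (assumed tag/constant pairing). In DEBUG
 * builds xfs_error_test() can force this branch for an injected
 * error tag; otherwise the macro collapses to the bare expression.
 */
if (XFS_TEST_ERROR(ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS,
		   mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT)) {
	XFS_ERROR_REPORT("example check", XFS_ERRLEVEL_LOW, mp);
	return XFS_ERROR(EFSCORRUPTED);
}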
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c index c38fd14fca29..f3bb75da384e 100644 --- a/fs/xfs/xfs_filestream.c +++ b/fs/xfs/xfs_filestream.c | |||
@@ -400,7 +400,7 @@ xfs_filestream_init(void) | |||
400 | if (!item_zone) | 400 | if (!item_zone) |
401 | return -ENOMEM; | 401 | return -ENOMEM; |
402 | #ifdef XFS_FILESTREAMS_TRACE | 402 | #ifdef XFS_FILESTREAMS_TRACE |
403 | xfs_filestreams_trace_buf = ktrace_alloc(XFS_FSTRM_KTRACE_SIZE, KM_SLEEP); | 403 | xfs_filestreams_trace_buf = ktrace_alloc(XFS_FSTRM_KTRACE_SIZE, KM_NOFS); |
404 | #endif | 404 | #endif |
405 | return 0; | 405 | return 0; |
406 | } | 406 | } |
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index e5310c90e50f..83502f3edef0 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c | |||
@@ -181,7 +181,7 @@ xfs_inobt_delrec( | |||
181 | * then we can get rid of this level. | 181 | * then we can get rid of this level. |
182 | */ | 182 | */ |
183 | if (numrecs == 1 && level > 0) { | 183 | if (numrecs == 1 && level > 0) { |
184 | agbp = cur->bc_private.i.agbp; | 184 | agbp = cur->bc_private.a.agbp; |
185 | agi = XFS_BUF_TO_AGI(agbp); | 185 | agi = XFS_BUF_TO_AGI(agbp); |
186 | /* | 186 | /* |
187 | * pp is still set to the first pointer in the block. | 187 | * pp is still set to the first pointer in the block. |
@@ -194,7 +194,7 @@ xfs_inobt_delrec( | |||
194 | * Free the block. | 194 | * Free the block. |
195 | */ | 195 | */ |
196 | if ((error = xfs_free_extent(cur->bc_tp, | 196 | if ((error = xfs_free_extent(cur->bc_tp, |
197 | XFS_AGB_TO_FSB(mp, cur->bc_private.i.agno, bno), 1))) | 197 | XFS_AGB_TO_FSB(mp, cur->bc_private.a.agno, bno), 1))) |
198 | return error; | 198 | return error; |
199 | xfs_trans_binval(cur->bc_tp, bp); | 199 | xfs_trans_binval(cur->bc_tp, bp); |
200 | xfs_ialloc_log_agi(cur->bc_tp, agbp, | 200 | xfs_ialloc_log_agi(cur->bc_tp, agbp, |
@@ -379,7 +379,7 @@ xfs_inobt_delrec( | |||
379 | rrecs = be16_to_cpu(right->bb_numrecs); | 379 | rrecs = be16_to_cpu(right->bb_numrecs); |
380 | rbp = bp; | 380 | rbp = bp; |
381 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, | 381 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, |
382 | cur->bc_private.i.agno, lbno, 0, &lbp, | 382 | cur->bc_private.a.agno, lbno, 0, &lbp, |
383 | XFS_INO_BTREE_REF))) | 383 | XFS_INO_BTREE_REF))) |
384 | return error; | 384 | return error; |
385 | left = XFS_BUF_TO_INOBT_BLOCK(lbp); | 385 | left = XFS_BUF_TO_INOBT_BLOCK(lbp); |
@@ -401,7 +401,7 @@ xfs_inobt_delrec( | |||
401 | lrecs = be16_to_cpu(left->bb_numrecs); | 401 | lrecs = be16_to_cpu(left->bb_numrecs); |
402 | lbp = bp; | 402 | lbp = bp; |
403 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, | 403 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, |
404 | cur->bc_private.i.agno, rbno, 0, &rbp, | 404 | cur->bc_private.a.agno, rbno, 0, &rbp, |
405 | XFS_INO_BTREE_REF))) | 405 | XFS_INO_BTREE_REF))) |
406 | return error; | 406 | return error; |
407 | right = XFS_BUF_TO_INOBT_BLOCK(rbp); | 407 | right = XFS_BUF_TO_INOBT_BLOCK(rbp); |
@@ -484,7 +484,7 @@ xfs_inobt_delrec( | |||
484 | xfs_buf_t *rrbp; | 484 | xfs_buf_t *rrbp; |
485 | 485 | ||
486 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, | 486 | if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, |
487 | cur->bc_private.i.agno, be32_to_cpu(left->bb_rightsib), 0, | 487 | cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), 0, |
488 | &rrbp, XFS_INO_BTREE_REF))) | 488 | &rrbp, XFS_INO_BTREE_REF))) |
489 | return error; | 489 | return error; |
490 | rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp); | 490 | rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp); |
@@ -497,7 +497,7 @@ xfs_inobt_delrec( | |||
497 | * Free the deleting block. | 497 | * Free the deleting block. |
498 | */ | 498 | */ |
499 | if ((error = xfs_free_extent(cur->bc_tp, XFS_AGB_TO_FSB(mp, | 499 | if ((error = xfs_free_extent(cur->bc_tp, XFS_AGB_TO_FSB(mp, |
500 | cur->bc_private.i.agno, rbno), 1))) | 500 | cur->bc_private.a.agno, rbno), 1))) |
501 | return error; | 501 | return error; |
502 | xfs_trans_binval(cur->bc_tp, rbp); | 502 | xfs_trans_binval(cur->bc_tp, rbp); |
503 | /* | 503 | /* |
@@ -854,7 +854,7 @@ xfs_inobt_lookup( | |||
854 | { | 854 | { |
855 | xfs_agi_t *agi; /* a.g. inode header */ | 855 | xfs_agi_t *agi; /* a.g. inode header */ |
856 | 856 | ||
857 | agi = XFS_BUF_TO_AGI(cur->bc_private.i.agbp); | 857 | agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp); |
858 | agno = be32_to_cpu(agi->agi_seqno); | 858 | agno = be32_to_cpu(agi->agi_seqno); |
859 | agbno = be32_to_cpu(agi->agi_root); | 859 | agbno = be32_to_cpu(agi->agi_root); |
860 | } | 860 | } |
@@ -1089,7 +1089,7 @@ xfs_inobt_lshift( | |||
1089 | * Set up the left neighbor as "left". | 1089 | * Set up the left neighbor as "left". |
1090 | */ | 1090 | */ |
1091 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 1091 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
1092 | cur->bc_private.i.agno, be32_to_cpu(right->bb_leftsib), | 1092 | cur->bc_private.a.agno, be32_to_cpu(right->bb_leftsib), |
1093 | 0, &lbp, XFS_INO_BTREE_REF))) | 1093 | 0, &lbp, XFS_INO_BTREE_REF))) |
1094 | return error; | 1094 | return error; |
1095 | left = XFS_BUF_TO_INOBT_BLOCK(lbp); | 1095 | left = XFS_BUF_TO_INOBT_BLOCK(lbp); |
@@ -1207,10 +1207,10 @@ xfs_inobt_newroot( | |||
1207 | /* | 1207 | /* |
1208 | * Get a block & a buffer. | 1208 | * Get a block & a buffer. |
1209 | */ | 1209 | */ |
1210 | agi = XFS_BUF_TO_AGI(cur->bc_private.i.agbp); | 1210 | agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp); |
1211 | args.tp = cur->bc_tp; | 1211 | args.tp = cur->bc_tp; |
1212 | args.mp = cur->bc_mp; | 1212 | args.mp = cur->bc_mp; |
1213 | args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.i.agno, | 1213 | args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, |
1214 | be32_to_cpu(agi->agi_root)); | 1214 | be32_to_cpu(agi->agi_root)); |
1215 | args.mod = args.minleft = args.alignment = args.total = args.wasdel = | 1215 | args.mod = args.minleft = args.alignment = args.total = args.wasdel = |
1216 | args.isfl = args.userdata = args.minalignslop = 0; | 1216 | args.isfl = args.userdata = args.minalignslop = 0; |
@@ -1233,7 +1233,7 @@ xfs_inobt_newroot( | |||
1233 | */ | 1233 | */ |
1234 | agi->agi_root = cpu_to_be32(args.agbno); | 1234 | agi->agi_root = cpu_to_be32(args.agbno); |
1235 | be32_add_cpu(&agi->agi_level, 1); | 1235 | be32_add_cpu(&agi->agi_level, 1); |
1236 | xfs_ialloc_log_agi(args.tp, cur->bc_private.i.agbp, | 1236 | xfs_ialloc_log_agi(args.tp, cur->bc_private.a.agbp, |
1237 | XFS_AGI_ROOT | XFS_AGI_LEVEL); | 1237 | XFS_AGI_ROOT | XFS_AGI_LEVEL); |
1238 | /* | 1238 | /* |
1239 | * At the previous root level there are now two blocks: the old | 1239 | * At the previous root level there are now two blocks: the old |
@@ -1376,7 +1376,7 @@ xfs_inobt_rshift( | |||
1376 | * Set up the right neighbor as "right". | 1376 | * Set up the right neighbor as "right". |
1377 | */ | 1377 | */ |
1378 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 1378 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
1379 | cur->bc_private.i.agno, be32_to_cpu(left->bb_rightsib), | 1379 | cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), |
1380 | 0, &rbp, XFS_INO_BTREE_REF))) | 1380 | 0, &rbp, XFS_INO_BTREE_REF))) |
1381 | return error; | 1381 | return error; |
1382 | right = XFS_BUF_TO_INOBT_BLOCK(rbp); | 1382 | right = XFS_BUF_TO_INOBT_BLOCK(rbp); |
@@ -1492,7 +1492,7 @@ xfs_inobt_split( | |||
1492 | * Allocate the new block. | 1492 | * Allocate the new block. |
1493 | * If we can't do it, we're toast. Give up. | 1493 | * If we can't do it, we're toast. Give up. |
1494 | */ | 1494 | */ |
1495 | args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.i.agno, lbno); | 1495 | args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, lbno); |
1496 | args.mod = args.minleft = args.alignment = args.total = args.wasdel = | 1496 | args.mod = args.minleft = args.alignment = args.total = args.wasdel = |
1497 | args.isfl = args.userdata = args.minalignslop = 0; | 1497 | args.isfl = args.userdata = args.minalignslop = 0; |
1498 | args.minlen = args.maxlen = args.prod = 1; | 1498 | args.minlen = args.maxlen = args.prod = 1; |
@@ -1725,7 +1725,7 @@ xfs_inobt_decrement( | |||
1725 | 1725 | ||
1726 | agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); | 1726 | agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); |
1727 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 1727 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
1728 | cur->bc_private.i.agno, agbno, 0, &bp, | 1728 | cur->bc_private.a.agno, agbno, 0, &bp, |
1729 | XFS_INO_BTREE_REF))) | 1729 | XFS_INO_BTREE_REF))) |
1730 | return error; | 1730 | return error; |
1731 | lev--; | 1731 | lev--; |
@@ -1897,7 +1897,7 @@ xfs_inobt_increment( | |||
1897 | 1897 | ||
1898 | agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); | 1898 | agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); |
1899 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, | 1899 | if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, |
1900 | cur->bc_private.i.agno, agbno, 0, &bp, | 1900 | cur->bc_private.a.agno, agbno, 0, &bp, |
1901 | XFS_INO_BTREE_REF))) | 1901 | XFS_INO_BTREE_REF))) |
1902 | return error; | 1902 | return error; |
1903 | lev--; | 1903 | lev--; |
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index b07604b94d9f..e229e9e001c2 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c | |||
@@ -216,7 +216,14 @@ finish_inode: | |||
216 | mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); | 216 | mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); |
217 | init_waitqueue_head(&ip->i_ipin_wait); | 217 | init_waitqueue_head(&ip->i_ipin_wait); |
218 | atomic_set(&ip->i_pincount, 0); | 218 | atomic_set(&ip->i_pincount, 0); |
219 | initnsema(&ip->i_flock, 1, "xfsfino"); | 219 | |
220 | /* | ||
221 | * Because we want to use a counting completion, complete | ||
222 | * the flush completion once to allow a single access to | ||
223 | * the flush completion without blocking. | ||
224 | */ | ||
225 | init_completion(&ip->i_flush); | ||
226 | complete(&ip->i_flush); | ||
220 | 227 | ||
221 | if (lock_flags) | 228 | if (lock_flags) |
222 | xfs_ilock(ip, lock_flags); | 229 | xfs_ilock(ip, lock_flags); |
@@ -288,10 +295,17 @@ finish_inode: | |||
288 | *ipp = ip; | 295 | *ipp = ip; |
289 | 296 | ||
290 | /* | 297 | /* |
298 | * Link the XFS inode with the Linux inode. | ||

299 | */ | ||
300 | ip->i_vnode = inode; | ||
301 | inode->i_private = ip; | ||
302 | |||
303 | /* | ||
291 | * If we have a real type for an on-disk inode, we can set ops(&unlock) | 304 | * If we have a real type for an on-disk inode, we can set ops(&unlock) |
292 | * now. If it's a new inode being created, xfs_ialloc will handle it. | 305 | * now. If it's a new inode being created, xfs_ialloc will handle it. |
293 | */ | 306 | */ |
294 | xfs_initialize_vnode(mp, inode, ip); | 307 | if (ip->i_d.di_mode != 0) |
308 | xfs_setup_inode(ip); | ||
295 | return 0; | 309 | return 0; |
296 | } | 310 | } |
297 | 311 | ||
@@ -411,10 +425,11 @@ xfs_iput(xfs_inode_t *ip, | |||
411 | * Special iput for brand-new inodes that are still locked | 425 | * Special iput for brand-new inodes that are still locked |
412 | */ | 426 | */ |
413 | void | 427 | void |
414 | xfs_iput_new(xfs_inode_t *ip, | 428 | xfs_iput_new( |
415 | uint lock_flags) | 429 | xfs_inode_t *ip, |
430 | uint lock_flags) | ||
416 | { | 431 | { |
417 | struct inode *inode = ip->i_vnode; | 432 | struct inode *inode = VFS_I(ip); |
418 | 433 | ||
419 | xfs_itrace_entry(ip); | 434 | xfs_itrace_entry(ip); |
420 | 435 | ||
@@ -775,26 +790,3 @@ xfs_isilocked( | |||
775 | } | 790 | } |
776 | #endif | 791 | #endif |
777 | 792 | ||
778 | /* | ||
779 | * The following three routines simply manage the i_flock | ||
780 | * semaphore embedded in the inode. This semaphore synchronizes | ||
781 | * processes attempting to flush the in-core inode back to disk. | ||
782 | */ | ||
783 | void | ||
784 | xfs_iflock(xfs_inode_t *ip) | ||
785 | { | ||
786 | psema(&(ip->i_flock), PINOD|PLTWAIT); | ||
787 | } | ||
788 | |||
789 | int | ||
790 | xfs_iflock_nowait(xfs_inode_t *ip) | ||
791 | { | ||
792 | return (cpsema(&(ip->i_flock))); | ||
793 | } | ||
794 | |||
795 | void | ||
796 | xfs_ifunlock(xfs_inode_t *ip) | ||
797 | { | ||
798 | ASSERT(issemalocked(&(ip->i_flock))); | ||
799 | vsema(&(ip->i_flock)); | ||
800 | } | ||
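The i_flock semaphore is gone; xfs_iget now initialises a completion and immediately completes it once. A completion starts off "unavailable", the opposite of an unlocked flush lock, so that one extra complete() is what lets the first locker through without blocking. A minimal kernel-style sketch of the pattern, assuming only the completion API used in these hunks (the wrapper names are hypothetical):

#include <linux/completion.h>

struct example_flush_lock {
	struct completion	done;
};

/* initialise so the very first trylock succeeds without blocking */
static void example_flush_lock_init(struct example_flush_lock *fl)
{
	init_completion(&fl->done);
	complete(&fl->done);
}

/* lock: consume the single outstanding completion (may sleep) */
static void example_flush_lock(struct example_flush_lock *fl)
{
	wait_for_completion(&fl->done);
}

/* trylock: non-blocking, non-zero on success */
static int example_flush_trylock(struct example_flush_lock *fl)
{
	return try_wait_for_completion(&fl->done);
}

/* unlock: hand the completion to the next locker */
static void example_flush_unlock(struct example_flush_lock *fl)
{
	complete(&fl->done);
}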
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index bedc66163176..358511b85ced 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -580,8 +580,8 @@ xfs_iformat_extents( | |||
580 | xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip)); | 580 | xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip)); |
581 | for (i = 0; i < nex; i++, dp++) { | 581 | for (i = 0; i < nex; i++, dp++) { |
582 | xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i); | 582 | xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i); |
583 | ep->l0 = be64_to_cpu(get_unaligned(&dp->l0)); | 583 | ep->l0 = get_unaligned_be64(&dp->l0); |
584 | ep->l1 = be64_to_cpu(get_unaligned(&dp->l1)); | 584 | ep->l1 = get_unaligned_be64(&dp->l1); |
585 | } | 585 | } |
586 | XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork); | 586 | XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork); |
587 | if (whichfork != XFS_DATA_FORK || | 587 | if (whichfork != XFS_DATA_FORK || |
@@ -835,22 +835,22 @@ xfs_iread( | |||
835 | * Do this before xfs_iformat in case it adds entries. | 835 | * Do this before xfs_iformat in case it adds entries. |
836 | */ | 836 | */ |
837 | #ifdef XFS_INODE_TRACE | 837 | #ifdef XFS_INODE_TRACE |
838 | ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_SLEEP); | 838 | ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS); |
839 | #endif | 839 | #endif |
840 | #ifdef XFS_BMAP_TRACE | 840 | #ifdef XFS_BMAP_TRACE |
841 | ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP); | 841 | ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS); |
842 | #endif | 842 | #endif |
843 | #ifdef XFS_BMBT_TRACE | 843 | #ifdef XFS_BMBT_TRACE |
844 | ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_SLEEP); | 844 | ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS); |
845 | #endif | 845 | #endif |
846 | #ifdef XFS_RW_TRACE | 846 | #ifdef XFS_RW_TRACE |
847 | ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_SLEEP); | 847 | ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS); |
848 | #endif | 848 | #endif |
849 | #ifdef XFS_ILOCK_TRACE | 849 | #ifdef XFS_ILOCK_TRACE |
850 | ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_SLEEP); | 850 | ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS); |
851 | #endif | 851 | #endif |
852 | #ifdef XFS_DIR2_TRACE | 852 | #ifdef XFS_DIR2_TRACE |
853 | ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_SLEEP); | 853 | ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS); |
854 | #endif | 854 | #endif |
855 | 855 | ||
856 | /* | 856 | /* |
@@ -1046,9 +1046,9 @@ xfs_ialloc( | |||
1046 | { | 1046 | { |
1047 | xfs_ino_t ino; | 1047 | xfs_ino_t ino; |
1048 | xfs_inode_t *ip; | 1048 | xfs_inode_t *ip; |
1049 | bhv_vnode_t *vp; | ||
1050 | uint flags; | 1049 | uint flags; |
1051 | int error; | 1050 | int error; |
1051 | timespec_t tv; | ||
1052 | 1052 | ||
1053 | /* | 1053 | /* |
1054 | * Call the space management code to pick | 1054 | * Call the space management code to pick |
@@ -1077,7 +1077,6 @@ xfs_ialloc( | |||
1077 | } | 1077 | } |
1078 | ASSERT(ip != NULL); | 1078 | ASSERT(ip != NULL); |
1079 | 1079 | ||
1080 | vp = XFS_ITOV(ip); | ||
1081 | ip->i_d.di_mode = (__uint16_t)mode; | 1080 | ip->i_d.di_mode = (__uint16_t)mode; |
1082 | ip->i_d.di_onlink = 0; | 1081 | ip->i_d.di_onlink = 0; |
1083 | ip->i_d.di_nlink = nlink; | 1082 | ip->i_d.di_nlink = nlink; |
@@ -1130,7 +1129,13 @@ xfs_ialloc( | |||
1130 | ip->i_size = 0; | 1129 | ip->i_size = 0; |
1131 | ip->i_d.di_nextents = 0; | 1130 | ip->i_d.di_nextents = 0; |
1132 | ASSERT(ip->i_d.di_nblocks == 0); | 1131 | ASSERT(ip->i_d.di_nblocks == 0); |
1133 | xfs_ichgtime(ip, XFS_ICHGTIME_CHG|XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD); | 1132 | |
1133 | nanotime(&tv); | ||
1134 | ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec; | ||
1135 | ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec; | ||
1136 | ip->i_d.di_atime = ip->i_d.di_mtime; | ||
1137 | ip->i_d.di_ctime = ip->i_d.di_mtime; | ||
1138 | |||
1134 | /* | 1139 | /* |
1135 | * di_gen will have been taken care of in xfs_iread. | 1140 | * di_gen will have been taken care of in xfs_iread. |
1136 | */ | 1141 | */ |
@@ -1220,7 +1225,7 @@ xfs_ialloc( | |||
1220 | xfs_trans_log_inode(tp, ip, flags); | 1225 | xfs_trans_log_inode(tp, ip, flags); |
1221 | 1226 | ||
1222 | /* now that we have an i_mode we can setup inode ops and unlock */ | 1227 | /* now that we have an i_mode we can setup inode ops and unlock */ |
1223 | xfs_initialize_vnode(tp->t_mountp, vp, ip); | 1228 | xfs_setup_inode(ip); |
1224 | 1229 | ||
1225 | *ipp = ip; | 1230 | *ipp = ip; |
1226 | return 0; | 1231 | return 0; |
@@ -1399,7 +1404,6 @@ xfs_itruncate_start( | |||
1399 | xfs_fsize_t last_byte; | 1404 | xfs_fsize_t last_byte; |
1400 | xfs_off_t toss_start; | 1405 | xfs_off_t toss_start; |
1401 | xfs_mount_t *mp; | 1406 | xfs_mount_t *mp; |
1402 | bhv_vnode_t *vp; | ||
1403 | int error = 0; | 1407 | int error = 0; |
1404 | 1408 | ||
1405 | ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); | 1409 | ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); |
@@ -1408,7 +1412,6 @@ xfs_itruncate_start( | |||
1408 | (flags == XFS_ITRUNC_MAYBE)); | 1412 | (flags == XFS_ITRUNC_MAYBE)); |
1409 | 1413 | ||
1410 | mp = ip->i_mount; | 1414 | mp = ip->i_mount; |
1411 | vp = XFS_ITOV(ip); | ||
1412 | 1415 | ||
1413 | /* wait for the completion of any pending DIOs */ | 1416 | /* wait for the completion of any pending DIOs */ |
1414 | if (new_size < ip->i_size) | 1417 | if (new_size < ip->i_size) |
@@ -1457,7 +1460,7 @@ xfs_itruncate_start( | |||
1457 | 1460 | ||
1458 | #ifdef DEBUG | 1461 | #ifdef DEBUG |
1459 | if (new_size == 0) { | 1462 | if (new_size == 0) { |
1460 | ASSERT(VN_CACHED(vp) == 0); | 1463 | ASSERT(VN_CACHED(VFS_I(ip)) == 0); |
1461 | } | 1464 | } |
1462 | #endif | 1465 | #endif |
1463 | return error; | 1466 | return error; |
@@ -2630,7 +2633,6 @@ xfs_idestroy( | |||
2630 | xfs_idestroy_fork(ip, XFS_ATTR_FORK); | 2633 | xfs_idestroy_fork(ip, XFS_ATTR_FORK); |
2631 | mrfree(&ip->i_lock); | 2634 | mrfree(&ip->i_lock); |
2632 | mrfree(&ip->i_iolock); | 2635 | mrfree(&ip->i_iolock); |
2633 | freesema(&ip->i_flock); | ||
2634 | 2636 | ||
2635 | #ifdef XFS_INODE_TRACE | 2637 | #ifdef XFS_INODE_TRACE |
2636 | ktrace_free(ip->i_trace); | 2638 | ktrace_free(ip->i_trace); |
@@ -3048,10 +3050,10 @@ cluster_corrupt_out: | |||
3048 | /* | 3050 | /* |
3049 | * xfs_iflush() will write a modified inode's changes out to the | 3051 | * xfs_iflush() will write a modified inode's changes out to the |
3050 | * inode's on disk home. The caller must have the inode lock held | 3052 | * inode's on disk home. The caller must have the inode lock held |
3051 | * in at least shared mode and the inode flush semaphore must be | 3053 | * in at least shared mode and the inode flush completion must be |
3052 | * held as well. The inode lock will still be held upon return from | 3054 | * active as well. The inode lock will still be held upon return from |
3053 | * the call and the caller is free to unlock it. | 3055 | * the call and the caller is free to unlock it. |
3054 | * The inode flush lock will be unlocked when the inode reaches the disk. | 3056 | * The inode flush will be completed when the inode reaches the disk. |
3055 | * The flags indicate how the inode's buffer should be written out. | 3057 | * The flags indicate how the inode's buffer should be written out. |
3056 | */ | 3058 | */ |
3057 | int | 3059 | int |
@@ -3070,7 +3072,7 @@ xfs_iflush( | |||
3070 | XFS_STATS_INC(xs_iflush_count); | 3072 | XFS_STATS_INC(xs_iflush_count); |
3071 | 3073 | ||
3072 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); | 3074 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); |
3073 | ASSERT(issemalocked(&(ip->i_flock))); | 3075 | ASSERT(!completion_done(&ip->i_flush)); |
3074 | ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || | 3076 | ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || |
3075 | ip->i_d.di_nextents > ip->i_df.if_ext_max); | 3077 | ip->i_d.di_nextents > ip->i_df.if_ext_max); |
3076 | 3078 | ||
@@ -3233,7 +3235,7 @@ xfs_iflush_int( | |||
3233 | #endif | 3235 | #endif |
3234 | 3236 | ||
3235 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); | 3237 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); |
3236 | ASSERT(issemalocked(&(ip->i_flock))); | 3238 | ASSERT(!completion_done(&ip->i_flush)); |
3237 | ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || | 3239 | ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || |
3238 | ip->i_d.di_nextents > ip->i_df.if_ext_max); | 3240 | ip->i_d.di_nextents > ip->i_df.if_ext_max); |
3239 | 3241 | ||
@@ -3465,7 +3467,6 @@ xfs_iflush_all( | |||
3465 | xfs_mount_t *mp) | 3467 | xfs_mount_t *mp) |
3466 | { | 3468 | { |
3467 | xfs_inode_t *ip; | 3469 | xfs_inode_t *ip; |
3468 | bhv_vnode_t *vp; | ||
3469 | 3470 | ||
3470 | again: | 3471 | again: |
3471 | XFS_MOUNT_ILOCK(mp); | 3472 | XFS_MOUNT_ILOCK(mp); |
@@ -3480,14 +3481,13 @@ xfs_iflush_all( | |||
3480 | continue; | 3481 | continue; |
3481 | } | 3482 | } |
3482 | 3483 | ||
3483 | vp = XFS_ITOV_NULL(ip); | 3484 | if (!VFS_I(ip)) { |
3484 | if (!vp) { | ||
3485 | XFS_MOUNT_IUNLOCK(mp); | 3485 | XFS_MOUNT_IUNLOCK(mp); |
3486 | xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC); | 3486 | xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC); |
3487 | goto again; | 3487 | goto again; |
3488 | } | 3488 | } |
3489 | 3489 | ||
3490 | ASSERT(vn_count(vp) == 0); | 3490 | ASSERT(vn_count(VFS_I(ip)) == 0); |
3491 | 3491 | ||
3492 | ip = ip->i_mnext; | 3492 | ip = ip->i_mnext; |
3493 | } while (ip != mp->m_inodes); | 3493 | } while (ip != mp->m_inodes); |
@@ -3707,7 +3707,7 @@ xfs_iext_add_indirect_multi( | |||
3707 | * (all extents past */ | 3707 | * (all extents past */ |
3708 | if (nex2) { | 3708 | if (nex2) { |
3709 | byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); | 3709 | byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); |
3710 | nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_SLEEP); | 3710 | nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS); |
3711 | memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff); | 3711 | memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff); |
3712 | erp->er_extcount -= nex2; | 3712 | erp->er_extcount -= nex2; |
3713 | xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2); | 3713 | xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2); |
@@ -4007,8 +4007,7 @@ xfs_iext_realloc_direct( | |||
4007 | ifp->if_u1.if_extents = | 4007 | ifp->if_u1.if_extents = |
4008 | kmem_realloc(ifp->if_u1.if_extents, | 4008 | kmem_realloc(ifp->if_u1.if_extents, |
4009 | rnew_size, | 4009 | rnew_size, |
4010 | ifp->if_real_bytes, | 4010 | ifp->if_real_bytes, KM_NOFS); |
4011 | KM_SLEEP); | ||
4012 | } | 4011 | } |
4013 | if (rnew_size > ifp->if_real_bytes) { | 4012 | if (rnew_size > ifp->if_real_bytes) { |
4014 | memset(&ifp->if_u1.if_extents[ifp->if_bytes / | 4013 | memset(&ifp->if_u1.if_extents[ifp->if_bytes / |
@@ -4067,7 +4066,7 @@ xfs_iext_inline_to_direct( | |||
4067 | xfs_ifork_t *ifp, /* inode fork pointer */ | 4066 | xfs_ifork_t *ifp, /* inode fork pointer */ |
4068 | int new_size) /* number of extents in file */ | 4067 | int new_size) /* number of extents in file */ |
4069 | { | 4068 | { |
4070 | ifp->if_u1.if_extents = kmem_alloc(new_size, KM_SLEEP); | 4069 | ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS); |
4071 | memset(ifp->if_u1.if_extents, 0, new_size); | 4070 | memset(ifp->if_u1.if_extents, 0, new_size); |
4072 | if (ifp->if_bytes) { | 4071 | if (ifp->if_bytes) { |
4073 | memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext, | 4072 | memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext, |
@@ -4099,7 +4098,7 @@ xfs_iext_realloc_indirect( | |||
4099 | } else { | 4098 | } else { |
4100 | ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *) | 4099 | ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *) |
4101 | kmem_realloc(ifp->if_u1.if_ext_irec, | 4100 | kmem_realloc(ifp->if_u1.if_ext_irec, |
4102 | new_size, size, KM_SLEEP); | 4101 | new_size, size, KM_NOFS); |
4103 | } | 4102 | } |
4104 | } | 4103 | } |
4105 | 4104 | ||
@@ -4341,11 +4340,10 @@ xfs_iext_irec_init( | |||
4341 | nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); | 4340 | nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
4342 | ASSERT(nextents <= XFS_LINEAR_EXTS); | 4341 | ASSERT(nextents <= XFS_LINEAR_EXTS); |
4343 | 4342 | ||
4344 | erp = (xfs_ext_irec_t *) | 4343 | erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS); |
4345 | kmem_alloc(sizeof(xfs_ext_irec_t), KM_SLEEP); | ||
4346 | 4344 | ||
4347 | if (nextents == 0) { | 4345 | if (nextents == 0) { |
4348 | ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP); | 4346 | ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS); |
4349 | } else if (!ifp->if_real_bytes) { | 4347 | } else if (!ifp->if_real_bytes) { |
4350 | xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ); | 4348 | xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ); |
4351 | } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) { | 4349 | } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) { |
@@ -4393,7 +4391,7 @@ xfs_iext_irec_new( | |||
4393 | 4391 | ||
4394 | /* Initialize new extent record */ | 4392 | /* Initialize new extent record */ |
4395 | erp = ifp->if_u1.if_ext_irec; | 4393 | erp = ifp->if_u1.if_ext_irec; |
4396 | erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP); | 4394 | erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS); |
4397 | ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ; | 4395 | ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ; |
4398 | memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ); | 4396 | memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ); |
4399 | erp[erp_idx].er_extcount = 0; | 4397 | erp[erp_idx].er_extcount = 0; |
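xfs_iformat_extents() now reads the on-disk extent words with get_unaligned_be64() instead of wrapping get_unaligned() in be64_to_cpu(). A small sketch of the helper's effect; the function and field names are hypothetical, the real code reads dp->l0 and dp->l1:

#include <asm/unaligned.h>
#include <linux/types.h>

/* read a possibly unaligned big-endian 64-bit on-disk field */
static u64 example_read_be64(const void *disk_field)
{
	/* old style: be64_to_cpu(get_unaligned((__be64 *)disk_field)) */
	return get_unaligned_be64(disk_field);
}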
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 17a04b6321ed..1420c49674d7 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h | |||
@@ -87,8 +87,7 @@ typedef struct xfs_ifork { | |||
87 | * Flags for xfs_ichgtime(). | 87 | * Flags for xfs_ichgtime(). |
88 | */ | 88 | */ |
89 | #define XFS_ICHGTIME_MOD 0x1 /* data fork modification timestamp */ | 89 | #define XFS_ICHGTIME_MOD 0x1 /* data fork modification timestamp */ |
90 | #define XFS_ICHGTIME_ACC 0x2 /* data fork access timestamp */ | 90 | #define XFS_ICHGTIME_CHG 0x2 /* inode field change timestamp */ |
91 | #define XFS_ICHGTIME_CHG 0x4 /* inode field change timestamp */ | ||
92 | 91 | ||
93 | /* | 92 | /* |
94 | * Per-fork incore inode flags. | 93 | * Per-fork incore inode flags. |
@@ -204,7 +203,7 @@ typedef struct xfs_inode { | |||
204 | struct xfs_inode *i_mprev; /* ptr to prev inode */ | 203 | struct xfs_inode *i_mprev; /* ptr to prev inode */ |
205 | struct xfs_mount *i_mount; /* fs mount struct ptr */ | 204 | struct xfs_mount *i_mount; /* fs mount struct ptr */ |
206 | struct list_head i_reclaim; /* reclaim list */ | 205 | struct list_head i_reclaim; /* reclaim list */ |
207 | bhv_vnode_t *i_vnode; /* vnode backpointer */ | 206 | struct inode *i_vnode; /* vnode backpointer */ |
208 | struct xfs_dquot *i_udquot; /* user dquot */ | 207 | struct xfs_dquot *i_udquot; /* user dquot */ |
209 | struct xfs_dquot *i_gdquot; /* group dquot */ | 208 | struct xfs_dquot *i_gdquot; /* group dquot */ |
210 | 209 | ||
@@ -223,7 +222,7 @@ typedef struct xfs_inode { | |||
223 | struct xfs_inode_log_item *i_itemp; /* logging information */ | 222 | struct xfs_inode_log_item *i_itemp; /* logging information */ |
224 | mrlock_t i_lock; /* inode lock */ | 223 | mrlock_t i_lock; /* inode lock */ |
225 | mrlock_t i_iolock; /* inode IO lock */ | 224 | mrlock_t i_iolock; /* inode IO lock */ |
226 | sema_t i_flock; /* inode flush lock */ | 225 | struct completion i_flush; /* inode flush completion q */ |
227 | atomic_t i_pincount; /* inode pin count */ | 226 | atomic_t i_pincount; /* inode pin count */ |
228 | wait_queue_head_t i_ipin_wait; /* inode pinning wait queue */ | 227 | wait_queue_head_t i_ipin_wait; /* inode pinning wait queue */ |
229 | spinlock_t i_flags_lock; /* inode i_flags lock */ | 228 | spinlock_t i_flags_lock; /* inode i_flags lock */ |
@@ -263,6 +262,18 @@ typedef struct xfs_inode { | |||
263 | #define XFS_ISIZE(ip) (((ip)->i_d.di_mode & S_IFMT) == S_IFREG) ? \ | 262 | #define XFS_ISIZE(ip) (((ip)->i_d.di_mode & S_IFMT) == S_IFREG) ? \ |
264 | (ip)->i_size : (ip)->i_d.di_size; | 263 | (ip)->i_size : (ip)->i_d.di_size; |
265 | 264 | ||
265 | /* Convert from vfs inode to xfs inode */ | ||
266 | static inline struct xfs_inode *XFS_I(struct inode *inode) | ||
267 | { | ||
268 | return (struct xfs_inode *)inode->i_private; | ||
269 | } | ||
270 | |||
271 | /* convert from xfs inode to vfs inode */ | ||
272 | static inline struct inode *VFS_I(struct xfs_inode *ip) | ||
273 | { | ||
274 | return (struct inode *)ip->i_vnode; | ||
275 | } | ||
276 | |||
266 | /* | 277 | /* |
267 | * i_flags helper functions | 278 | * i_flags helper functions |
268 | */ | 279 | */ |
@@ -439,9 +450,6 @@ xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags) | |||
439 | #define XFS_ITRUNC_DEFINITE 0x1 | 450 | #define XFS_ITRUNC_DEFINITE 0x1 |
440 | #define XFS_ITRUNC_MAYBE 0x2 | 451 | #define XFS_ITRUNC_MAYBE 0x2 |
441 | 452 | ||
442 | #define XFS_ITOV(ip) ((ip)->i_vnode) | ||
443 | #define XFS_ITOV_NULL(ip) ((ip)->i_vnode) | ||
444 | |||
445 | /* | 453 | /* |
446 | * For multiple groups support: if S_ISGID bit is set in the parent | 454 | * For multiple groups support: if S_ISGID bit is set in the parent |
447 | * directory, group of new file is set to that of the parent, and | 455 | * directory, group of new file is set to that of the parent, and |
@@ -473,11 +481,8 @@ int xfs_ilock_nowait(xfs_inode_t *, uint); | |||
473 | void xfs_iunlock(xfs_inode_t *, uint); | 481 | void xfs_iunlock(xfs_inode_t *, uint); |
474 | void xfs_ilock_demote(xfs_inode_t *, uint); | 482 | void xfs_ilock_demote(xfs_inode_t *, uint); |
475 | int xfs_isilocked(xfs_inode_t *, uint); | 483 | int xfs_isilocked(xfs_inode_t *, uint); |
476 | void xfs_iflock(xfs_inode_t *); | ||
477 | int xfs_iflock_nowait(xfs_inode_t *); | ||
478 | uint xfs_ilock_map_shared(xfs_inode_t *); | 484 | uint xfs_ilock_map_shared(xfs_inode_t *); |
479 | void xfs_iunlock_map_shared(xfs_inode_t *, uint); | 485 | void xfs_iunlock_map_shared(xfs_inode_t *, uint); |
480 | void xfs_ifunlock(xfs_inode_t *); | ||
481 | void xfs_ireclaim(xfs_inode_t *); | 486 | void xfs_ireclaim(xfs_inode_t *); |
482 | int xfs_finish_reclaim(xfs_inode_t *, int, int); | 487 | int xfs_finish_reclaim(xfs_inode_t *, int, int); |
483 | int xfs_finish_reclaim_all(struct xfs_mount *, int); | 488 | int xfs_finish_reclaim_all(struct xfs_mount *, int); |
@@ -522,6 +527,7 @@ void xfs_iflush_all(struct xfs_mount *); | |||
522 | void xfs_ichgtime(xfs_inode_t *, int); | 527 | void xfs_ichgtime(xfs_inode_t *, int); |
523 | xfs_fsize_t xfs_file_last_byte(xfs_inode_t *); | 528 | xfs_fsize_t xfs_file_last_byte(xfs_inode_t *); |
524 | void xfs_lock_inodes(xfs_inode_t **, int, uint); | 529 | void xfs_lock_inodes(xfs_inode_t **, int, uint); |
530 | void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint); | ||
525 | 531 | ||
526 | void xfs_synchronize_atime(xfs_inode_t *); | 532 | void xfs_synchronize_atime(xfs_inode_t *); |
527 | void xfs_mark_inode_dirty_sync(xfs_inode_t *); | 533 | void xfs_mark_inode_dirty_sync(xfs_inode_t *); |
@@ -570,6 +576,26 @@ extern struct kmem_zone *xfs_ifork_zone; | |||
570 | extern struct kmem_zone *xfs_inode_zone; | 576 | extern struct kmem_zone *xfs_inode_zone; |
571 | extern struct kmem_zone *xfs_ili_zone; | 577 | extern struct kmem_zone *xfs_ili_zone; |
572 | 578 | ||
579 | /* | ||
580 | * Manage the i_flush queue embedded in the inode. This completion | ||
581 | * queue synchronizes processes attempting to flush the in-core | ||
582 | * inode back to disk. | ||
583 | */ | ||
584 | static inline void xfs_iflock(xfs_inode_t *ip) | ||
585 | { | ||
586 | wait_for_completion(&ip->i_flush); | ||
587 | } | ||
588 | |||
589 | static inline int xfs_iflock_nowait(xfs_inode_t *ip) | ||
590 | { | ||
591 | return try_wait_for_completion(&ip->i_flush); | ||
592 | } | ||
593 | |||
594 | static inline void xfs_ifunlock(xfs_inode_t *ip) | ||
595 | { | ||
596 | complete(&ip->i_flush); | ||
597 | } | ||
598 | |||
573 | #endif /* __KERNEL__ */ | 599 | #endif /* __KERNEL__ */ |
574 | 600 | ||
575 | #endif /* __XFS_INODE_H__ */ | 601 | #endif /* __XFS_INODE_H__ */ |
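The flush "lock" helpers are now inlines over the i_flush completion, and the old issemalocked() checks elsewhere in this merge become completion_done() tests. A hedged sketch of how a writeback path pairs them; example_try_flush() is a hypothetical caller and error handling is omitted:

/*
 * Sketch only: the caller must hold the ILOCK and win the flush
 * trylock before calling xfs_iflush(); the matching xfs_ifunlock()
 * (complete(&ip->i_flush)) happens when the inode buffer I/O
 * completes, so !completion_done(&ip->i_flush) means a flush is
 * still in progress.
 */
static void
example_try_flush(
	xfs_inode_t	*ip)
{
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_iflock_nowait(ip))
		xfs_iflush(ip, XFS_IFLUSH_ASYNC);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
}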
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 0eee08a32c26..97c7452e2620 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c | |||
@@ -779,11 +779,10 @@ xfs_inode_item_pushbuf( | |||
779 | ASSERT(iip->ili_push_owner == current_pid()); | 779 | ASSERT(iip->ili_push_owner == current_pid()); |
780 | 780 | ||
781 | /* | 781 | /* |
782 | * If flushlock isn't locked anymore, chances are that the | 782 | * If a flush is not in progress anymore, chances are that the |
783 | * inode flush completed and the inode was taken off the AIL. | 783 | * inode was taken off the AIL. So, just get out. |
784 | * So, just get out. | ||
785 | */ | 784 | */ |
786 | if (!issemalocked(&(ip->i_flock)) || | 785 | if (completion_done(&ip->i_flush) || |
787 | ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) { | 786 | ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) { |
788 | iip->ili_pushbuf_flag = 0; | 787 | iip->ili_pushbuf_flag = 0; |
789 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 788 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
@@ -805,7 +804,7 @@ xfs_inode_item_pushbuf( | |||
805 | * If not, we can flush it async. | 804 | * If not, we can flush it async. |
806 | */ | 805 | */ |
807 | dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) && | 806 | dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) && |
808 | issemalocked(&(ip->i_flock))); | 807 | !completion_done(&ip->i_flush)); |
809 | iip->ili_pushbuf_flag = 0; | 808 | iip->ili_pushbuf_flag = 0; |
810 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 809 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
811 | xfs_buftrace("INODE ITEM PUSH", bp); | 810 | xfs_buftrace("INODE ITEM PUSH", bp); |
@@ -858,7 +857,7 @@ xfs_inode_item_push( | |||
858 | ip = iip->ili_inode; | 857 | ip = iip->ili_inode; |
859 | 858 | ||
860 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED)); | 859 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED)); |
861 | ASSERT(issemalocked(&(ip->i_flock))); | 860 | ASSERT(!completion_done(&ip->i_flush)); |
862 | /* | 861 | /* |
863 | * Since we were able to lock the inode's flush lock and | 862 | * Since we were able to lock the inode's flush lock and |
864 | * we found it on the AIL, the inode must be dirty. This | 863 | * we found it on the AIL, the inode must be dirty. This |
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index 9a3ef9dcaeb9..cf6754a3c5b3 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c | |||
@@ -59,7 +59,6 @@ xfs_bulkstat_one_iget( | |||
59 | { | 59 | { |
60 | xfs_icdinode_t *dic; /* dinode core info pointer */ | 60 | xfs_icdinode_t *dic; /* dinode core info pointer */ |
61 | xfs_inode_t *ip; /* incore inode pointer */ | 61 | xfs_inode_t *ip; /* incore inode pointer */ |
62 | bhv_vnode_t *vp; | ||
63 | int error; | 62 | int error; |
64 | 63 | ||
65 | error = xfs_iget(mp, NULL, ino, | 64 | error = xfs_iget(mp, NULL, ino, |
@@ -72,7 +71,6 @@ xfs_bulkstat_one_iget( | |||
72 | ASSERT(ip != NULL); | 71 | ASSERT(ip != NULL); |
73 | ASSERT(ip->i_blkno != (xfs_daddr_t)0); | 72 | ASSERT(ip->i_blkno != (xfs_daddr_t)0); |
74 | 73 | ||
75 | vp = XFS_ITOV(ip); | ||
76 | dic = &ip->i_d; | 74 | dic = &ip->i_d; |
77 | 75 | ||
78 | /* xfs_iget returns the following without needing | 76 | /* xfs_iget returns the following without needing |
@@ -85,7 +83,7 @@ xfs_bulkstat_one_iget( | |||
85 | buf->bs_uid = dic->di_uid; | 83 | buf->bs_uid = dic->di_uid; |
86 | buf->bs_gid = dic->di_gid; | 84 | buf->bs_gid = dic->di_gid; |
87 | buf->bs_size = dic->di_size; | 85 | buf->bs_size = dic->di_size; |
88 | vn_atime_to_bstime(vp, &buf->bs_atime); | 86 | vn_atime_to_bstime(VFS_I(ip), &buf->bs_atime); |
89 | buf->bs_mtime.tv_sec = dic->di_mtime.t_sec; | 87 | buf->bs_mtime.tv_sec = dic->di_mtime.t_sec; |
90 | buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec; | 88 | buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec; |
91 | buf->bs_ctime.tv_sec = dic->di_ctime.t_sec; | 89 | buf->bs_ctime.tv_sec = dic->di_ctime.t_sec; |
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 91b00a5686cd..ccba14eb9dbe 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
@@ -160,7 +160,7 @@ void | |||
160 | xlog_trace_iclog(xlog_in_core_t *iclog, uint state) | 160 | xlog_trace_iclog(xlog_in_core_t *iclog, uint state) |
161 | { | 161 | { |
162 | if (!iclog->ic_trace) | 162 | if (!iclog->ic_trace) |
163 | iclog->ic_trace = ktrace_alloc(256, KM_SLEEP); | 163 | iclog->ic_trace = ktrace_alloc(256, KM_NOFS); |
164 | ktrace_enter(iclog->ic_trace, | 164 | ktrace_enter(iclog->ic_trace, |
165 | (void *)((unsigned long)state), | 165 | (void *)((unsigned long)state), |
166 | (void *)((unsigned long)current_pid()), | 166 | (void *)((unsigned long)current_pid()), |
@@ -336,15 +336,12 @@ xfs_log_done(xfs_mount_t *mp, | |||
336 | } else { | 336 | } else { |
337 | xlog_trace_loggrant(log, ticket, "xfs_log_done: (permanent)"); | 337 | xlog_trace_loggrant(log, ticket, "xfs_log_done: (permanent)"); |
338 | xlog_regrant_reserve_log_space(log, ticket); | 338 | xlog_regrant_reserve_log_space(log, ticket); |
339 | } | 339 | /* If this ticket was a permanent reservation and we aren't |
340 | 340 | * trying to release it, reset the inited flags; so next time | |
341 | /* If this ticket was a permanent reservation and we aren't | 341 | * we write, a start record will be written out. |
342 | * trying to release it, reset the inited flags; so next time | 342 | */ |
343 | * we write, a start record will be written out. | ||
344 | */ | ||
345 | if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) && | ||
346 | (flags & XFS_LOG_REL_PERM_RESERV) == 0) | ||
347 | ticket->t_flags |= XLOG_TIC_INITED; | 343 | ticket->t_flags |= XLOG_TIC_INITED; |
344 | } | ||
348 | 345 | ||
349 | return lsn; | 346 | return lsn; |
350 | } /* xfs_log_done */ | 347 | } /* xfs_log_done */ |
@@ -357,11 +354,11 @@ xfs_log_done(xfs_mount_t *mp, | |||
357 | * Asynchronous forces are implemented by setting the WANT_SYNC | 354 | * Asynchronous forces are implemented by setting the WANT_SYNC |
358 | * bit in the appropriate in-core log and then returning. | 355 | * bit in the appropriate in-core log and then returning. |
359 | * | 356 | * |
360 | * Synchronous forces are implemented with a semaphore. All callers | 357 | * Synchronous forces are implemented with a signal variable. All callers |
361 | * to force a given lsn to disk will wait on a semaphore attached to the | 358 | * to force a given lsn to disk will wait on the sv attached to the |
362 | * specific in-core log. When the given in-core log finally completes its | 359 | * specific in-core log. When the given in-core log finally completes its |
363 | * write to disk, that thread will wake up all threads waiting on the | 360 | * write to disk, that thread will wake up all threads waiting on the |
364 | * semaphore. | 361 | * sv. |
365 | */ | 362 | */ |
366 | int | 363 | int |
367 | _xfs_log_force( | 364 | _xfs_log_force( |
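The per-iclog and per-ticket semaphores become sv_t wait queues (ic_force_wait, ic_write_wait, t_wait). The wake side, per the xfs_log_move_tail() hunks below, walks the circular ticket queue and signals each waiter. A hedged sketch of that loop shape; the helper is hypothetical, and the locking and free-space accounting of the real function are left out:

/*
 * Illustrative only: wake every ticket queued on the write head.
 * xfs_log_move_tail() additionally checks available log space before
 * each sv_signal(); that bookkeeping is omitted here.
 */
static void
example_wake_write_waiters(
	xlog_t		*log)
{
	xlog_ticket_t	*tic = log->l_write_headq;

	if (!tic)
		return;
	do {
		sv_signal(&tic->t_wait);
		tic = tic->t_next;
	} while (tic != log->l_write_headq);
}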
@@ -588,12 +585,12 @@ error: | |||
588 | * mp - ubiquitous xfs mount point structure | 585 | * mp - ubiquitous xfs mount point structure |
589 | */ | 586 | */ |
590 | int | 587 | int |
591 | xfs_log_mount_finish(xfs_mount_t *mp, int mfsi_flags) | 588 | xfs_log_mount_finish(xfs_mount_t *mp) |
592 | { | 589 | { |
593 | int error; | 590 | int error; |
594 | 591 | ||
595 | if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) | 592 | if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) |
596 | error = xlog_recover_finish(mp->m_log, mfsi_flags); | 593 | error = xlog_recover_finish(mp->m_log); |
597 | else { | 594 | else { |
598 | error = 0; | 595 | error = 0; |
599 | ASSERT(mp->m_flags & XFS_MOUNT_RDONLY); | 596 | ASSERT(mp->m_flags & XFS_MOUNT_RDONLY); |
@@ -707,7 +704,7 @@ xfs_log_unmount_write(xfs_mount_t *mp) | |||
707 | if (!(iclog->ic_state == XLOG_STATE_ACTIVE || | 704 | if (!(iclog->ic_state == XLOG_STATE_ACTIVE || |
708 | iclog->ic_state == XLOG_STATE_DIRTY)) { | 705 | iclog->ic_state == XLOG_STATE_DIRTY)) { |
709 | if (!XLOG_FORCED_SHUTDOWN(log)) { | 706 | if (!XLOG_FORCED_SHUTDOWN(log)) { |
710 | sv_wait(&iclog->ic_forcesema, PMEM, | 707 | sv_wait(&iclog->ic_force_wait, PMEM, |
711 | &log->l_icloglock, s); | 708 | &log->l_icloglock, s); |
712 | } else { | 709 | } else { |
713 | spin_unlock(&log->l_icloglock); | 710 | spin_unlock(&log->l_icloglock); |
@@ -748,7 +745,7 @@ xfs_log_unmount_write(xfs_mount_t *mp) | |||
748 | || iclog->ic_state == XLOG_STATE_DIRTY | 745 | || iclog->ic_state == XLOG_STATE_DIRTY |
749 | || iclog->ic_state == XLOG_STATE_IOERROR) ) { | 746 | || iclog->ic_state == XLOG_STATE_IOERROR) ) { |
750 | 747 | ||
751 | sv_wait(&iclog->ic_forcesema, PMEM, | 748 | sv_wait(&iclog->ic_force_wait, PMEM, |
752 | &log->l_icloglock, s); | 749 | &log->l_icloglock, s); |
753 | } else { | 750 | } else { |
754 | spin_unlock(&log->l_icloglock); | 751 | spin_unlock(&log->l_icloglock); |
@@ -838,7 +835,7 @@ xfs_log_move_tail(xfs_mount_t *mp, | |||
838 | break; | 835 | break; |
839 | tail_lsn = 0; | 836 | tail_lsn = 0; |
840 | free_bytes -= tic->t_unit_res; | 837 | free_bytes -= tic->t_unit_res; |
841 | sv_signal(&tic->t_sema); | 838 | sv_signal(&tic->t_wait); |
842 | tic = tic->t_next; | 839 | tic = tic->t_next; |
843 | } while (tic != log->l_write_headq); | 840 | } while (tic != log->l_write_headq); |
844 | } | 841 | } |
@@ -859,7 +856,7 @@ xfs_log_move_tail(xfs_mount_t *mp, | |||
859 | break; | 856 | break; |
860 | tail_lsn = 0; | 857 | tail_lsn = 0; |
861 | free_bytes -= need_bytes; | 858 | free_bytes -= need_bytes; |
862 | sv_signal(&tic->t_sema); | 859 | sv_signal(&tic->t_wait); |
863 | tic = tic->t_next; | 860 | tic = tic->t_next; |
864 | } while (tic != log->l_reserve_headq); | 861 | } while (tic != log->l_reserve_headq); |
865 | } | 862 | } |
@@ -1285,8 +1282,8 @@ xlog_alloc_log(xfs_mount_t *mp, | |||
1285 | 1282 | ||
1286 | ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp)); | 1283 | ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp)); |
1287 | ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0); | 1284 | ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0); |
1288 | sv_init(&iclog->ic_forcesema, SV_DEFAULT, "iclog-force"); | 1285 | sv_init(&iclog->ic_force_wait, SV_DEFAULT, "iclog-force"); |
1289 | sv_init(&iclog->ic_writesema, SV_DEFAULT, "iclog-write"); | 1286 | sv_init(&iclog->ic_write_wait, SV_DEFAULT, "iclog-write"); |
1290 | 1287 | ||
1291 | iclogp = &iclog->ic_next; | 1288 | iclogp = &iclog->ic_next; |
1292 | } | 1289 | } |
@@ -1565,8 +1562,8 @@ xlog_dealloc_log(xlog_t *log) | |||
1565 | 1562 | ||
1566 | iclog = log->l_iclog; | 1563 | iclog = log->l_iclog; |
1567 | for (i=0; i<log->l_iclog_bufs; i++) { | 1564 | for (i=0; i<log->l_iclog_bufs; i++) { |
1568 | sv_destroy(&iclog->ic_forcesema); | 1565 | sv_destroy(&iclog->ic_force_wait); |
1569 | sv_destroy(&iclog->ic_writesema); | 1566 | sv_destroy(&iclog->ic_write_wait); |
1570 | xfs_buf_free(iclog->ic_bp); | 1567 | xfs_buf_free(iclog->ic_bp); |
1571 | #ifdef XFS_LOG_TRACE | 1568 | #ifdef XFS_LOG_TRACE |
1572 | if (iclog->ic_trace != NULL) { | 1569 | if (iclog->ic_trace != NULL) { |
@@ -1976,7 +1973,7 @@ xlog_write(xfs_mount_t * mp, | |||
1976 | /* Clean iclogs starting from the head. This ordering must be | 1973 | /* Clean iclogs starting from the head. This ordering must be |
1977 | * maintained, so an iclog doesn't become ACTIVE beyond one that | 1974 | * maintained, so an iclog doesn't become ACTIVE beyond one that |
1978 | * is SYNCING. This is also required to maintain the notion that we use | 1975 | * is SYNCING. This is also required to maintain the notion that we use |
1979 | * a counting semaphore to hold off would be writers to the log when every | 1976 | * an ordered wait queue to hold off would-be writers to the log when every |
1980 | * iclog is trying to sync to disk. | 1977 | * iclog is trying to sync to disk. |
1981 | * | 1978 | * |
1982 | * State Change: DIRTY -> ACTIVE | 1979 | * State Change: DIRTY -> ACTIVE |
@@ -2240,7 +2237,7 @@ xlog_state_do_callback( | |||
2240 | xlog_state_clean_log(log); | 2237 | xlog_state_clean_log(log); |
2241 | 2238 | ||
2242 | /* wake up threads waiting in xfs_log_force() */ | 2239 | /* wake up threads waiting in xfs_log_force() */ |
2243 | sv_broadcast(&iclog->ic_forcesema); | 2240 | sv_broadcast(&iclog->ic_force_wait); |
2244 | 2241 | ||
2245 | iclog = iclog->ic_next; | 2242 | iclog = iclog->ic_next; |
2246 | } while (first_iclog != iclog); | 2243 | } while (first_iclog != iclog); |
@@ -2302,8 +2299,7 @@ xlog_state_do_callback( | |||
2302 | * the second completion goes through. | 2299 | * the second completion goes through. |
2303 | * | 2300 | * |
2304 | * Callbacks could take time, so they are done outside the scope of the | 2301 | * Callbacks could take time, so they are done outside the scope of the |
2305 | * global state machine log lock. Assume that the calls to cvsema won't | 2302 | * global state machine log lock. |
2306 | * take a long time. At least we know it won't sleep. | ||
2307 | */ | 2303 | */ |
2308 | STATIC void | 2304 | STATIC void |
2309 | xlog_state_done_syncing( | 2305 | xlog_state_done_syncing( |
@@ -2339,7 +2335,7 @@ xlog_state_done_syncing( | |||
2339 | * iclog buffer, we wake them all, one will get to do the | 2335 | * iclog buffer, we wake them all, one will get to do the |
2340 | * I/O, the others get to wait for the result. | 2336 | * I/O, the others get to wait for the result. |
2341 | */ | 2337 | */ |
2342 | sv_broadcast(&iclog->ic_writesema); | 2338 | sv_broadcast(&iclog->ic_write_wait); |
2343 | spin_unlock(&log->l_icloglock); | 2339 | spin_unlock(&log->l_icloglock); |
2344 | xlog_state_do_callback(log, aborted, iclog); /* also cleans log */ | 2340 | xlog_state_do_callback(log, aborted, iclog); /* also cleans log */ |
2345 | } /* xlog_state_done_syncing */ | 2341 | } /* xlog_state_done_syncing */ |
@@ -2347,11 +2343,9 @@ xlog_state_done_syncing( | |||
2347 | 2343 | ||
2348 | /* | 2344 | /* |
2349 | * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must | 2345 | * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must |
2350 | * sleep. The flush semaphore is set to the number of in-core buffers and | 2346 | * sleep. We wait on the flush queue on the head iclog as that should be |
2351 | * decremented around disk syncing. Therefore, if all buffers are syncing, | 2347 | * the first iclog to complete flushing. Hence if all iclogs are syncing, |
2352 | * this semaphore will cause new writes to sleep until a sync completes. | 2348 | * we will wait here and all new writes will sleep until a sync completes. |
2353 | * Otherwise, this code just does p() followed by v(). This approximates | ||
2354 | * a sleep/wakeup except we can't race. | ||
2355 | * | 2349 | * |
2356 | * The in-core logs are used in a circular fashion. They are not used | 2350 | * The in-core logs are used in a circular fashion. They are not used |
2357 | * out-of-order even when an iclog past the head is free. | 2351 | * out-of-order even when an iclog past the head is free. |
@@ -2508,7 +2502,7 @@ xlog_grant_log_space(xlog_t *log, | |||
2508 | goto error_return; | 2502 | goto error_return; |
2509 | 2503 | ||
2510 | XFS_STATS_INC(xs_sleep_logspace); | 2504 | XFS_STATS_INC(xs_sleep_logspace); |
2511 | sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s); | 2505 | sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s); |
2512 | /* | 2506 | /* |
2513 | * If we got an error, and the filesystem is shutting down, | 2507 | * If we got an error, and the filesystem is shutting down, |
2514 | * we'll catch it down below. So just continue... | 2508 | * we'll catch it down below. So just continue... |
@@ -2534,7 +2528,7 @@ redo: | |||
2534 | xlog_trace_loggrant(log, tic, | 2528 | xlog_trace_loggrant(log, tic, |
2535 | "xlog_grant_log_space: sleep 2"); | 2529 | "xlog_grant_log_space: sleep 2"); |
2536 | XFS_STATS_INC(xs_sleep_logspace); | 2530 | XFS_STATS_INC(xs_sleep_logspace); |
2537 | sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s); | 2531 | sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s); |
2538 | 2532 | ||
2539 | if (XLOG_FORCED_SHUTDOWN(log)) { | 2533 | if (XLOG_FORCED_SHUTDOWN(log)) { |
2540 | spin_lock(&log->l_grant_lock); | 2534 | spin_lock(&log->l_grant_lock); |
@@ -2633,7 +2627,7 @@ xlog_regrant_write_log_space(xlog_t *log, | |||
2633 | if (free_bytes < ntic->t_unit_res) | 2627 | if (free_bytes < ntic->t_unit_res) |
2634 | break; | 2628 | break; |
2635 | free_bytes -= ntic->t_unit_res; | 2629 | free_bytes -= ntic->t_unit_res; |
2636 | sv_signal(&ntic->t_sema); | 2630 | sv_signal(&ntic->t_wait); |
2637 | ntic = ntic->t_next; | 2631 | ntic = ntic->t_next; |
2638 | } while (ntic != log->l_write_headq); | 2632 | } while (ntic != log->l_write_headq); |
2639 | 2633 | ||
@@ -2644,7 +2638,7 @@ xlog_regrant_write_log_space(xlog_t *log, | |||
2644 | xlog_trace_loggrant(log, tic, | 2638 | xlog_trace_loggrant(log, tic, |
2645 | "xlog_regrant_write_log_space: sleep 1"); | 2639 | "xlog_regrant_write_log_space: sleep 1"); |
2646 | XFS_STATS_INC(xs_sleep_logspace); | 2640 | XFS_STATS_INC(xs_sleep_logspace); |
2647 | sv_wait(&tic->t_sema, PINOD|PLTWAIT, | 2641 | sv_wait(&tic->t_wait, PINOD|PLTWAIT, |
2648 | &log->l_grant_lock, s); | 2642 | &log->l_grant_lock, s); |
2649 | 2643 | ||
2650 | /* If we're shutting down, this tic is already | 2644 | /* If we're shutting down, this tic is already |
@@ -2673,7 +2667,7 @@ redo: | |||
2673 | if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) | 2667 | if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) |
2674 | xlog_ins_ticketq(&log->l_write_headq, tic); | 2668 | xlog_ins_ticketq(&log->l_write_headq, tic); |
2675 | XFS_STATS_INC(xs_sleep_logspace); | 2669 | XFS_STATS_INC(xs_sleep_logspace); |
2676 | sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s); | 2670 | sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s); |
2677 | 2671 | ||
2678 | /* If we're shutting down, this tic is already off the queue */ | 2672 | /* If we're shutting down, this tic is already off the queue */ |
2679 | if (XLOG_FORCED_SHUTDOWN(log)) { | 2673 | if (XLOG_FORCED_SHUTDOWN(log)) { |
@@ -2916,7 +2910,7 @@ xlog_state_switch_iclogs(xlog_t *log, | |||
2916 | * 2. the current iclog is dirty, and the previous iclog is in the | 2910 | * 2. the current iclog is dirty, and the previous iclog is in the |
2917 | * active or dirty state. | 2911 | * active or dirty state. |
2918 | * | 2912 | * |
2919 | * We may sleep (call psema) if: | 2913 | * We may sleep if: |
2920 | * | 2914 | * |
2921 | * 1. the current iclog is not in the active nor dirty state. | 2915 | * 1. the current iclog is not in the active nor dirty state. |
2922 | * 2. the current iclog is dirty, and the previous iclog is not in the | 2916 | * 2. the current iclog is dirty, and the previous iclog is not in the |
@@ -3013,7 +3007,7 @@ maybe_sleep: | |||
3013 | return XFS_ERROR(EIO); | 3007 | return XFS_ERROR(EIO); |
3014 | } | 3008 | } |
3015 | XFS_STATS_INC(xs_log_force_sleep); | 3009 | XFS_STATS_INC(xs_log_force_sleep); |
3016 | sv_wait(&iclog->ic_forcesema, PINOD, &log->l_icloglock, s); | 3010 | sv_wait(&iclog->ic_force_wait, PINOD, &log->l_icloglock, s); |
3017 | /* | 3011 | /* |
3018 | * No need to grab the log lock here since we're | 3012 | * No need to grab the log lock here since we're |
3019 | * only deciding whether or not to return EIO | 3013 | * only deciding whether or not to return EIO |
@@ -3096,7 +3090,7 @@ try_again: | |||
3096 | XLOG_STATE_SYNCING))) { | 3090 | XLOG_STATE_SYNCING))) { |
3097 | ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR)); | 3091 | ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR)); |
3098 | XFS_STATS_INC(xs_log_force_sleep); | 3092 | XFS_STATS_INC(xs_log_force_sleep); |
3099 | sv_wait(&iclog->ic_prev->ic_writesema, PSWP, | 3093 | sv_wait(&iclog->ic_prev->ic_write_wait, PSWP, |
3100 | &log->l_icloglock, s); | 3094 | &log->l_icloglock, s); |
3101 | *log_flushed = 1; | 3095 | *log_flushed = 1; |
3102 | already_slept = 1; | 3096 | already_slept = 1; |
@@ -3116,7 +3110,7 @@ try_again: | |||
3116 | !(iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) { | 3110 | !(iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) { |
3117 | 3111 | ||
3118 | /* | 3112 | /* |
3119 | * Don't wait on the forcesema if we know that we've | 3113 | * Don't wait on completion if we know that we've |
3120 | * gotten a log write error. | 3114 | * gotten a log write error. |
3121 | */ | 3115 | */ |
3122 | if (iclog->ic_state & XLOG_STATE_IOERROR) { | 3116 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
@@ -3124,7 +3118,7 @@ try_again: | |||
3124 | return XFS_ERROR(EIO); | 3118 | return XFS_ERROR(EIO); |
3125 | } | 3119 | } |
3126 | XFS_STATS_INC(xs_log_force_sleep); | 3120 | XFS_STATS_INC(xs_log_force_sleep); |
3127 | sv_wait(&iclog->ic_forcesema, PSWP, &log->l_icloglock, s); | 3121 | sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s); |
3128 | /* | 3122 | /* |
3129 | * No need to grab the log lock here since we're | 3123 | * No need to grab the log lock here since we're |
3130 | * only deciding whether or not to return EIO | 3124 | * only deciding whether or not to return EIO |
@@ -3180,7 +3174,7 @@ STATIC void | |||
3180 | xlog_ticket_put(xlog_t *log, | 3174 | xlog_ticket_put(xlog_t *log, |
3181 | xlog_ticket_t *ticket) | 3175 | xlog_ticket_t *ticket) |
3182 | { | 3176 | { |
3183 | sv_destroy(&ticket->t_sema); | 3177 | sv_destroy(&ticket->t_wait); |
3184 | kmem_zone_free(xfs_log_ticket_zone, ticket); | 3178 | kmem_zone_free(xfs_log_ticket_zone, ticket); |
3185 | } /* xlog_ticket_put */ | 3179 | } /* xlog_ticket_put */ |
3186 | 3180 | ||
@@ -3270,7 +3264,7 @@ xlog_ticket_get(xlog_t *log, | |||
3270 | tic->t_trans_type = 0; | 3264 | tic->t_trans_type = 0; |
3271 | if (xflags & XFS_LOG_PERM_RESERV) | 3265 | if (xflags & XFS_LOG_PERM_RESERV) |
3272 | tic->t_flags |= XLOG_TIC_PERM_RESERV; | 3266 | tic->t_flags |= XLOG_TIC_PERM_RESERV; |
3273 | sv_init(&(tic->t_sema), SV_DEFAULT, "logtick"); | 3267 | sv_init(&(tic->t_wait), SV_DEFAULT, "logtick"); |
3274 | 3268 | ||
3275 | xlog_tic_reset_res(tic); | 3269 | xlog_tic_reset_res(tic); |
3276 | 3270 | ||
@@ -3557,14 +3551,14 @@ xfs_log_force_umount( | |||
3557 | */ | 3551 | */ |
3558 | if ((tic = log->l_reserve_headq)) { | 3552 | if ((tic = log->l_reserve_headq)) { |
3559 | do { | 3553 | do { |
3560 | sv_signal(&tic->t_sema); | 3554 | sv_signal(&tic->t_wait); |
3561 | tic = tic->t_next; | 3555 | tic = tic->t_next; |
3562 | } while (tic != log->l_reserve_headq); | 3556 | } while (tic != log->l_reserve_headq); |
3563 | } | 3557 | } |
3564 | 3558 | ||
3565 | if ((tic = log->l_write_headq)) { | 3559 | if ((tic = log->l_write_headq)) { |
3566 | do { | 3560 | do { |
3567 | sv_signal(&tic->t_sema); | 3561 | sv_signal(&tic->t_wait); |
3568 | tic = tic->t_next; | 3562 | tic = tic->t_next; |
3569 | } while (tic != log->l_write_headq); | 3563 | } while (tic != log->l_write_headq); |
3570 | } | 3564 | } |
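Across xfs_log.c the pattern is uniform: every sleeping-semaphore member (t_sema, ic_forcesema, ic_writesema) becomes a plain wait channel (t_wait, ic_force_wait, ic_write_wait) with the same sv_init/sv_wait/sv_signal/sv_destroy call sites, so behaviour is unchanged while the naming stops implying semaphore counting. The debug trace buffers also switch to KM_NOFS so an allocation in the log path cannot recurse back into the filesystem. For the ticket queues the sleep/wake cycle looks roughly like this; enough_log_space() is a made-up stand-in for the free-byte check in xlog_grant_log_space(), and s is the cookie already passed around:

    /* reserve path: queue the ticket and sleep until signalled */
    spin_lock(&log->l_grant_lock);
    while (!enough_log_space(log, tic)) {           /* hypothetical check */
            if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
                    xlog_ins_ticketq(&log->l_reserve_headq, tic);
            XFS_STATS_INC(xs_sleep_logspace);
            /* sv_wait() drops l_grant_lock and sleeps until sv_signal() */
            sv_wait(&tic->t_wait, PINOD | PLTWAIT, &log->l_grant_lock, s);
            spin_lock(&log->l_grant_lock);
    }
    spin_unlock(&log->l_grant_lock);

    /* tail-moving path (xfs_log_move_tail): wake tickets that now fit */
    sv_signal(&tic->t_wait);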
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h index d1d678ecb63e..d47b91f10822 100644 --- a/fs/xfs/xfs_log.h +++ b/fs/xfs/xfs_log.h | |||
@@ -149,7 +149,7 @@ int xfs_log_mount(struct xfs_mount *mp, | |||
149 | struct xfs_buftarg *log_target, | 149 | struct xfs_buftarg *log_target, |
150 | xfs_daddr_t start_block, | 150 | xfs_daddr_t start_block, |
151 | int num_bblocks); | 151 | int num_bblocks); |
152 | int xfs_log_mount_finish(struct xfs_mount *mp, int); | 152 | int xfs_log_mount_finish(struct xfs_mount *mp); |
153 | void xfs_log_move_tail(struct xfs_mount *mp, | 153 | void xfs_log_move_tail(struct xfs_mount *mp, |
154 | xfs_lsn_t tail_lsn); | 154 | xfs_lsn_t tail_lsn); |
155 | int xfs_log_notify(struct xfs_mount *mp, | 155 | int xfs_log_notify(struct xfs_mount *mp, |
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 6245913196b4..c8a5b22ee3e3 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h | |||
@@ -241,7 +241,7 @@ typedef struct xlog_res { | |||
241 | } xlog_res_t; | 241 | } xlog_res_t; |
242 | 242 | ||
243 | typedef struct xlog_ticket { | 243 | typedef struct xlog_ticket { |
244 | sv_t t_sema; /* sleep on this semaphore : 20 */ | 244 | sv_t t_wait; /* ticket wait queue : 20 */ |
245 | struct xlog_ticket *t_next; /* :4|8 */ | 245 | struct xlog_ticket *t_next; /* :4|8 */ |
246 | struct xlog_ticket *t_prev; /* :4|8 */ | 246 | struct xlog_ticket *t_prev; /* :4|8 */ |
247 | xlog_tid_t t_tid; /* transaction identifier : 4 */ | 247 | xlog_tid_t t_tid; /* transaction identifier : 4 */ |
@@ -314,7 +314,7 @@ typedef struct xlog_rec_ext_header { | |||
314 | * xlog_rec_header_t into the reserved space. | 314 | * xlog_rec_header_t into the reserved space. |
315 | * - ic_data follows, so a write to disk can start at the beginning of | 315 | * - ic_data follows, so a write to disk can start at the beginning of |
316 | * the iclog. | 316 | * the iclog. |
317 | * - ic_forcesema is used to implement synchronous forcing of the iclog to disk. | 317 | * - ic_force_wait is used to implement synchronous forcing of the iclog to disk. |
318 | * - ic_next is the pointer to the next iclog in the ring. | 318 | * - ic_next is the pointer to the next iclog in the ring. |
319 | * - ic_bp is a pointer to the buffer used to write this incore log to disk. | 319 | * - ic_bp is a pointer to the buffer used to write this incore log to disk. |
320 | * - ic_log is a pointer back to the global log structure. | 320 | * - ic_log is a pointer back to the global log structure. |
@@ -339,8 +339,8 @@ typedef struct xlog_rec_ext_header { | |||
339 | * and move everything else out to subsequent cachelines. | 339 | * and move everything else out to subsequent cachelines. |
340 | */ | 340 | */ |
341 | typedef struct xlog_iclog_fields { | 341 | typedef struct xlog_iclog_fields { |
342 | sv_t ic_forcesema; | 342 | sv_t ic_force_wait; |
343 | sv_t ic_writesema; | 343 | sv_t ic_write_wait; |
344 | struct xlog_in_core *ic_next; | 344 | struct xlog_in_core *ic_next; |
345 | struct xlog_in_core *ic_prev; | 345 | struct xlog_in_core *ic_prev; |
346 | struct xfs_buf *ic_bp; | 346 | struct xfs_buf *ic_bp; |
@@ -377,8 +377,8 @@ typedef struct xlog_in_core { | |||
377 | /* | 377 | /* |
378 | * Defines to save our code from this glop. | 378 | * Defines to save our code from this glop. |
379 | */ | 379 | */ |
380 | #define ic_forcesema hic_fields.ic_forcesema | 380 | #define ic_force_wait hic_fields.ic_force_wait |
381 | #define ic_writesema hic_fields.ic_writesema | 381 | #define ic_write_wait hic_fields.ic_write_wait |
382 | #define ic_next hic_fields.ic_next | 382 | #define ic_next hic_fields.ic_next |
383 | #define ic_prev hic_fields.ic_prev | 383 | #define ic_prev hic_fields.ic_prev |
384 | #define ic_bp hic_fields.ic_bp | 384 | #define ic_bp hic_fields.ic_bp |
@@ -468,7 +468,7 @@ extern int xlog_find_tail(xlog_t *log, | |||
468 | xfs_daddr_t *head_blk, | 468 | xfs_daddr_t *head_blk, |
469 | xfs_daddr_t *tail_blk); | 469 | xfs_daddr_t *tail_blk); |
470 | extern int xlog_recover(xlog_t *log); | 470 | extern int xlog_recover(xlog_t *log); |
471 | extern int xlog_recover_finish(xlog_t *log, int mfsi_flags); | 471 | extern int xlog_recover_finish(xlog_t *log); |
472 | extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int); | 472 | extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int); |
473 | extern void xlog_recover_process_iunlinks(xlog_t *log); | 473 | extern void xlog_recover_process_iunlinks(xlog_t *log); |
474 | 474 | ||
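The renamed iclog fields make the two distinct wait conditions explicit; which threads sleep on each is spelled out by the sv_wait()/sv_broadcast() call sites in xfs_log.c above. An annotated sketch, with the rest of the structure left as it is in the header:

    typedef struct xlog_iclog_fields {
            sv_t    ic_force_wait;  /* synchronous log-force callers; broadcast
                                     * from xlog_state_do_callback() when this
                                     * iclog's callbacks have run */
            sv_t    ic_write_wait;  /* threads needing this iclog's buffer;
                                     * broadcast from xlog_state_done_syncing()
                                     * when the write to disk completes */
            /* ... remaining members unchanged ... */
    } xlog_iclog_fields_t;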
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 9eb722ec744e..82d46ce69d5f 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
@@ -3940,8 +3940,7 @@ xlog_recover( | |||
3940 | */ | 3940 | */ |
3941 | int | 3941 | int |
3942 | xlog_recover_finish( | 3942 | xlog_recover_finish( |
3943 | xlog_t *log, | 3943 | xlog_t *log) |
3944 | int mfsi_flags) | ||
3945 | { | 3944 | { |
3946 | /* | 3945 | /* |
3947 | * Now we're ready to do the transactions needed for the | 3946 | * Now we're ready to do the transactions needed for the |
@@ -3969,9 +3968,7 @@ xlog_recover_finish( | |||
3969 | xfs_log_force(log->l_mp, (xfs_lsn_t)0, | 3968 | xfs_log_force(log->l_mp, (xfs_lsn_t)0, |
3970 | (XFS_LOG_FORCE | XFS_LOG_SYNC)); | 3969 | (XFS_LOG_FORCE | XFS_LOG_SYNC)); |
3971 | 3970 | ||
3972 | if ( (mfsi_flags & XFS_MFSI_NOUNLINK) == 0 ) { | 3971 | xlog_recover_process_iunlinks(log); |
3973 | xlog_recover_process_iunlinks(log); | ||
3974 | } | ||
3975 | 3972 | ||
3976 | xlog_recover_check_summary(log); | 3973 | xlog_recover_check_summary(log); |
3977 | 3974 | ||
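With XFS_MFSI_NOUNLINK gone (see the xfs_mount.h hunk further down), recovery no longer has a caller that wants to skip unlinked-inode processing, so xlog_recover_finish() loses its flag argument and runs it unconditionally. The resulting mount-finish path is roughly equivalent to the xfs_log.c hunk above, reshaped here as a sketch:

    int
    xfs_log_mount_finish(
            xfs_mount_t     *mp)
    {
            if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
                    ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
                    return 0;
            }
            /* replays the log, then always cleans up unlinked inodes */
            return xlog_recover_finish(mp->m_log);
    }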
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 6c5d1325e7f6..a4503f5e9497 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c | |||
@@ -128,7 +128,7 @@ static const struct { | |||
128 | * initialized. | 128 | * initialized. |
129 | */ | 129 | */ |
130 | STATIC void | 130 | STATIC void |
131 | xfs_mount_free( | 131 | xfs_free_perag( |
132 | xfs_mount_t *mp) | 132 | xfs_mount_t *mp) |
133 | { | 133 | { |
134 | if (mp->m_perag) { | 134 | if (mp->m_perag) { |
@@ -139,20 +139,6 @@ xfs_mount_free( | |||
139 | kmem_free(mp->m_perag[agno].pagb_list); | 139 | kmem_free(mp->m_perag[agno].pagb_list); |
140 | kmem_free(mp->m_perag); | 140 | kmem_free(mp->m_perag); |
141 | } | 141 | } |
142 | |||
143 | spinlock_destroy(&mp->m_ail_lock); | ||
144 | spinlock_destroy(&mp->m_sb_lock); | ||
145 | mutex_destroy(&mp->m_ilock); | ||
146 | mutex_destroy(&mp->m_growlock); | ||
147 | if (mp->m_quotainfo) | ||
148 | XFS_QM_DONE(mp); | ||
149 | |||
150 | if (mp->m_fsname != NULL) | ||
151 | kmem_free(mp->m_fsname); | ||
152 | if (mp->m_rtname != NULL) | ||
153 | kmem_free(mp->m_rtname); | ||
154 | if (mp->m_logname != NULL) | ||
155 | kmem_free(mp->m_logname); | ||
156 | } | 142 | } |
157 | 143 | ||
158 | /* | 144 | /* |
@@ -704,11 +690,11 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount) | |||
704 | * Update alignment values based on mount options and sb values | 690 | * Update alignment values based on mount options and sb values |
705 | */ | 691 | */ |
706 | STATIC int | 692 | STATIC int |
707 | xfs_update_alignment(xfs_mount_t *mp, int mfsi_flags, __uint64_t *update_flags) | 693 | xfs_update_alignment(xfs_mount_t *mp, __uint64_t *update_flags) |
708 | { | 694 | { |
709 | xfs_sb_t *sbp = &(mp->m_sb); | 695 | xfs_sb_t *sbp = &(mp->m_sb); |
710 | 696 | ||
711 | if (mp->m_dalign && !(mfsi_flags & XFS_MFSI_SECOND)) { | 697 | if (mp->m_dalign) { |
712 | /* | 698 | /* |
713 | * If stripe unit and stripe width are not multiples | 699 | * If stripe unit and stripe width are not multiples |
714 | * of the fs blocksize turn off alignment. | 700 | * of the fs blocksize turn off alignment. |
@@ -864,7 +850,7 @@ xfs_set_inoalignment(xfs_mount_t *mp) | |||
864 | * Check that the data (and log if separate) are an ok size. | 850 | * Check that the data (and log if separate) are an ok size. |
865 | */ | 851 | */ |
866 | STATIC int | 852 | STATIC int |
867 | xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags) | 853 | xfs_check_sizes(xfs_mount_t *mp) |
868 | { | 854 | { |
869 | xfs_buf_t *bp; | 855 | xfs_buf_t *bp; |
870 | xfs_daddr_t d; | 856 | xfs_daddr_t d; |
@@ -887,8 +873,7 @@ xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags) | |||
887 | return error; | 873 | return error; |
888 | } | 874 | } |
889 | 875 | ||
890 | if (((mfsi_flags & XFS_MFSI_CLIENT) == 0) && | 876 | if (mp->m_logdev_targp != mp->m_ddev_targp) { |
891 | mp->m_logdev_targp != mp->m_ddev_targp) { | ||
892 | d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); | 877 | d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); |
893 | if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { | 878 | if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { |
894 | cmn_err(CE_WARN, "XFS: size check 3 failed"); | 879 | cmn_err(CE_WARN, "XFS: size check 3 failed"); |
@@ -923,15 +908,13 @@ xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags) | |||
923 | */ | 908 | */ |
924 | int | 909 | int |
925 | xfs_mountfs( | 910 | xfs_mountfs( |
926 | xfs_mount_t *mp, | 911 | xfs_mount_t *mp) |
927 | int mfsi_flags) | ||
928 | { | 912 | { |
929 | xfs_sb_t *sbp = &(mp->m_sb); | 913 | xfs_sb_t *sbp = &(mp->m_sb); |
930 | xfs_inode_t *rip; | 914 | xfs_inode_t *rip; |
931 | __uint64_t resblks; | 915 | __uint64_t resblks; |
932 | __int64_t update_flags = 0LL; | 916 | __int64_t update_flags = 0LL; |
933 | uint quotamount, quotaflags; | 917 | uint quotamount, quotaflags; |
934 | int agno; | ||
935 | int uuid_mounted = 0; | 918 | int uuid_mounted = 0; |
936 | int error = 0; | 919 | int error = 0; |
937 | 920 | ||
@@ -985,7 +968,7 @@ xfs_mountfs( | |||
985 | * allocator alignment is within an ag, therefore ag has | 968 | * allocator alignment is within an ag, therefore ag has |
986 | * to be aligned at stripe boundary. | 969 | * to be aligned at stripe boundary. |
987 | */ | 970 | */ |
988 | error = xfs_update_alignment(mp, mfsi_flags, &update_flags); | 971 | error = xfs_update_alignment(mp, &update_flags); |
989 | if (error) | 972 | if (error) |
990 | goto error1; | 973 | goto error1; |
991 | 974 | ||
@@ -1004,8 +987,7 @@ xfs_mountfs( | |||
1004 | * since a single partition filesystem is identical to a single | 987 | * since a single partition filesystem is identical to a single |
1005 | * partition volume/filesystem. | 988 | * partition volume/filesystem. |
1006 | */ | 989 | */ |
1007 | if ((mfsi_flags & XFS_MFSI_SECOND) == 0 && | 990 | if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0) { |
1008 | (mp->m_flags & XFS_MOUNT_NOUUID) == 0) { | ||
1009 | if (xfs_uuid_mount(mp)) { | 991 | if (xfs_uuid_mount(mp)) { |
1010 | error = XFS_ERROR(EINVAL); | 992 | error = XFS_ERROR(EINVAL); |
1011 | goto error1; | 993 | goto error1; |
@@ -1033,7 +1015,7 @@ xfs_mountfs( | |||
1033 | /* | 1015 | /* |
1034 | * Check that the data (and log if separate) are an ok size. | 1016 | * Check that the data (and log if separate) are an ok size. |
1035 | */ | 1017 | */ |
1036 | error = xfs_check_sizes(mp, mfsi_flags); | 1018 | error = xfs_check_sizes(mp); |
1037 | if (error) | 1019 | if (error) |
1038 | goto error1; | 1020 | goto error1; |
1039 | 1021 | ||
@@ -1047,13 +1029,6 @@ xfs_mountfs( | |||
1047 | } | 1029 | } |
1048 | 1030 | ||
1049 | /* | 1031 | /* |
1050 | * For client case we are done now | ||
1051 | */ | ||
1052 | if (mfsi_flags & XFS_MFSI_CLIENT) { | ||
1053 | return 0; | ||
1054 | } | ||
1055 | |||
1056 | /* | ||
1057 | * Copies the low order bits of the timestamp and the randomly | 1032 | * Copies the low order bits of the timestamp and the randomly |
1058 | * set "sequence" number out of a UUID. | 1033 | * set "sequence" number out of a UUID. |
1059 | */ | 1034 | */ |
@@ -1077,8 +1052,10 @@ xfs_mountfs( | |||
1077 | * Allocate and initialize the per-ag data. | 1052 | * Allocate and initialize the per-ag data. |
1078 | */ | 1053 | */ |
1079 | init_rwsem(&mp->m_peraglock); | 1054 | init_rwsem(&mp->m_peraglock); |
1080 | mp->m_perag = | 1055 | mp->m_perag = kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t), |
1081 | kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t), KM_SLEEP); | 1056 | KM_MAYFAIL); |
1057 | if (!mp->m_perag) | ||
1058 | goto error1; | ||
1082 | 1059 | ||
1083 | mp->m_maxagi = xfs_initialize_perag(mp, sbp->sb_agcount); | 1060 | mp->m_maxagi = xfs_initialize_perag(mp, sbp->sb_agcount); |
1084 | 1061 | ||
@@ -1190,7 +1167,7 @@ xfs_mountfs( | |||
1190 | * delayed until after the root and real-time bitmap inodes | 1167 | * delayed until after the root and real-time bitmap inodes |
1191 | * were consistently read in. | 1168 | * were consistently read in. |
1192 | */ | 1169 | */ |
1193 | error = xfs_log_mount_finish(mp, mfsi_flags); | 1170 | error = xfs_log_mount_finish(mp); |
1194 | if (error) { | 1171 | if (error) { |
1195 | cmn_err(CE_WARN, "XFS: log mount finish failed"); | 1172 | cmn_err(CE_WARN, "XFS: log mount finish failed"); |
1196 | goto error4; | 1173 | goto error4; |
@@ -1199,7 +1176,7 @@ xfs_mountfs( | |||
1199 | /* | 1176 | /* |
1200 | * Complete the quota initialisation, post-log-replay component. | 1177 | * Complete the quota initialisation, post-log-replay component. |
1201 | */ | 1178 | */ |
1202 | error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags); | 1179 | error = XFS_QM_MOUNT(mp, quotamount, quotaflags); |
1203 | if (error) | 1180 | if (error) |
1204 | goto error4; | 1181 | goto error4; |
1205 | 1182 | ||
@@ -1233,12 +1210,7 @@ xfs_mountfs( | |||
1233 | error3: | 1210 | error3: |
1234 | xfs_log_unmount_dealloc(mp); | 1211 | xfs_log_unmount_dealloc(mp); |
1235 | error2: | 1212 | error2: |
1236 | for (agno = 0; agno < sbp->sb_agcount; agno++) | 1213 | xfs_free_perag(mp); |
1237 | if (mp->m_perag[agno].pagb_list) | ||
1238 | kmem_free(mp->m_perag[agno].pagb_list); | ||
1239 | kmem_free(mp->m_perag); | ||
1240 | mp->m_perag = NULL; | ||
1241 | /* FALLTHROUGH */ | ||
1242 | error1: | 1214 | error1: |
1243 | if (uuid_mounted) | 1215 | if (uuid_mounted) |
1244 | uuid_table_remove(&mp->m_sb.sb_uuid); | 1216 | uuid_table_remove(&mp->m_sb.sb_uuid); |
@@ -1246,16 +1218,17 @@ xfs_mountfs( | |||
1246 | } | 1218 | } |
1247 | 1219 | ||
1248 | /* | 1220 | /* |
1249 | * xfs_unmountfs | ||
1250 | * | ||
1251 | * This flushes out the inodes, dquots and the superblock, unmounts the | 1221 | * This flushes out the inodes, dquots and the superblock, unmounts the |
1252 | * log and makes sure that incore structures are freed. | 1222 | * log and makes sure that incore structures are freed. |
1253 | */ | 1223 | */ |
1254 | int | 1224 | void |
1255 | xfs_unmountfs(xfs_mount_t *mp) | 1225 | xfs_unmountfs( |
1226 | struct xfs_mount *mp) | ||
1256 | { | 1227 | { |
1257 | __uint64_t resblks; | 1228 | __uint64_t resblks; |
1258 | int error = 0; | 1229 | int error; |
1230 | |||
1231 | IRELE(mp->m_rootip); | ||
1259 | 1232 | ||
1260 | /* | 1233 | /* |
1261 | * We can potentially deadlock here if we have an inode cluster | 1234 | * We can potentially deadlock here if we have an inode cluster |
@@ -1312,8 +1285,6 @@ xfs_unmountfs(xfs_mount_t *mp) | |||
1312 | xfs_unmountfs_wait(mp); /* wait for async bufs */ | 1285 | xfs_unmountfs_wait(mp); /* wait for async bufs */ |
1313 | xfs_log_unmount(mp); /* Done! No more fs ops. */ | 1286 | xfs_log_unmount(mp); /* Done! No more fs ops. */ |
1314 | 1287 | ||
1315 | xfs_freesb(mp); | ||
1316 | |||
1317 | /* | 1288 | /* |
1318 | * All inodes from this mount point should be freed. | 1289 | * All inodes from this mount point should be freed. |
1319 | */ | 1290 | */ |
@@ -1322,11 +1293,12 @@ xfs_unmountfs(xfs_mount_t *mp) | |||
1322 | if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0) | 1293 | if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0) |
1323 | uuid_table_remove(&mp->m_sb.sb_uuid); | 1294 | uuid_table_remove(&mp->m_sb.sb_uuid); |
1324 | 1295 | ||
1325 | #if defined(DEBUG) || defined(INDUCE_IO_ERROR) | 1296 | #if defined(DEBUG) |
1326 | xfs_errortag_clearall(mp, 0); | 1297 | xfs_errortag_clearall(mp, 0); |
1327 | #endif | 1298 | #endif |
1328 | xfs_mount_free(mp); | 1299 | xfs_free_perag(mp); |
1329 | return 0; | 1300 | if (mp->m_quotainfo) |
1301 | XFS_QM_DONE(mp); | ||
1330 | } | 1302 | } |
1331 | 1303 | ||
1332 | STATIC void | 1304 | STATIC void |
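Three cleanups meet in xfs_mount.c: xfs_mount_free() shrinks to xfs_free_perag() with the rest of the teardown folded into the unmount path, xfs_unmountfs() becomes void and no longer calls xfs_freesb() (that call presumably moves out to the Linux-side put_super path), and the large per-ag array allocation switches from KM_SLEEP to KM_MAYFAIL so a huge filesystem can fail the mount gracefully instead of blocking in the allocator. The KM_MAYFAIL pattern in isolation, mirroring the hunk above with errno propagation elided:

    /* mount-time per-ag allocation after the change (sketch) */
    mp->m_perag = kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t),
                              KM_MAYFAIL);
    if (!mp->m_perag)
            goto error1;            /* unwind uuid/alignment setup */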
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 5269bd6e3df0..f3c1024b1241 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h | |||
@@ -114,7 +114,7 @@ struct xfs_dqtrxops; | |||
114 | struct xfs_quotainfo; | 114 | struct xfs_quotainfo; |
115 | 115 | ||
116 | typedef int (*xfs_qminit_t)(struct xfs_mount *, uint *, uint *); | 116 | typedef int (*xfs_qminit_t)(struct xfs_mount *, uint *, uint *); |
117 | typedef int (*xfs_qmmount_t)(struct xfs_mount *, uint, uint, int); | 117 | typedef int (*xfs_qmmount_t)(struct xfs_mount *, uint, uint); |
118 | typedef int (*xfs_qmunmount_t)(struct xfs_mount *); | 118 | typedef int (*xfs_qmunmount_t)(struct xfs_mount *); |
119 | typedef void (*xfs_qmdone_t)(struct xfs_mount *); | 119 | typedef void (*xfs_qmdone_t)(struct xfs_mount *); |
120 | typedef void (*xfs_dqrele_t)(struct xfs_dquot *); | 120 | typedef void (*xfs_dqrele_t)(struct xfs_dquot *); |
@@ -158,8 +158,8 @@ typedef struct xfs_qmops { | |||
158 | 158 | ||
159 | #define XFS_QM_INIT(mp, mnt, fl) \ | 159 | #define XFS_QM_INIT(mp, mnt, fl) \ |
160 | (*(mp)->m_qm_ops->xfs_qminit)(mp, mnt, fl) | 160 | (*(mp)->m_qm_ops->xfs_qminit)(mp, mnt, fl) |
161 | #define XFS_QM_MOUNT(mp, mnt, fl, mfsi_flags) \ | 161 | #define XFS_QM_MOUNT(mp, mnt, fl) \ |
162 | (*(mp)->m_qm_ops->xfs_qmmount)(mp, mnt, fl, mfsi_flags) | 162 | (*(mp)->m_qm_ops->xfs_qmmount)(mp, mnt, fl) |
163 | #define XFS_QM_UNMOUNT(mp) \ | 163 | #define XFS_QM_UNMOUNT(mp) \ |
164 | (*(mp)->m_qm_ops->xfs_qmunmount)(mp) | 164 | (*(mp)->m_qm_ops->xfs_qmunmount)(mp) |
165 | #define XFS_QM_DONE(mp) \ | 165 | #define XFS_QM_DONE(mp) \ |
@@ -442,13 +442,6 @@ void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname, | |||
442 | /* | 442 | /* |
443 | * Flags for xfs_mountfs | 443 | * Flags for xfs_mountfs |
444 | */ | 444 | */ |
445 | #define XFS_MFSI_SECOND 0x01 /* Secondary mount -- skip stuff */ | ||
446 | #define XFS_MFSI_CLIENT 0x02 /* Is a client -- skip lots of stuff */ | ||
447 | /* XFS_MFSI_RRINODES */ | ||
448 | #define XFS_MFSI_NOUNLINK 0x08 /* Skip unlinked inode processing in */ | ||
449 | /* log recovery */ | ||
450 | #define XFS_MFSI_NO_QUOTACHECK 0x10 /* Skip quotacheck processing */ | ||
451 | /* XFS_MFSI_CONVERT_SUNIT */ | ||
452 | #define XFS_MFSI_QUIET 0x40 /* Be silent if mount errors found */ | 445 | #define XFS_MFSI_QUIET 0x40 /* Be silent if mount errors found */ |
453 | 446 | ||
454 | #define XFS_DADDR_TO_AGNO(mp,d) xfs_daddr_to_agno(mp,d) | 447 | #define XFS_DADDR_TO_AGNO(mp,d) xfs_daddr_to_agno(mp,d) |
@@ -517,10 +510,10 @@ typedef struct xfs_mod_sb { | |||
517 | 510 | ||
518 | extern void xfs_mod_sb(xfs_trans_t *, __int64_t); | 511 | extern void xfs_mod_sb(xfs_trans_t *, __int64_t); |
519 | extern int xfs_log_sbcount(xfs_mount_t *, uint); | 512 | extern int xfs_log_sbcount(xfs_mount_t *, uint); |
520 | extern int xfs_mountfs(xfs_mount_t *mp, int); | 513 | extern int xfs_mountfs(xfs_mount_t *mp); |
521 | extern void xfs_mountfs_check_barriers(xfs_mount_t *mp); | 514 | extern void xfs_mountfs_check_barriers(xfs_mount_t *mp); |
522 | 515 | ||
523 | extern int xfs_unmountfs(xfs_mount_t *); | 516 | extern void xfs_unmountfs(xfs_mount_t *); |
524 | extern int xfs_unmountfs_writesb(xfs_mount_t *); | 517 | extern int xfs_unmountfs_writesb(xfs_mount_t *); |
525 | extern int xfs_unmount_flush(xfs_mount_t *, int); | 518 | extern int xfs_unmount_flush(xfs_mount_t *, int); |
526 | extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int); | 519 | extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int); |
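Only XFS_MFSI_QUIET survives: with the client and secondary mount flags gone there is no caller left that needs to skip parts of xfs_mountfs(), so the prototype and its callers simplify, and xfs_unmountfs() stops returning a value nobody acted on. A call-site sketch, assuming (as the xfs_mount.c hunks above suggest) that freeing the superblock buffer is now the caller's job:

    /* mount side (sketch): */
    error = xfs_mountfs(mp);        /* no mfsi_flags argument any more */

    /* unmount side (sketch): */
    xfs_unmountfs(mp);              /* returns void now */
    xfs_freesb(mp);                 /* assumed to be freed by the caller */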
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index bf87a5913504..e2f68de16159 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c | |||
@@ -74,18 +74,6 @@ STATIC int xfs_rtmodify_summary(xfs_mount_t *, xfs_trans_t *, int, | |||
74 | */ | 74 | */ |
75 | 75 | ||
76 | /* | 76 | /* |
77 | * xfs_lowbit32: get low bit set out of 32-bit argument, -1 if none set. | ||
78 | */ | ||
79 | STATIC int | ||
80 | xfs_lowbit32( | ||
81 | __uint32_t v) | ||
82 | { | ||
83 | if (v) | ||
84 | return ffs(v) - 1; | ||
85 | return -1; | ||
86 | } | ||
87 | |||
88 | /* | ||
89 | * Allocate space to the bitmap or summary file, and zero it, for growfs. | 77 | * Allocate space to the bitmap or summary file, and zero it, for growfs. |
90 | */ | 78 | */ |
91 | STATIC int /* error */ | 79 | STATIC int /* error */ |
@@ -450,6 +438,7 @@ xfs_rtallocate_extent_near( | |||
450 | } | 438 | } |
451 | bbno = XFS_BITTOBLOCK(mp, bno); | 439 | bbno = XFS_BITTOBLOCK(mp, bno); |
452 | i = 0; | 440 | i = 0; |
441 | ASSERT(minlen != 0); | ||
453 | log2len = xfs_highbit32(minlen); | 442 | log2len = xfs_highbit32(minlen); |
454 | /* | 443 | /* |
455 | * Loop over all bitmap blocks (bbno + i is current block). | 444 | * Loop over all bitmap blocks (bbno + i is current block). |
@@ -618,6 +607,8 @@ xfs_rtallocate_extent_size( | |||
618 | xfs_suminfo_t sum; /* summary information for extents */ | 607 | xfs_suminfo_t sum; /* summary information for extents */ |
619 | 608 | ||
620 | ASSERT(minlen % prod == 0 && maxlen % prod == 0); | 609 | ASSERT(minlen % prod == 0 && maxlen % prod == 0); |
610 | ASSERT(maxlen != 0); | ||
611 | |||
621 | /* | 612 | /* |
622 | * Loop over all the levels starting with maxlen. | 613 | * Loop over all the levels starting with maxlen. |
623 | * At each level, look at all the bitmap blocks, to see if there | 614 | * At each level, look at all the bitmap blocks, to see if there |
@@ -675,6 +666,9 @@ xfs_rtallocate_extent_size( | |||
675 | *rtblock = NULLRTBLOCK; | 666 | *rtblock = NULLRTBLOCK; |
676 | return 0; | 667 | return 0; |
677 | } | 668 | } |
669 | ASSERT(minlen != 0); | ||
670 | ASSERT(maxlen != 0); | ||
671 | |||
678 | /* | 672 | /* |
679 | * Loop over sizes, from maxlen down to minlen. | 673 | * Loop over sizes, from maxlen down to minlen. |
680 | * This time, when we do the allocations, allow smaller ones | 674 | * This time, when we do the allocations, allow smaller ones |
@@ -1961,6 +1955,7 @@ xfs_growfs_rt( | |||
1961 | nsbp->sb_blocksize * nsbp->sb_rextsize); | 1955 | nsbp->sb_blocksize * nsbp->sb_rextsize); |
1962 | nsbp->sb_rextents = nsbp->sb_rblocks; | 1956 | nsbp->sb_rextents = nsbp->sb_rblocks; |
1963 | do_div(nsbp->sb_rextents, nsbp->sb_rextsize); | 1957 | do_div(nsbp->sb_rextents, nsbp->sb_rextsize); |
1958 | ASSERT(nsbp->sb_rextents != 0); | ||
1964 | nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents); | 1959 | nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents); |
1965 | nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1; | 1960 | nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1; |
1966 | nrsumsize = | 1961 | nrsumsize = |
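The private xfs_lowbit32() copy is dropped from xfs_rtalloc.c (presumably consolidated with the other bit helpers), and the new ASSERTs document that xfs_highbit32()'s argument must be non-zero at these call sites. For reference, the removed helper's semantics, which any replacement has to preserve:

    /* lowest set bit of a 32-bit value, or -1 if none set (illustrative) */
    static inline int lowbit32(__uint32_t v)
    {
            return v ? ffs(v) - 1 : -1;
    }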
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c index b0f31c09a76d..3a82576dde9a 100644 --- a/fs/xfs/xfs_rw.c +++ b/fs/xfs/xfs_rw.c | |||
@@ -314,7 +314,7 @@ xfs_bioerror_relse( | |||
314 | * ASYNC buffers. | 314 | * ASYNC buffers. |
315 | */ | 315 | */ |
316 | XFS_BUF_ERROR(bp, EIO); | 316 | XFS_BUF_ERROR(bp, EIO); |
317 | XFS_BUF_V_IODONESEMA(bp); | 317 | XFS_BUF_FINISH_IOWAIT(bp); |
318 | } else { | 318 | } else { |
319 | xfs_buf_relse(bp); | 319 | xfs_buf_relse(bp); |
320 | } | 320 | } |
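XFS_BUF_V_IODONESEMA ("v" the iodone semaphore) becomes XFS_BUF_FINISH_IOWAIT, matching the conversion of the buffer's iodone wait from a semaphore to a completion elsewhere in this merge. A hedged sketch of what that conversion looks like; the struct and function names below are illustrative only, the real macros live in the buffer headers:

    #include <linux/completion.h>

    struct demo_buf {
            struct completion b_iowait;     /* was a plain semaphore */
    };

    static void demo_buf_init(struct demo_buf *bp)
    {
            init_completion(&bp->b_iowait);
    }

    static void demo_finish_iowait(struct demo_buf *bp)
    {
            complete(&bp->b_iowait);        /* was: up(&...iodonesema) */
    }

    static void demo_wait_iodone(struct demo_buf *bp)
    {
            wait_for_completion(&bp->b_iowait);     /* was: down(...) */
    }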
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index e4ebddd3c500..4e1c22a23be5 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include "xfs_quota.h" | 43 | #include "xfs_quota.h" |
44 | #include "xfs_trans_priv.h" | 44 | #include "xfs_trans_priv.h" |
45 | #include "xfs_trans_space.h" | 45 | #include "xfs_trans_space.h" |
46 | #include "xfs_inode_item.h" | ||
46 | 47 | ||
47 | 48 | ||
48 | STATIC void xfs_trans_apply_sb_deltas(xfs_trans_t *); | 49 | STATIC void xfs_trans_apply_sb_deltas(xfs_trans_t *); |
@@ -253,7 +254,7 @@ _xfs_trans_alloc( | |||
253 | tp->t_mountp = mp; | 254 | tp->t_mountp = mp; |
254 | tp->t_items_free = XFS_LIC_NUM_SLOTS; | 255 | tp->t_items_free = XFS_LIC_NUM_SLOTS; |
255 | tp->t_busy_free = XFS_LBC_NUM_SLOTS; | 256 | tp->t_busy_free = XFS_LBC_NUM_SLOTS; |
256 | XFS_LIC_INIT(&(tp->t_items)); | 257 | xfs_lic_init(&(tp->t_items)); |
257 | XFS_LBC_INIT(&(tp->t_busy)); | 258 | XFS_LBC_INIT(&(tp->t_busy)); |
258 | return tp; | 259 | return tp; |
259 | } | 260 | } |
@@ -282,7 +283,7 @@ xfs_trans_dup( | |||
282 | ntp->t_mountp = tp->t_mountp; | 283 | ntp->t_mountp = tp->t_mountp; |
283 | ntp->t_items_free = XFS_LIC_NUM_SLOTS; | 284 | ntp->t_items_free = XFS_LIC_NUM_SLOTS; |
284 | ntp->t_busy_free = XFS_LBC_NUM_SLOTS; | 285 | ntp->t_busy_free = XFS_LBC_NUM_SLOTS; |
285 | XFS_LIC_INIT(&(ntp->t_items)); | 286 | xfs_lic_init(&(ntp->t_items)); |
286 | XFS_LBC_INIT(&(ntp->t_busy)); | 287 | XFS_LBC_INIT(&(ntp->t_busy)); |
287 | 288 | ||
288 | ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); | 289 | ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); |
@@ -1169,7 +1170,7 @@ xfs_trans_cancel( | |||
1169 | while (licp != NULL) { | 1170 | while (licp != NULL) { |
1170 | lidp = licp->lic_descs; | 1171 | lidp = licp->lic_descs; |
1171 | for (i = 0; i < licp->lic_unused; i++, lidp++) { | 1172 | for (i = 0; i < licp->lic_unused; i++, lidp++) { |
1172 | if (XFS_LIC_ISFREE(licp, i)) { | 1173 | if (xfs_lic_isfree(licp, i)) { |
1173 | continue; | 1174 | continue; |
1174 | } | 1175 | } |
1175 | 1176 | ||
@@ -1216,6 +1217,68 @@ xfs_trans_free( | |||
1216 | kmem_zone_free(xfs_trans_zone, tp); | 1217 | kmem_zone_free(xfs_trans_zone, tp); |
1217 | } | 1218 | } |
1218 | 1219 | ||
1220 | /* | ||
1221 | * Roll from one trans in the sequence of PERMANENT transactions to | ||
1222 | * the next: permanent transactions are only flushed out when | ||
1223 | * committed with XFS_TRANS_RELEASE_LOG_RES, but we still want as soon | ||
1224 | * as possible to let chunks of it go to the log. So we commit the | ||
1225 | * chunk we've been working on and get a new transaction to continue. | ||
1226 | */ | ||
1227 | int | ||
1228 | xfs_trans_roll( | ||
1229 | struct xfs_trans **tpp, | ||
1230 | struct xfs_inode *dp) | ||
1231 | { | ||
1232 | struct xfs_trans *trans; | ||
1233 | unsigned int logres, count; | ||
1234 | int error; | ||
1235 | |||
1236 | /* | ||
1237 | * Ensure that the inode is always logged. | ||
1238 | */ | ||
1239 | trans = *tpp; | ||
1240 | xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE); | ||
1241 | |||
1242 | /* | ||
1243 | * Copy the critical parameters from one trans to the next. | ||
1244 | */ | ||
1245 | logres = trans->t_log_res; | ||
1246 | count = trans->t_log_count; | ||
1247 | *tpp = xfs_trans_dup(trans); | ||
1248 | |||
1249 | /* | ||
1250 | * Commit the current transaction. | ||
1251 | * If this commit failed, then it'd just unlock those items that | ||
1252 | * are not marked ihold. That also means that a filesystem shutdown | ||
1253 | * is in progress. The caller takes the responsibility to cancel | ||
1254 | * the duplicate transaction that gets returned. | ||
1255 | */ | ||
1256 | error = xfs_trans_commit(trans, 0); | ||
1257 | if (error) | ||
1258 | return (error); | ||
1259 | |||
1260 | trans = *tpp; | ||
1261 | |||
1262 | /* | ||
1263 | * Reserve space in the log for the next transaction. | ||
1264 | * This also pushes items in the "AIL", the list of logged items, | ||
1265 | * out to disk if they are taking up space at the tail of the log | ||
1266 | * that we want to use. This requires that either nothing be locked | ||
1267 | * across this call, or that anything that is locked be logged in | ||
1268 | * the prior and the next transactions. | ||
1269 | */ | ||
1270 | error = xfs_trans_reserve(trans, 0, logres, 0, | ||
1271 | XFS_TRANS_PERM_LOG_RES, count); | ||
1272 | /* | ||
1273 | * Ensure that the inode is in the new transaction and locked. | ||
1274 | */ | ||
1275 | if (error) | ||
1276 | return error; | ||
1277 | |||
1278 | xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL); | ||
1279 | xfs_trans_ihold(trans, dp); | ||
1280 | return 0; | ||
1281 | } | ||
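xfs_trans_roll() factors out a pattern that long-running operations previously open-coded: keep the inode locked and joined while repeatedly committing chunks of a permanent-reservation transaction. A hedged usage sketch; more_work() and do_one_chunk() are made-up stand-ins for whatever piecewise work the caller performs:

    int
    example_piecewise_update(
            struct xfs_trans        **tpp,
            struct xfs_inode        *dp)
    {
            int                     error;

            while (more_work(dp)) {                 /* hypothetical */
                    error = do_one_chunk(*tpp, dp); /* hypothetical */
                    if (error)
                            return error;
                    /*
                     * Commit this chunk and continue with a fresh
                     * transaction; dp stays locked and joined.
                     */
                    error = xfs_trans_roll(tpp, dp);
                    if (error)
                            return error;
            }
            return 0;
    }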
1219 | 1282 | ||
1220 | /* | 1283 | /* |
1221 | * THIS SHOULD BE REWRITTEN TO USE xfs_trans_next_item(). | 1284 | * THIS SHOULD BE REWRITTEN TO USE xfs_trans_next_item(). |
@@ -1253,7 +1316,7 @@ xfs_trans_committed( | |||
1253 | * Special case the chunk embedded in the transaction. | 1316 | * Special case the chunk embedded in the transaction. |
1254 | */ | 1317 | */ |
1255 | licp = &(tp->t_items); | 1318 | licp = &(tp->t_items); |
1256 | if (!(XFS_LIC_ARE_ALL_FREE(licp))) { | 1319 | if (!(xfs_lic_are_all_free(licp))) { |
1257 | xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag); | 1320 | xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag); |
1258 | } | 1321 | } |
1259 | 1322 | ||
@@ -1262,7 +1325,7 @@ xfs_trans_committed( | |||
1262 | */ | 1325 | */ |
1263 | licp = licp->lic_next; | 1326 | licp = licp->lic_next; |
1264 | while (licp != NULL) { | 1327 | while (licp != NULL) { |
1265 | ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); | 1328 | ASSERT(!xfs_lic_are_all_free(licp)); |
1266 | xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag); | 1329 | xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag); |
1267 | next_licp = licp->lic_next; | 1330 | next_licp = licp->lic_next; |
1268 | kmem_free(licp); | 1331 | kmem_free(licp); |
@@ -1325,7 +1388,7 @@ xfs_trans_chunk_committed( | |||
1325 | 1388 | ||
1326 | lidp = licp->lic_descs; | 1389 | lidp = licp->lic_descs; |
1327 | for (i = 0; i < licp->lic_unused; i++, lidp++) { | 1390 | for (i = 0; i < licp->lic_unused; i++, lidp++) { |
1328 | if (XFS_LIC_ISFREE(licp, i)) { | 1391 | if (xfs_lic_isfree(licp, i)) { |
1329 | continue; | 1392 | continue; |
1330 | } | 1393 | } |
1331 | 1394 | ||
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 0804207c7391..74c80bd2b0ec 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h | |||
@@ -210,62 +210,52 @@ typedef struct xfs_log_item_chunk { | |||
210 | * lic_unused to the right value (0 matches all free). The | 210 | * lic_unused to the right value (0 matches all free). The |
211 | * lic_descs.lid_index values are set up as each desc is allocated. | 211 | * lic_descs.lid_index values are set up as each desc is allocated. |
212 | */ | 212 | */ |
213 | #define XFS_LIC_INIT(cp) xfs_lic_init(cp) | ||
214 | static inline void xfs_lic_init(xfs_log_item_chunk_t *cp) | 213 | static inline void xfs_lic_init(xfs_log_item_chunk_t *cp) |
215 | { | 214 | { |
216 | cp->lic_free = XFS_LIC_FREEMASK; | 215 | cp->lic_free = XFS_LIC_FREEMASK; |
217 | } | 216 | } |
218 | 217 | ||
219 | #define XFS_LIC_INIT_SLOT(cp,slot) xfs_lic_init_slot(cp, slot) | ||
220 | static inline void xfs_lic_init_slot(xfs_log_item_chunk_t *cp, int slot) | 218 | static inline void xfs_lic_init_slot(xfs_log_item_chunk_t *cp, int slot) |
221 | { | 219 | { |
222 | cp->lic_descs[slot].lid_index = (unsigned char)(slot); | 220 | cp->lic_descs[slot].lid_index = (unsigned char)(slot); |
223 | } | 221 | } |
224 | 222 | ||
225 | #define XFS_LIC_VACANCY(cp) xfs_lic_vacancy(cp) | ||
226 | static inline int xfs_lic_vacancy(xfs_log_item_chunk_t *cp) | 223 | static inline int xfs_lic_vacancy(xfs_log_item_chunk_t *cp) |
227 | { | 224 | { |
228 | return cp->lic_free & XFS_LIC_FREEMASK; | 225 | return cp->lic_free & XFS_LIC_FREEMASK; |
229 | } | 226 | } |
230 | 227 | ||
231 | #define XFS_LIC_ALL_FREE(cp) xfs_lic_all_free(cp) | ||
232 | static inline void xfs_lic_all_free(xfs_log_item_chunk_t *cp) | 228 | static inline void xfs_lic_all_free(xfs_log_item_chunk_t *cp) |
233 | { | 229 | { |
234 | cp->lic_free = XFS_LIC_FREEMASK; | 230 | cp->lic_free = XFS_LIC_FREEMASK; |
235 | } | 231 | } |
236 | 232 | ||
237 | #define XFS_LIC_ARE_ALL_FREE(cp) xfs_lic_are_all_free(cp) | ||
238 | static inline int xfs_lic_are_all_free(xfs_log_item_chunk_t *cp) | 233 | static inline int xfs_lic_are_all_free(xfs_log_item_chunk_t *cp) |
239 | { | 234 | { |
240 | return ((cp->lic_free & XFS_LIC_FREEMASK) == XFS_LIC_FREEMASK); | 235 | return ((cp->lic_free & XFS_LIC_FREEMASK) == XFS_LIC_FREEMASK); |
241 | } | 236 | } |
242 | 237 | ||
243 | #define XFS_LIC_ISFREE(cp,slot) xfs_lic_isfree(cp,slot) | ||
244 | static inline int xfs_lic_isfree(xfs_log_item_chunk_t *cp, int slot) | 238 | static inline int xfs_lic_isfree(xfs_log_item_chunk_t *cp, int slot) |
245 | { | 239 | { |
246 | return (cp->lic_free & (1 << slot)); | 240 | return (cp->lic_free & (1 << slot)); |
247 | } | 241 | } |
248 | 242 | ||
249 | #define XFS_LIC_CLAIM(cp,slot) xfs_lic_claim(cp,slot) | ||
250 | static inline void xfs_lic_claim(xfs_log_item_chunk_t *cp, int slot) | 243 | static inline void xfs_lic_claim(xfs_log_item_chunk_t *cp, int slot) |
251 | { | 244 | { |
252 | cp->lic_free &= ~(1 << slot); | 245 | cp->lic_free &= ~(1 << slot); |
253 | } | 246 | } |
254 | 247 | ||
255 | #define XFS_LIC_RELSE(cp,slot) xfs_lic_relse(cp,slot) | ||
256 | static inline void xfs_lic_relse(xfs_log_item_chunk_t *cp, int slot) | 248 | static inline void xfs_lic_relse(xfs_log_item_chunk_t *cp, int slot) |
257 | { | 249 | { |
258 | cp->lic_free |= 1 << slot; | 250 | cp->lic_free |= 1 << slot; |
259 | } | 251 | } |
260 | 252 | ||
261 | #define XFS_LIC_SLOT(cp,slot) xfs_lic_slot(cp,slot) | ||
262 | static inline xfs_log_item_desc_t * | 253 | static inline xfs_log_item_desc_t * |
263 | xfs_lic_slot(xfs_log_item_chunk_t *cp, int slot) | 254 | xfs_lic_slot(xfs_log_item_chunk_t *cp, int slot) |
264 | { | 255 | { |
265 | return &(cp->lic_descs[slot]); | 256 | return &(cp->lic_descs[slot]); |
266 | } | 257 | } |
267 | 258 | ||
268 | #define XFS_LIC_DESC_TO_SLOT(dp) xfs_lic_desc_to_slot(dp) | ||
269 | static inline int xfs_lic_desc_to_slot(xfs_log_item_desc_t *dp) | 259 | static inline int xfs_lic_desc_to_slot(xfs_log_item_desc_t *dp) |
270 | { | 260 | { |
271 | return (uint)dp->lid_index; | 261 | return (uint)dp->lid_index; |
@@ -278,7 +268,6 @@ static inline int xfs_lic_desc_to_slot(xfs_log_item_desc_t *dp) | |||
278 | * All of this yields the address of the chunk, which is | 268 | * All of this yields the address of the chunk, which is |
279 | * cast to a chunk pointer. | 269 | * cast to a chunk pointer. |
280 | */ | 270 | */ |
281 | #define XFS_LIC_DESC_TO_CHUNK(dp) xfs_lic_desc_to_chunk(dp) | ||
282 | static inline xfs_log_item_chunk_t * | 271 | static inline xfs_log_item_chunk_t * |
283 | xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp) | 272 | xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp) |
284 | { | 273 | { |
@@ -986,6 +975,7 @@ int _xfs_trans_commit(xfs_trans_t *, | |||
986 | int *); | 975 | int *); |
987 | #define xfs_trans_commit(tp, flags) _xfs_trans_commit(tp, flags, NULL) | 976 | #define xfs_trans_commit(tp, flags) _xfs_trans_commit(tp, flags, NULL) |
988 | void xfs_trans_cancel(xfs_trans_t *, int); | 977 | void xfs_trans_cancel(xfs_trans_t *, int); |
978 | int xfs_trans_roll(struct xfs_trans **, struct xfs_inode *); | ||
989 | int xfs_trans_ail_init(struct xfs_mount *); | 979 | int xfs_trans_ail_init(struct xfs_mount *); |
990 | void xfs_trans_ail_destroy(struct xfs_mount *); | 980 | void xfs_trans_ail_destroy(struct xfs_mount *); |
991 | void xfs_trans_push_ail(struct xfs_mount *, xfs_lsn_t); | 981 | void xfs_trans_push_ail(struct xfs_mount *, xfs_lsn_t); |
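Dropping the shouting macro wrappers leaves only the static inlines, and the rest of this merge converts every caller (xfs_trans.c, xfs_trans_buf.c and xfs_trans_item.c below) to the lowercase names. The slot bookkeeping those helpers implement, shown as a sketch of one claim/release round trip on a chunk:

    static xfs_log_item_desc_t *
    example_claim_first_slot(xfs_log_item_chunk_t *licp)
    {
            xfs_lic_init(licp);             /* mark every slot free */
            xfs_lic_claim(licp, 0);         /* clear bit 0: slot 0 in use */
            xfs_lic_init_slot(licp, 0);     /* desc remembers its own index */
            licp->lic_unused = 1;           /* one slot handed out so far */
            return xfs_lic_slot(licp, 0);
    }

    /* later, given only the descriptor, locate its chunk and slot again: */
    xfs_lic_relse(xfs_lic_desc_to_chunk(lidp), xfs_lic_desc_to_slot(lidp));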
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index cb0c5839154b..4e855b5ced66 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c | |||
@@ -1021,16 +1021,16 @@ xfs_trans_buf_item_match( | |||
1021 | bp = NULL; | 1021 | bp = NULL; |
1022 | len = BBTOB(len); | 1022 | len = BBTOB(len); |
1023 | licp = &tp->t_items; | 1023 | licp = &tp->t_items; |
1024 | if (!XFS_LIC_ARE_ALL_FREE(licp)) { | 1024 | if (!xfs_lic_are_all_free(licp)) { |
1025 | for (i = 0; i < licp->lic_unused; i++) { | 1025 | for (i = 0; i < licp->lic_unused; i++) { |
1026 | /* | 1026 | /* |
1027 | * Skip unoccupied slots. | 1027 | * Skip unoccupied slots. |
1028 | */ | 1028 | */ |
1029 | if (XFS_LIC_ISFREE(licp, i)) { | 1029 | if (xfs_lic_isfree(licp, i)) { |
1030 | continue; | 1030 | continue; |
1031 | } | 1031 | } |
1032 | 1032 | ||
1033 | lidp = XFS_LIC_SLOT(licp, i); | 1033 | lidp = xfs_lic_slot(licp, i); |
1034 | blip = (xfs_buf_log_item_t *)lidp->lid_item; | 1034 | blip = (xfs_buf_log_item_t *)lidp->lid_item; |
1035 | if (blip->bli_item.li_type != XFS_LI_BUF) { | 1035 | if (blip->bli_item.li_type != XFS_LI_BUF) { |
1036 | continue; | 1036 | continue; |
@@ -1074,7 +1074,7 @@ xfs_trans_buf_item_match_all( | |||
1074 | bp = NULL; | 1074 | bp = NULL; |
1075 | len = BBTOB(len); | 1075 | len = BBTOB(len); |
1076 | for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) { | 1076 | for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) { |
1077 | if (XFS_LIC_ARE_ALL_FREE(licp)) { | 1077 | if (xfs_lic_are_all_free(licp)) { |
1078 | ASSERT(licp == &tp->t_items); | 1078 | ASSERT(licp == &tp->t_items); |
1079 | ASSERT(licp->lic_next == NULL); | 1079 | ASSERT(licp->lic_next == NULL); |
1080 | return NULL; | 1080 | return NULL; |
@@ -1083,11 +1083,11 @@ xfs_trans_buf_item_match_all( | |||
1083 | /* | 1083 | /* |
1084 | * Skip unoccupied slots. | 1084 | * Skip unoccupied slots. |
1085 | */ | 1085 | */ |
1086 | if (XFS_LIC_ISFREE(licp, i)) { | 1086 | if (xfs_lic_isfree(licp, i)) { |
1087 | continue; | 1087 | continue; |
1088 | } | 1088 | } |
1089 | 1089 | ||
1090 | lidp = XFS_LIC_SLOT(licp, i); | 1090 | lidp = xfs_lic_slot(licp, i); |
1091 | blip = (xfs_buf_log_item_t *)lidp->lid_item; | 1091 | blip = (xfs_buf_log_item_t *)lidp->lid_item; |
1092 | if (blip->bli_item.li_type != XFS_LI_BUF) { | 1092 | if (blip->bli_item.li_type != XFS_LI_BUF) { |
1093 | continue; | 1093 | continue; |
diff --git a/fs/xfs/xfs_trans_item.c b/fs/xfs/xfs_trans_item.c index db5c83595526..3c666e8317f8 100644 --- a/fs/xfs/xfs_trans_item.c +++ b/fs/xfs/xfs_trans_item.c | |||
@@ -53,11 +53,11 @@ xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip) | |||
53 | * Initialize the chunk, and then | 53 | * Initialize the chunk, and then |
54 | * claim the first slot in the newly allocated chunk. | 54 | * claim the first slot in the newly allocated chunk. |
55 | */ | 55 | */ |
56 | XFS_LIC_INIT(licp); | 56 | xfs_lic_init(licp); |
57 | XFS_LIC_CLAIM(licp, 0); | 57 | xfs_lic_claim(licp, 0); |
58 | licp->lic_unused = 1; | 58 | licp->lic_unused = 1; |
59 | XFS_LIC_INIT_SLOT(licp, 0); | 59 | xfs_lic_init_slot(licp, 0); |
60 | lidp = XFS_LIC_SLOT(licp, 0); | 60 | lidp = xfs_lic_slot(licp, 0); |
61 | 61 | ||
62 | /* | 62 | /* |
63 | * Link in the new chunk and update the free count. | 63 | * Link in the new chunk and update the free count. |
@@ -88,14 +88,14 @@ xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip) | |||
88 | */ | 88 | */ |
89 | licp = &tp->t_items; | 89 | licp = &tp->t_items; |
90 | while (licp != NULL) { | 90 | while (licp != NULL) { |
91 | if (XFS_LIC_VACANCY(licp)) { | 91 | if (xfs_lic_vacancy(licp)) { |
92 | if (licp->lic_unused <= XFS_LIC_MAX_SLOT) { | 92 | if (licp->lic_unused <= XFS_LIC_MAX_SLOT) { |
93 | i = licp->lic_unused; | 93 | i = licp->lic_unused; |
94 | ASSERT(XFS_LIC_ISFREE(licp, i)); | 94 | ASSERT(xfs_lic_isfree(licp, i)); |
95 | break; | 95 | break; |
96 | } | 96 | } |
97 | for (i = 0; i <= XFS_LIC_MAX_SLOT; i++) { | 97 | for (i = 0; i <= XFS_LIC_MAX_SLOT; i++) { |
98 | if (XFS_LIC_ISFREE(licp, i)) | 98 | if (xfs_lic_isfree(licp, i)) |
99 | break; | 99 | break; |
100 | } | 100 | } |
101 | ASSERT(i <= XFS_LIC_MAX_SLOT); | 101 | ASSERT(i <= XFS_LIC_MAX_SLOT); |
@@ -108,12 +108,12 @@ xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip) | |||
108 | * If we find a free descriptor, claim it, | 108 | * If we find a free descriptor, claim it, |
109 | * initialize it, and return it. | 109 | * initialize it, and return it. |
110 | */ | 110 | */ |
111 | XFS_LIC_CLAIM(licp, i); | 111 | xfs_lic_claim(licp, i); |
112 | if (licp->lic_unused <= i) { | 112 | if (licp->lic_unused <= i) { |
113 | licp->lic_unused = i + 1; | 113 | licp->lic_unused = i + 1; |
114 | XFS_LIC_INIT_SLOT(licp, i); | 114 | xfs_lic_init_slot(licp, i); |
115 | } | 115 | } |
116 | lidp = XFS_LIC_SLOT(licp, i); | 116 | lidp = xfs_lic_slot(licp, i); |
117 | tp->t_items_free--; | 117 | tp->t_items_free--; |
118 | lidp->lid_item = lip; | 118 | lidp->lid_item = lip; |
119 | lidp->lid_flags = 0; | 119 | lidp->lid_flags = 0; |
@@ -136,9 +136,9 @@ xfs_trans_free_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp) | |||
136 | xfs_log_item_chunk_t *licp; | 136 | xfs_log_item_chunk_t *licp; |
137 | xfs_log_item_chunk_t **licpp; | 137 | xfs_log_item_chunk_t **licpp; |
138 | 138 | ||
139 | slot = XFS_LIC_DESC_TO_SLOT(lidp); | 139 | slot = xfs_lic_desc_to_slot(lidp); |
140 | licp = XFS_LIC_DESC_TO_CHUNK(lidp); | 140 | licp = xfs_lic_desc_to_chunk(lidp); |
141 | XFS_LIC_RELSE(licp, slot); | 141 | xfs_lic_relse(licp, slot); |
142 | lidp->lid_item->li_desc = NULL; | 142 | lidp->lid_item->li_desc = NULL; |
143 | tp->t_items_free++; | 143 | tp->t_items_free++; |
144 | 144 | ||
@@ -154,7 +154,7 @@ xfs_trans_free_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp) | |||
154 | * Also decrement the transaction structure's count of free items | 154 | * Also decrement the transaction structure's count of free items |
155 | * by the number in a chunk since we are freeing an empty chunk. | 155 | * by the number in a chunk since we are freeing an empty chunk. |
156 | */ | 156 | */ |
157 | if (XFS_LIC_ARE_ALL_FREE(licp) && (licp != &(tp->t_items))) { | 157 | if (xfs_lic_are_all_free(licp) && (licp != &(tp->t_items))) { |
158 | licpp = &(tp->t_items.lic_next); | 158 | licpp = &(tp->t_items.lic_next); |
159 | while (*licpp != licp) { | 159 | while (*licpp != licp) { |
160 | ASSERT(*licpp != NULL); | 160 | ASSERT(*licpp != NULL); |
@@ -207,20 +207,20 @@ xfs_trans_first_item(xfs_trans_t *tp) | |||
207 | /* | 207 | /* |
208 | * If it's not in the first chunk, skip to the second. | 208 | * If it's not in the first chunk, skip to the second. |
209 | */ | 209 | */ |
210 | if (XFS_LIC_ARE_ALL_FREE(licp)) { | 210 | if (xfs_lic_are_all_free(licp)) { |
211 | licp = licp->lic_next; | 211 | licp = licp->lic_next; |
212 | } | 212 | } |
213 | 213 | ||
214 | /* | 214 | /* |
215 | * Return the first non-free descriptor in the chunk. | 215 | * Return the first non-free descriptor in the chunk. |
216 | */ | 216 | */ |
217 | ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); | 217 | ASSERT(!xfs_lic_are_all_free(licp)); |
218 | for (i = 0; i < licp->lic_unused; i++) { | 218 | for (i = 0; i < licp->lic_unused; i++) { |
219 | if (XFS_LIC_ISFREE(licp, i)) { | 219 | if (xfs_lic_isfree(licp, i)) { |
220 | continue; | 220 | continue; |
221 | } | 221 | } |
222 | 222 | ||
223 | return XFS_LIC_SLOT(licp, i); | 223 | return xfs_lic_slot(licp, i); |
224 | } | 224 | } |
225 | cmn_err(CE_WARN, "xfs_trans_first_item() -- no first item"); | 225 | cmn_err(CE_WARN, "xfs_trans_first_item() -- no first item"); |
226 | return NULL; | 226 | return NULL; |
@@ -242,18 +242,18 @@ xfs_trans_next_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp) | |||
242 | xfs_log_item_chunk_t *licp; | 242 | xfs_log_item_chunk_t *licp; |
243 | int i; | 243 | int i; |
244 | 244 | ||
245 | licp = XFS_LIC_DESC_TO_CHUNK(lidp); | 245 | licp = xfs_lic_desc_to_chunk(lidp); |
246 | 246 | ||
247 | /* | 247 | /* |
248 | * First search the rest of the chunk. The for loop keeps us | 248 | * First search the rest of the chunk. The for loop keeps us |
249 | * from referencing things beyond the end of the chunk. | 249 | * from referencing things beyond the end of the chunk. |
250 | */ | 250 | */ |
251 | for (i = (int)XFS_LIC_DESC_TO_SLOT(lidp) + 1; i < licp->lic_unused; i++) { | 251 | for (i = (int)xfs_lic_desc_to_slot(lidp) + 1; i < licp->lic_unused; i++) { |
252 | if (XFS_LIC_ISFREE(licp, i)) { | 252 | if (xfs_lic_isfree(licp, i)) { |
253 | continue; | 253 | continue; |
254 | } | 254 | } |
255 | 255 | ||
256 | return XFS_LIC_SLOT(licp, i); | 256 | return xfs_lic_slot(licp, i); |
257 | } | 257 | } |
258 | 258 | ||
259 | /* | 259 | /* |
@@ -266,13 +266,13 @@ xfs_trans_next_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp) | |||
266 | } | 266 | } |
267 | 267 | ||
268 | licp = licp->lic_next; | 268 | licp = licp->lic_next; |
269 | ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); | 269 | ASSERT(!xfs_lic_are_all_free(licp)); |
270 | for (i = 0; i < licp->lic_unused; i++) { | 270 | for (i = 0; i < licp->lic_unused; i++) { |
271 | if (XFS_LIC_ISFREE(licp, i)) { | 271 | if (xfs_lic_isfree(licp, i)) { |
272 | continue; | 272 | continue; |
273 | } | 273 | } |
274 | 274 | ||
275 | return XFS_LIC_SLOT(licp, i); | 275 | return xfs_lic_slot(licp, i); |
276 | } | 276 | } |
277 | ASSERT(0); | 277 | ASSERT(0); |
278 | /* NOTREACHED */ | 278 | /* NOTREACHED */ |
@@ -300,9 +300,9 @@ xfs_trans_free_items( | |||
300 | /* | 300 | /* |
301 | * Special case the embedded chunk so we don't free it below. | 301 | * Special case the embedded chunk so we don't free it below. |
302 | */ | 302 | */ |
303 | if (!XFS_LIC_ARE_ALL_FREE(licp)) { | 303 | if (!xfs_lic_are_all_free(licp)) { |
304 | (void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN); | 304 | (void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN); |
305 | XFS_LIC_ALL_FREE(licp); | 305 | xfs_lic_all_free(licp); |
306 | licp->lic_unused = 0; | 306 | licp->lic_unused = 0; |
307 | } | 307 | } |
308 | licp = licp->lic_next; | 308 | licp = licp->lic_next; |
@@ -311,7 +311,7 @@ xfs_trans_free_items( | |||
311 | * Unlock each item in each chunk and free the chunks. | 311 | * Unlock each item in each chunk and free the chunks. |
312 | */ | 312 | */ |
313 | while (licp != NULL) { | 313 | while (licp != NULL) { |
314 | ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); | 314 | ASSERT(!xfs_lic_are_all_free(licp)); |
315 | (void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN); | 315 | (void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN); |
316 | next_licp = licp->lic_next; | 316 | next_licp = licp->lic_next; |
317 | kmem_free(licp); | 317 | kmem_free(licp); |
@@ -347,7 +347,7 @@ xfs_trans_unlock_items(xfs_trans_t *tp, xfs_lsn_t commit_lsn) | |||
347 | /* | 347 | /* |
348 | * Special case the embedded chunk so we don't free. | 348 | * Special case the embedded chunk so we don't free. |
349 | */ | 349 | */ |
350 | if (!XFS_LIC_ARE_ALL_FREE(licp)) { | 350 | if (!xfs_lic_are_all_free(licp)) { |
351 | freed = xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn); | 351 | freed = xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn); |
352 | } | 352 | } |
353 | licpp = &(tp->t_items.lic_next); | 353 | licpp = &(tp->t_items.lic_next); |
@@ -358,10 +358,10 @@ xfs_trans_unlock_items(xfs_trans_t *tp, xfs_lsn_t commit_lsn) | |||
358 | * and free empty chunks. | 358 | * and free empty chunks. |
359 | */ | 359 | */ |
360 | while (licp != NULL) { | 360 | while (licp != NULL) { |
361 | ASSERT(!XFS_LIC_ARE_ALL_FREE(licp)); | 361 | ASSERT(!xfs_lic_are_all_free(licp)); |
362 | freed += xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn); | 362 | freed += xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn); |
363 | next_licp = licp->lic_next; | 363 | next_licp = licp->lic_next; |
364 | if (XFS_LIC_ARE_ALL_FREE(licp)) { | 364 | if (xfs_lic_are_all_free(licp)) { |
365 | *licpp = next_licp; | 365 | *licpp = next_licp; |
366 | kmem_free(licp); | 366 | kmem_free(licp); |
367 | freed -= XFS_LIC_NUM_SLOTS; | 367 | freed -= XFS_LIC_NUM_SLOTS; |
@@ -402,7 +402,7 @@ xfs_trans_unlock_chunk( | |||
402 | freed = 0; | 402 | freed = 0; |
403 | lidp = licp->lic_descs; | 403 | lidp = licp->lic_descs; |
404 | for (i = 0; i < licp->lic_unused; i++, lidp++) { | 404 | for (i = 0; i < licp->lic_unused; i++, lidp++) { |
405 | if (XFS_LIC_ISFREE(licp, i)) { | 405 | if (xfs_lic_isfree(licp, i)) { |
406 | continue; | 406 | continue; |
407 | } | 407 | } |
408 | lip = lidp->lid_item; | 408 | lip = lidp->lid_item; |
@@ -421,7 +421,7 @@ xfs_trans_unlock_chunk( | |||
421 | */ | 421 | */ |
422 | if (!(freeing_chunk) && | 422 | if (!(freeing_chunk) && |
423 | (!(lidp->lid_flags & XFS_LID_DIRTY) || abort)) { | 423 | (!(lidp->lid_flags & XFS_LID_DIRTY) || abort)) { |
424 | XFS_LIC_RELSE(licp, i); | 424 | xfs_lic_relse(licp, i); |
425 | freed++; | 425 | freed++; |
426 | } | 426 | } |
427 | } | 427 | } |
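The hunks above follow the conversion of the upper-case XFS_LIC_* chunk macros to lower-case helpers. The header change defining those helpers is not part of this excerpt; as a rough, hypothetical sketch only, each helper is assumed to wrap the same bit operations on the chunk's lic_free mask that the old macros performed, along the lines of:

	/* illustrative sketch only -- not the actual fs/xfs/xfs_trans.h hunk */
	static inline int xfs_lic_isfree(xfs_log_item_chunk_t *cp, int slot)
	{
		return cp->lic_free & (1 << slot);	/* bit set => slot is free */
	}

	static inline void xfs_lic_claim(xfs_log_item_chunk_t *cp, int slot)
	{
		cp->lic_free &= ~(1 << slot);		/* mark slot in use */
	}

	static inline void xfs_lic_relse(xfs_log_item_chunk_t *cp, int slot)
	{
		cp->lic_free |= 1 << slot;		/* return slot to the free mask */
	}

	static inline xfs_log_item_desc_t *
	xfs_lic_slot(xfs_log_item_chunk_t *cp, int slot)
	{
		return &cp->lic_descs[slot];		/* descriptor backing this slot */
	}

Compared with the macros, the inline form gives type checking of the chunk and slot arguments without changing the generated code.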
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c index 98e5f110ba5f..35d4d414bcc2 100644 --- a/fs/xfs/xfs_utils.c +++ b/fs/xfs/xfs_utils.c | |||
@@ -237,7 +237,7 @@ xfs_droplink( | |||
237 | 237 | ||
238 | ASSERT (ip->i_d.di_nlink > 0); | 238 | ASSERT (ip->i_d.di_nlink > 0); |
239 | ip->i_d.di_nlink--; | 239 | ip->i_d.di_nlink--; |
240 | drop_nlink(ip->i_vnode); | 240 | drop_nlink(VFS_I(ip)); |
241 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 241 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
242 | 242 | ||
243 | error = 0; | 243 | error = 0; |
@@ -301,7 +301,7 @@ xfs_bumplink( | |||
301 | 301 | ||
302 | ASSERT(ip->i_d.di_nlink > 0); | 302 | ASSERT(ip->i_d.di_nlink > 0); |
303 | ip->i_d.di_nlink++; | 303 | ip->i_d.di_nlink++; |
304 | inc_nlink(ip->i_vnode); | 304 | inc_nlink(VFS_I(ip)); |
305 | if ((ip->i_d.di_version == XFS_DINODE_VERSION_1) && | 305 | if ((ip->i_d.di_version == XFS_DINODE_VERSION_1) && |
306 | (ip->i_d.di_nlink > XFS_MAXLINK_1)) { | 306 | (ip->i_d.di_nlink > XFS_MAXLINK_1)) { |
307 | /* | 307 | /* |
diff --git a/fs/xfs/xfs_utils.h b/fs/xfs/xfs_utils.h index f316cb85d8e2..ef321225d269 100644 --- a/fs/xfs/xfs_utils.h +++ b/fs/xfs/xfs_utils.h | |||
@@ -18,9 +18,6 @@ | |||
18 | #ifndef __XFS_UTILS_H__ | 18 | #ifndef __XFS_UTILS_H__ |
19 | #define __XFS_UTILS_H__ | 19 | #define __XFS_UTILS_H__ |
20 | 20 | ||
21 | #define IRELE(ip) VN_RELE(XFS_ITOV(ip)) | ||
22 | #define IHOLD(ip) VN_HOLD(XFS_ITOV(ip)) | ||
23 | |||
24 | extern int xfs_truncate_file(xfs_mount_t *, xfs_inode_t *); | 21 | extern int xfs_truncate_file(xfs_mount_t *, xfs_inode_t *); |
25 | extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t, | 22 | extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t, |
26 | xfs_dev_t, cred_t *, prid_t, int, | 23 | xfs_dev_t, cred_t *, prid_t, int, |
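The xfs_utils.c and xfs_utils.h changes are part of the wider switch from the old XFS_ITOV()/ip->i_vnode accessors to VFS_I(), visible throughout the rest of this diff. The accessor itself is defined outside these hunks; for readers of this excerpt it is assumed to be a trivial inline that hands back the Linux inode associated with an XFS inode, roughly:

	/* hypothetical sketch of the accessor these hunks rely on */
	static inline struct inode *VFS_I(struct xfs_inode *ip)
	{
		return ip->i_vnode;
	}

Going through a single accessor, rather than open-coded ip->i_vnode dereferences or the IRELE/IHOLD wrappers removed from this header, keeps the Linux-inode/XFS-inode relationship in one place.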
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index 4a9a43315a86..439dd3939dda 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c | |||
@@ -128,7 +128,6 @@ xfs_unmount_flush( | |||
128 | xfs_inode_t *rip = mp->m_rootip; | 128 | xfs_inode_t *rip = mp->m_rootip; |
129 | xfs_inode_t *rbmip; | 129 | xfs_inode_t *rbmip; |
130 | xfs_inode_t *rsumip = NULL; | 130 | xfs_inode_t *rsumip = NULL; |
131 | bhv_vnode_t *rvp = XFS_ITOV(rip); | ||
132 | int error; | 131 | int error; |
133 | 132 | ||
134 | xfs_ilock(rip, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT); | 133 | xfs_ilock(rip, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT); |
@@ -146,7 +145,7 @@ xfs_unmount_flush( | |||
146 | if (error == EFSCORRUPTED) | 145 | if (error == EFSCORRUPTED) |
147 | goto fscorrupt_out; | 146 | goto fscorrupt_out; |
148 | 147 | ||
149 | ASSERT(vn_count(XFS_ITOV(rbmip)) == 1); | 148 | ASSERT(vn_count(VFS_I(rbmip)) == 1); |
150 | 149 | ||
151 | rsumip = mp->m_rsumip; | 150 | rsumip = mp->m_rsumip; |
152 | xfs_ilock(rsumip, XFS_ILOCK_EXCL); | 151 | xfs_ilock(rsumip, XFS_ILOCK_EXCL); |
@@ -157,7 +156,7 @@ xfs_unmount_flush( | |||
157 | if (error == EFSCORRUPTED) | 156 | if (error == EFSCORRUPTED) |
158 | goto fscorrupt_out; | 157 | goto fscorrupt_out; |
159 | 158 | ||
160 | ASSERT(vn_count(XFS_ITOV(rsumip)) == 1); | 159 | ASSERT(vn_count(VFS_I(rsumip)) == 1); |
161 | } | 160 | } |
162 | 161 | ||
163 | /* | 162 | /* |
@@ -167,7 +166,7 @@ xfs_unmount_flush( | |||
167 | if (error == EFSCORRUPTED) | 166 | if (error == EFSCORRUPTED) |
168 | goto fscorrupt_out2; | 167 | goto fscorrupt_out2; |
169 | 168 | ||
170 | if (vn_count(rvp) != 1 && !relocation) { | 169 | if (vn_count(VFS_I(rip)) != 1 && !relocation) { |
171 | xfs_iunlock(rip, XFS_ILOCK_EXCL); | 170 | xfs_iunlock(rip, XFS_ILOCK_EXCL); |
172 | return XFS_ERROR(EBUSY); | 171 | return XFS_ERROR(EBUSY); |
173 | } | 172 | } |
@@ -284,7 +283,7 @@ xfs_sync_inodes( | |||
284 | int *bypassed) | 283 | int *bypassed) |
285 | { | 284 | { |
286 | xfs_inode_t *ip = NULL; | 285 | xfs_inode_t *ip = NULL; |
287 | bhv_vnode_t *vp = NULL; | 286 | struct inode *vp = NULL; |
288 | int error; | 287 | int error; |
289 | int last_error; | 288 | int last_error; |
290 | uint64_t fflag; | 289 | uint64_t fflag; |
@@ -404,7 +403,7 @@ xfs_sync_inodes( | |||
404 | continue; | 403 | continue; |
405 | } | 404 | } |
406 | 405 | ||
407 | vp = XFS_ITOV_NULL(ip); | 406 | vp = VFS_I(ip); |
408 | 407 | ||
409 | /* | 408 | /* |
410 | * If the vnode is gone then this is being torn down, | 409 | * If the vnode is gone then this is being torn down, |
@@ -479,7 +478,7 @@ xfs_sync_inodes( | |||
479 | IPOINTER_INSERT(ip, mp); | 478 | IPOINTER_INSERT(ip, mp); |
480 | xfs_ilock(ip, lock_flags); | 479 | xfs_ilock(ip, lock_flags); |
481 | 480 | ||
482 | ASSERT(vp == XFS_ITOV(ip)); | 481 | ASSERT(vp == VFS_I(ip)); |
483 | ASSERT(ip->i_mount == mp); | 482 | ASSERT(ip->i_mount == mp); |
484 | 483 | ||
485 | vnode_refed = B_TRUE; | 484 | vnode_refed = B_TRUE; |
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 76a1166af822..588bb4aa215d 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
@@ -83,7 +83,7 @@ xfs_setattr( | |||
83 | cred_t *credp) | 83 | cred_t *credp) |
84 | { | 84 | { |
85 | xfs_mount_t *mp = ip->i_mount; | 85 | xfs_mount_t *mp = ip->i_mount; |
86 | struct inode *inode = XFS_ITOV(ip); | 86 | struct inode *inode = VFS_I(ip); |
87 | int mask = iattr->ia_valid; | 87 | int mask = iattr->ia_valid; |
88 | xfs_trans_t *tp; | 88 | xfs_trans_t *tp; |
89 | int code; | 89 | int code; |
@@ -513,7 +513,6 @@ xfs_setattr( | |||
513 | ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec; | 513 | ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec; |
514 | ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec; | 514 | ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec; |
515 | ip->i_update_core = 1; | 515 | ip->i_update_core = 1; |
516 | timeflags &= ~XFS_ICHGTIME_ACC; | ||
517 | } | 516 | } |
518 | if (mask & ATTR_MTIME) { | 517 | if (mask & ATTR_MTIME) { |
519 | inode->i_mtime = iattr->ia_mtime; | 518 | inode->i_mtime = iattr->ia_mtime; |
@@ -714,7 +713,7 @@ xfs_fsync( | |||
714 | return XFS_ERROR(EIO); | 713 | return XFS_ERROR(EIO); |
715 | 714 | ||
716 | /* capture size updates in I/O completion before writing the inode. */ | 715 | /* capture size updates in I/O completion before writing the inode. */ |
717 | error = filemap_fdatawait(vn_to_inode(XFS_ITOV(ip))->i_mapping); | 716 | error = filemap_fdatawait(VFS_I(ip)->i_mapping); |
718 | if (error) | 717 | if (error) |
719 | return XFS_ERROR(error); | 718 | return XFS_ERROR(error); |
720 | 719 | ||
@@ -1160,7 +1159,6 @@ int | |||
1160 | xfs_release( | 1159 | xfs_release( |
1161 | xfs_inode_t *ip) | 1160 | xfs_inode_t *ip) |
1162 | { | 1161 | { |
1163 | bhv_vnode_t *vp = XFS_ITOV(ip); | ||
1164 | xfs_mount_t *mp = ip->i_mount; | 1162 | xfs_mount_t *mp = ip->i_mount; |
1165 | int error; | 1163 | int error; |
1166 | 1164 | ||
@@ -1195,13 +1193,13 @@ xfs_release( | |||
1195 | * be exposed to that problem. | 1193 | * be exposed to that problem. |
1196 | */ | 1194 | */ |
1197 | truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED); | 1195 | truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED); |
1198 | if (truncated && VN_DIRTY(vp) && ip->i_delayed_blks > 0) | 1196 | if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0) |
1199 | xfs_flush_pages(ip, 0, -1, XFS_B_ASYNC, FI_NONE); | 1197 | xfs_flush_pages(ip, 0, -1, XFS_B_ASYNC, FI_NONE); |
1200 | } | 1198 | } |
1201 | 1199 | ||
1202 | if (ip->i_d.di_nlink != 0) { | 1200 | if (ip->i_d.di_nlink != 0) { |
1203 | if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && | 1201 | if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && |
1204 | ((ip->i_size > 0) || (VN_CACHED(vp) > 0 || | 1202 | ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 || |
1205 | ip->i_delayed_blks > 0)) && | 1203 | ip->i_delayed_blks > 0)) && |
1206 | (ip->i_df.if_flags & XFS_IFEXTENTS)) && | 1204 | (ip->i_df.if_flags & XFS_IFEXTENTS)) && |
1207 | (!(ip->i_d.di_flags & | 1205 | (!(ip->i_d.di_flags & |
@@ -1227,7 +1225,6 @@ int | |||
1227 | xfs_inactive( | 1225 | xfs_inactive( |
1228 | xfs_inode_t *ip) | 1226 | xfs_inode_t *ip) |
1229 | { | 1227 | { |
1230 | bhv_vnode_t *vp = XFS_ITOV(ip); | ||
1231 | xfs_bmap_free_t free_list; | 1228 | xfs_bmap_free_t free_list; |
1232 | xfs_fsblock_t first_block; | 1229 | xfs_fsblock_t first_block; |
1233 | int committed; | 1230 | int committed; |
@@ -1242,7 +1239,7 @@ xfs_inactive( | |||
1242 | * If the inode is already free, then there can be nothing | 1239 | * If the inode is already free, then there can be nothing |
1243 | * to clean up here. | 1240 | * to clean up here. |
1244 | */ | 1241 | */ |
1245 | if (ip->i_d.di_mode == 0 || VN_BAD(vp)) { | 1242 | if (ip->i_d.di_mode == 0 || VN_BAD(VFS_I(ip))) { |
1246 | ASSERT(ip->i_df.if_real_bytes == 0); | 1243 | ASSERT(ip->i_df.if_real_bytes == 0); |
1247 | ASSERT(ip->i_df.if_broot_bytes == 0); | 1244 | ASSERT(ip->i_df.if_broot_bytes == 0); |
1248 | return VN_INACTIVE_CACHE; | 1245 | return VN_INACTIVE_CACHE; |
@@ -1272,7 +1269,7 @@ xfs_inactive( | |||
1272 | 1269 | ||
1273 | if (ip->i_d.di_nlink != 0) { | 1270 | if (ip->i_d.di_nlink != 0) { |
1274 | if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && | 1271 | if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && |
1275 | ((ip->i_size > 0) || (VN_CACHED(vp) > 0 || | 1272 | ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 || |
1276 | ip->i_delayed_blks > 0)) && | 1273 | ip->i_delayed_blks > 0)) && |
1277 | (ip->i_df.if_flags & XFS_IFEXTENTS) && | 1274 | (ip->i_df.if_flags & XFS_IFEXTENTS) && |
1278 | (!(ip->i_d.di_flags & | 1275 | (!(ip->i_d.di_flags & |
@@ -1708,111 +1705,6 @@ std_return: | |||
1708 | } | 1705 | } |
1709 | 1706 | ||
1710 | #ifdef DEBUG | 1707 | #ifdef DEBUG |
1711 | /* | ||
1712 | * Some counters to see if (and how often) we are hitting some deadlock | ||
1713 | * prevention code paths. | ||
1714 | */ | ||
1715 | |||
1716 | int xfs_rm_locks; | ||
1717 | int xfs_rm_lock_delays; | ||
1718 | int xfs_rm_attempts; | ||
1719 | #endif | ||
1720 | |||
1721 | /* | ||
1722 | * The following routine will lock the inodes associated with the | ||
1723 | * directory and the named entry in the directory. The locks are | ||
1724 | * acquired in increasing inode number. | ||
1725 | * | ||
1726 | * If the entry is "..", then only the directory is locked. The | ||
1727 | * vnode ref count will still include that from the .. entry in | ||
1728 | * this case. | ||
1729 | * | ||
1730 | * There is a deadlock we need to worry about. If the locked directory is | ||
1731 | * in the AIL, it might be blocking up the log. The next inode we lock | ||
1732 | * could be already locked by another thread waiting for log space (e.g | ||
1733 | * a permanent log reservation with a long running transaction (see | ||
1734 | * xfs_itruncate_finish)). To solve this, we must check if the directory | ||
1735 | * is in the ail and use lock_nowait. If we can't lock, we need to | ||
1736 | * drop the inode lock on the directory and try again. xfs_iunlock will | ||
1737 | * potentially push the tail if we were holding up the log. | ||
1738 | */ | ||
1739 | STATIC int | ||
1740 | xfs_lock_dir_and_entry( | ||
1741 | xfs_inode_t *dp, | ||
1742 | xfs_inode_t *ip) /* inode of entry 'name' */ | ||
1743 | { | ||
1744 | int attempts; | ||
1745 | xfs_ino_t e_inum; | ||
1746 | xfs_inode_t *ips[2]; | ||
1747 | xfs_log_item_t *lp; | ||
1748 | |||
1749 | #ifdef DEBUG | ||
1750 | xfs_rm_locks++; | ||
1751 | #endif | ||
1752 | attempts = 0; | ||
1753 | |||
1754 | again: | ||
1755 | xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT); | ||
1756 | |||
1757 | e_inum = ip->i_ino; | ||
1758 | |||
1759 | xfs_itrace_ref(ip); | ||
1760 | |||
1761 | /* | ||
1762 | * We want to lock in increasing inum. Since we've already | ||
1763 | * acquired the lock on the directory, we may need to release | ||
1764 | * if if the inum of the entry turns out to be less. | ||
1765 | */ | ||
1766 | if (e_inum > dp->i_ino) { | ||
1767 | /* | ||
1768 | * We are already in the right order, so just | ||
1769 | * lock on the inode of the entry. | ||
1770 | * We need to use nowait if dp is in the AIL. | ||
1771 | */ | ||
1772 | |||
1773 | lp = (xfs_log_item_t *)dp->i_itemp; | ||
1774 | if (lp && (lp->li_flags & XFS_LI_IN_AIL)) { | ||
1775 | if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { | ||
1776 | attempts++; | ||
1777 | #ifdef DEBUG | ||
1778 | xfs_rm_attempts++; | ||
1779 | #endif | ||
1780 | |||
1781 | /* | ||
1782 | * Unlock dp and try again. | ||
1783 | * xfs_iunlock will try to push the tail | ||
1784 | * if the inode is in the AIL. | ||
1785 | */ | ||
1786 | |||
1787 | xfs_iunlock(dp, XFS_ILOCK_EXCL); | ||
1788 | |||
1789 | if ((attempts % 5) == 0) { | ||
1790 | delay(1); /* Don't just spin the CPU */ | ||
1791 | #ifdef DEBUG | ||
1792 | xfs_rm_lock_delays++; | ||
1793 | #endif | ||
1794 | } | ||
1795 | goto again; | ||
1796 | } | ||
1797 | } else { | ||
1798 | xfs_ilock(ip, XFS_ILOCK_EXCL); | ||
1799 | } | ||
1800 | } else if (e_inum < dp->i_ino) { | ||
1801 | xfs_iunlock(dp, XFS_ILOCK_EXCL); | ||
1802 | |||
1803 | ips[0] = ip; | ||
1804 | ips[1] = dp; | ||
1805 | xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL); | ||
1806 | } | ||
1807 | /* else e_inum == dp->i_ino */ | ||
1808 | /* This can happen if we're asked to lock /x/.. | ||
1809 | * the entry is "..", which is also the parent directory. | ||
1810 | */ | ||
1811 | |||
1812 | return 0; | ||
1813 | } | ||
1814 | |||
1815 | #ifdef DEBUG | ||
1816 | int xfs_locked_n; | 1708 | int xfs_locked_n; |
1817 | int xfs_small_retries; | 1709 | int xfs_small_retries; |
1818 | int xfs_middle_retries; | 1710 | int xfs_middle_retries; |
@@ -1946,6 +1838,45 @@ again: | |||
1946 | #endif | 1838 | #endif |
1947 | } | 1839 | } |
1948 | 1840 | ||
1841 | void | ||
1842 | xfs_lock_two_inodes( | ||
1843 | xfs_inode_t *ip0, | ||
1844 | xfs_inode_t *ip1, | ||
1845 | uint lock_mode) | ||
1846 | { | ||
1847 | xfs_inode_t *temp; | ||
1848 | int attempts = 0; | ||
1849 | xfs_log_item_t *lp; | ||
1850 | |||
1851 | ASSERT(ip0->i_ino != ip1->i_ino); | ||
1852 | |||
1853 | if (ip0->i_ino > ip1->i_ino) { | ||
1854 | temp = ip0; | ||
1855 | ip0 = ip1; | ||
1856 | ip1 = temp; | ||
1857 | } | ||
1858 | |||
1859 | again: | ||
1860 | xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0)); | ||
1861 | |||
1862 | /* | ||
1863 | * If the first lock we have locked is in the AIL, we must TRY to get | ||
1864 | * the second lock. If we can't get it, we must release the first one | ||
1865 | * and try again. | ||
1866 | */ | ||
1867 | lp = (xfs_log_item_t *)ip0->i_itemp; | ||
1868 | if (lp && (lp->li_flags & XFS_LI_IN_AIL)) { | ||
1869 | if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) { | ||
1870 | xfs_iunlock(ip0, lock_mode); | ||
1871 | if ((++attempts % 5) == 0) | ||
1872 | delay(1); /* Don't just spin the CPU */ | ||
1873 | goto again; | ||
1874 | } | ||
1875 | } else { | ||
1876 | xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1)); | ||
1877 | } | ||
1878 | } | ||
1879 | |||
1949 | int | 1880 | int |
1950 | xfs_remove( | 1881 | xfs_remove( |
1951 | xfs_inode_t *dp, | 1882 | xfs_inode_t *dp, |
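Taken together with the removal of xfs_lock_dir_and_entry() above, the new xfs_lock_two_inodes() centralises the two-inode ordering rule (always lock the lower inode number first) and the AIL-aware trylock/retry dance in one helper. A minimal caller sketch, using only names that appear in the surrounding hunks:

	/* illustration of the calling pattern, not an actual hunk */
	xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);

	/* ... modify the directory entry with both inodes locked ... */

	xfs_iunlock(dp, XFS_ILOCK_EXCL);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

Because every caller now sorts the pair by inode number through the helper, two threads locking the same pair can no longer acquire the locks in opposite orders, and the trylock path avoids sleeping on an inode whose log item is already in the AIL holding up the tail of the log.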
@@ -2018,9 +1949,7 @@ xfs_remove( | |||
2018 | goto out_trans_cancel; | 1949 | goto out_trans_cancel; |
2019 | } | 1950 | } |
2020 | 1951 | ||
2021 | error = xfs_lock_dir_and_entry(dp, ip); | 1952 | xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL); |
2022 | if (error) | ||
2023 | goto out_trans_cancel; | ||
2024 | 1953 | ||
2025 | /* | 1954 | /* |
2026 | * At this point, we've gotten both the directory and the entry | 1955 | * At this point, we've gotten both the directory and the entry |
@@ -2047,9 +1976,6 @@ xfs_remove( | |||
2047 | } | 1976 | } |
2048 | } | 1977 | } |
2049 | 1978 | ||
2050 | /* | ||
2051 | * Entry must exist since we did a lookup in xfs_lock_dir_and_entry. | ||
2052 | */ | ||
2053 | XFS_BMAP_INIT(&free_list, &first_block); | 1979 | XFS_BMAP_INIT(&free_list, &first_block); |
2054 | error = xfs_dir_removename(tp, dp, name, ip->i_ino, | 1980 | error = xfs_dir_removename(tp, dp, name, ip->i_ino, |
2055 | &first_block, &free_list, resblks); | 1981 | &first_block, &free_list, resblks); |
@@ -2155,7 +2081,6 @@ xfs_link( | |||
2155 | { | 2081 | { |
2156 | xfs_mount_t *mp = tdp->i_mount; | 2082 | xfs_mount_t *mp = tdp->i_mount; |
2157 | xfs_trans_t *tp; | 2083 | xfs_trans_t *tp; |
2158 | xfs_inode_t *ips[2]; | ||
2159 | int error; | 2084 | int error; |
2160 | xfs_bmap_free_t free_list; | 2085 | xfs_bmap_free_t free_list; |
2161 | xfs_fsblock_t first_block; | 2086 | xfs_fsblock_t first_block; |
@@ -2203,15 +2128,7 @@ xfs_link( | |||
2203 | goto error_return; | 2128 | goto error_return; |
2204 | } | 2129 | } |
2205 | 2130 | ||
2206 | if (sip->i_ino < tdp->i_ino) { | 2131 | xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL); |
2207 | ips[0] = sip; | ||
2208 | ips[1] = tdp; | ||
2209 | } else { | ||
2210 | ips[0] = tdp; | ||
2211 | ips[1] = sip; | ||
2212 | } | ||
2213 | |||
2214 | xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL); | ||
2215 | 2132 | ||
2216 | /* | 2133 | /* |
2217 | * Increment vnode ref counts since xfs_trans_commit & | 2134 | * Increment vnode ref counts since xfs_trans_commit & |
@@ -2873,14 +2790,13 @@ int | |||
2873 | xfs_reclaim( | 2790 | xfs_reclaim( |
2874 | xfs_inode_t *ip) | 2791 | xfs_inode_t *ip) |
2875 | { | 2792 | { |
2876 | bhv_vnode_t *vp = XFS_ITOV(ip); | ||
2877 | 2793 | ||
2878 | xfs_itrace_entry(ip); | 2794 | xfs_itrace_entry(ip); |
2879 | 2795 | ||
2880 | ASSERT(!VN_MAPPED(vp)); | 2796 | ASSERT(!VN_MAPPED(VFS_I(ip))); |
2881 | 2797 | ||
2882 | /* bad inode, get out here ASAP */ | 2798 | /* bad inode, get out here ASAP */ |
2883 | if (VN_BAD(vp)) { | 2799 | if (VN_BAD(VFS_I(ip))) { |
2884 | xfs_ireclaim(ip); | 2800 | xfs_ireclaim(ip); |
2885 | return 0; | 2801 | return 0; |
2886 | } | 2802 | } |
@@ -2917,7 +2833,7 @@ xfs_reclaim( | |||
2917 | XFS_MOUNT_ILOCK(mp); | 2833 | XFS_MOUNT_ILOCK(mp); |
2918 | spin_lock(&ip->i_flags_lock); | 2834 | spin_lock(&ip->i_flags_lock); |
2919 | __xfs_iflags_set(ip, XFS_IRECLAIMABLE); | 2835 | __xfs_iflags_set(ip, XFS_IRECLAIMABLE); |
2920 | vn_to_inode(vp)->i_private = NULL; | 2836 | VFS_I(ip)->i_private = NULL; |
2921 | ip->i_vnode = NULL; | 2837 | ip->i_vnode = NULL; |
2922 | spin_unlock(&ip->i_flags_lock); | 2838 | spin_unlock(&ip->i_flags_lock); |
2923 | list_add_tail(&ip->i_reclaim, &mp->m_del_inodes); | 2839 | list_add_tail(&ip->i_reclaim, &mp->m_del_inodes); |
@@ -2933,7 +2849,7 @@ xfs_finish_reclaim( | |||
2933 | int sync_mode) | 2849 | int sync_mode) |
2934 | { | 2850 | { |
2935 | xfs_perag_t *pag = xfs_get_perag(ip->i_mount, ip->i_ino); | 2851 | xfs_perag_t *pag = xfs_get_perag(ip->i_mount, ip->i_ino); |
2936 | bhv_vnode_t *vp = XFS_ITOV_NULL(ip); | 2852 | struct inode *vp = VFS_I(ip); |
2937 | 2853 | ||
2938 | if (vp && VN_BAD(vp)) | 2854 | if (vp && VN_BAD(vp)) |
2939 | goto reclaim; | 2855 | goto reclaim; |
@@ -3321,7 +3237,6 @@ xfs_free_file_space( | |||
3321 | xfs_off_t len, | 3237 | xfs_off_t len, |
3322 | int attr_flags) | 3238 | int attr_flags) |
3323 | { | 3239 | { |
3324 | bhv_vnode_t *vp; | ||
3325 | int committed; | 3240 | int committed; |
3326 | int done; | 3241 | int done; |
3327 | xfs_off_t end_dmi_offset; | 3242 | xfs_off_t end_dmi_offset; |
@@ -3341,7 +3256,6 @@ xfs_free_file_space( | |||
3341 | xfs_trans_t *tp; | 3256 | xfs_trans_t *tp; |
3342 | int need_iolock = 1; | 3257 | int need_iolock = 1; |
3343 | 3258 | ||
3344 | vp = XFS_ITOV(ip); | ||
3345 | mp = ip->i_mount; | 3259 | mp = ip->i_mount; |
3346 | 3260 | ||
3347 | xfs_itrace_entry(ip); | 3261 | xfs_itrace_entry(ip); |
@@ -3378,7 +3292,7 @@ xfs_free_file_space( | |||
3378 | rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE); | 3292 | rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE); |
3379 | ioffset = offset & ~(rounding - 1); | 3293 | ioffset = offset & ~(rounding - 1); |
3380 | 3294 | ||
3381 | if (VN_CACHED(vp) != 0) { | 3295 | if (VN_CACHED(VFS_I(ip)) != 0) { |
3382 | xfs_inval_cached_trace(ip, ioffset, -1, ioffset, -1); | 3296 | xfs_inval_cached_trace(ip, ioffset, -1, ioffset, -1); |
3383 | error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED); | 3297 | error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED); |
3384 | if (error) | 3298 | if (error) |