author		Dave Chinner <dchinner@redhat.com>	2013-08-12 06:49:39 -0400
committer	Ben Myers <bpm@sgi.com>			2013-08-12 17:42:30 -0400
commit		fde2227ce12b6d3e1945bd512da2a4a333331a2c (patch)
tree		48bce1802731a4ddbaf9d4beaed5a1466b17b90e /fs
parent		abec5f2bf991a03b055be36d918cd6f8f58cc83e (diff)
xfs: split out attribute fork truncation code into separate file
The attribute inactivation code is not used by userspace, so like the attribute listing, split it out into a separate file to minimise the differences between the filesystem code shared with libxfs in userspace.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
Diffstat (limited to 'fs')
-rw-r--r--	fs/xfs/Makefile            |   1
-rw-r--r--	fs/xfs/xfs_attr.c          |  71
-rw-r--r--	fs/xfs/xfs_attr_inactive.c | 454
-rw-r--r--	fs/xfs/xfs_attr_leaf.c     | 352
4 files changed, 455 insertions, 423 deletions
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 94df3ec1945f..a73af1c8f978 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -27,6 +27,7 @@ xfs-y += xfs_trace.o
 
 # highlevel code
 xfs-y += xfs_aops.o \
+	   xfs_attr_inactive.o \
 	   xfs_attr_list.o \
 	   xfs_bit.o \
 	   xfs_buf.o \
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index e9fd5acb4305..6ab77e356a9d 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -610,77 +610,6 @@ xfs_attr_remove(
 	return xfs_attr_remove_int(dp, &xname, flags);
 }
 
-int						/* error */
-xfs_attr_inactive(xfs_inode_t *dp)
-{
-	xfs_trans_t *trans;
-	xfs_mount_t *mp;
-	int error;
-
-	mp = dp->i_mount;
-	ASSERT(! XFS_NOT_DQATTACHED(mp, dp));
-
-	xfs_ilock(dp, XFS_ILOCK_SHARED);
-	if (!xfs_inode_hasattr(dp) ||
-	    dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
-		xfs_iunlock(dp, XFS_ILOCK_SHARED);
-		return 0;
-	}
-	xfs_iunlock(dp, XFS_ILOCK_SHARED);
-
-	/*
-	 * Start our first transaction of the day.
-	 *
-	 * All future transactions during this code must be "chained" off
-	 * this one via the trans_dup() call. All transactions will contain
-	 * the inode, and the inode will always be marked with trans_ihold().
-	 * Since the inode will be locked in all transactions, we must log
-	 * the inode in every transaction to let it float upward through
-	 * the log.
-	 */
-	trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL);
-	if ((error = xfs_trans_reserve(trans, 0, XFS_ATTRINVAL_LOG_RES(mp), 0,
-				       XFS_TRANS_PERM_LOG_RES,
-				       XFS_ATTRINVAL_LOG_COUNT))) {
-		xfs_trans_cancel(trans, 0);
-		return(error);
-	}
-	xfs_ilock(dp, XFS_ILOCK_EXCL);
-
-	/*
-	 * No need to make quota reservations here. We expect to release some
-	 * blocks, not allocate, in the common case.
-	 */
-	xfs_trans_ijoin(trans, dp, 0);
-
-	/*
-	 * Decide on what work routines to call based on the inode size.
-	 */
-	if (!xfs_inode_hasattr(dp) ||
-	    dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
-		error = 0;
-		goto out;
-	}
-	error = xfs_attr3_root_inactive(&trans, dp);
-	if (error)
-		goto out;
-
-	error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
-	if (error)
-		goto out;
-
-	error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES);
-	xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
-	return(error);
-
-out:
-	xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
-	xfs_iunlock(dp, XFS_ILOCK_EXCL);
-	return(error);
-}
-
-
 
 /*========================================================================
  * External routines when attribute list is inside the inode
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
new file mode 100644
index 000000000000..ace95e791311
--- /dev/null
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -0,0 +1,454 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2013 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_format.h"
+#include "xfs_bit.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_da_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_btree.h"
+#include "xfs_attr_remote.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_inode_item.h"
+#include "xfs_bmap.h"
+#include "xfs_attr.h"
+#include "xfs_attr_leaf.h"
+#include "xfs_error.h"
+#include "xfs_quota.h"
+#include "xfs_trace.h"
+#include "xfs_trans_priv.h"
+
+/*
+ * Look at all the extents for this logical region,
+ * invalidate any buffers that are incore/in transactions.
+ */
+STATIC int
+xfs_attr3_leaf_freextent(
+	struct xfs_trans **trans,
+	struct xfs_inode *dp,
+	xfs_dablk_t blkno,
+	int blkcnt)
+{
+	struct xfs_bmbt_irec map;
+	struct xfs_buf *bp;
+	xfs_dablk_t tblkno;
+	xfs_daddr_t dblkno;
+	int tblkcnt;
+	int dblkcnt;
+	int nmap;
+	int error;
+
+	/*
+	 * Roll through the "value", invalidating the attribute value's
+	 * blocks.
+	 */
+	tblkno = blkno;
+	tblkcnt = blkcnt;
+	while (tblkcnt > 0) {
+		/*
+		 * Try to remember where we decided to put the value.
+		 */
+		nmap = 1;
+		error = xfs_bmapi_read(dp, (xfs_fileoff_t)tblkno, tblkcnt,
+				       &map, &nmap, XFS_BMAPI_ATTRFORK);
+		if (error) {
+			return(error);
+		}
+		ASSERT(nmap == 1);
+		ASSERT(map.br_startblock != DELAYSTARTBLOCK);
+
+		/*
+		 * If it's a hole, these are already unmapped
+		 * so there's nothing to invalidate.
+		 */
+		if (map.br_startblock != HOLESTARTBLOCK) {
+
+			dblkno = XFS_FSB_TO_DADDR(dp->i_mount,
+						  map.br_startblock);
+			dblkcnt = XFS_FSB_TO_BB(dp->i_mount,
+						map.br_blockcount);
+			bp = xfs_trans_get_buf(*trans,
+					dp->i_mount->m_ddev_targp,
+					dblkno, dblkcnt, 0);
+			if (!bp)
+				return ENOMEM;
+			xfs_trans_binval(*trans, bp);
+			/*
+			 * Roll to next transaction.
+			 */
+			error = xfs_trans_roll(trans, dp);
+			if (error)
+				return (error);
+		}
+
+		tblkno += map.br_blockcount;
+		tblkcnt -= map.br_blockcount;
+	}
+
+	return(0);
+}
+
+/*
+ * Invalidate all of the "remote" value regions pointed to by a particular
+ * leaf block.
+ * Note that we must release the lock on the buffer so that we are not
+ * caught holding something that the logging code wants to flush to disk.
+ */
+STATIC int
+xfs_attr3_leaf_inactive(
+	struct xfs_trans **trans,
+	struct xfs_inode *dp,
+	struct xfs_buf *bp)
+{
+	struct xfs_attr_leafblock *leaf;
+	struct xfs_attr3_icleaf_hdr ichdr;
+	struct xfs_attr_leaf_entry *entry;
+	struct xfs_attr_leaf_name_remote *name_rmt;
+	struct xfs_attr_inactive_list *list;
+	struct xfs_attr_inactive_list *lp;
+	int error;
+	int count;
+	int size;
+	int tmp;
+	int i;
+
+	leaf = bp->b_addr;
+	xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+
+	/*
+	 * Count the number of "remote" value extents.
+	 */
+	count = 0;
+	entry = xfs_attr3_leaf_entryp(leaf);
+	for (i = 0; i < ichdr.count; entry++, i++) {
+		if (be16_to_cpu(entry->nameidx) &&
+		    ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
+			name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
+			if (name_rmt->valueblk)
+				count++;
+		}
+	}
+
+	/*
+	 * If there are no "remote" values, we're done.
+	 */
+	if (count == 0) {
+		xfs_trans_brelse(*trans, bp);
+		return 0;
+	}
+
+	/*
+	 * Allocate storage for a list of all the "remote" value extents.
+	 */
+	size = count * sizeof(xfs_attr_inactive_list_t);
+	list = kmem_alloc(size, KM_SLEEP);
+
+	/*
+	 * Identify each of the "remote" value extents.
+	 */
+	lp = list;
+	entry = xfs_attr3_leaf_entryp(leaf);
+	for (i = 0; i < ichdr.count; entry++, i++) {
+		if (be16_to_cpu(entry->nameidx) &&
+		    ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
+			name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
+			if (name_rmt->valueblk) {
+				lp->valueblk = be32_to_cpu(name_rmt->valueblk);
+				lp->valuelen = xfs_attr3_rmt_blocks(dp->i_mount,
+						be32_to_cpu(name_rmt->valuelen));
+				lp++;
+			}
+		}
+	}
+	xfs_trans_brelse(*trans, bp);	/* unlock for trans. in freextent() */
+
+	/*
+	 * Invalidate each of the "remote" value extents.
+	 */
+	error = 0;
+	for (lp = list, i = 0; i < count; i++, lp++) {
+		tmp = xfs_attr3_leaf_freextent(trans, dp,
+				lp->valueblk, lp->valuelen);
+
+		if (error == 0)
+			error = tmp;	/* save only the 1st errno */
+	}
+
+	kmem_free(list);
+	return error;
+}
+
+/*
+ * Recurse (gasp!) through the attribute nodes until we find leaves.
+ * We're doing a depth-first traversal in order to invalidate everything.
+ */
+STATIC int
+xfs_attr3_node_inactive(
+	struct xfs_trans **trans,
+	struct xfs_inode *dp,
+	struct xfs_buf *bp,
+	int level)
+{
+	xfs_da_blkinfo_t *info;
+	xfs_da_intnode_t *node;
+	xfs_dablk_t child_fsb;
+	xfs_daddr_t parent_blkno, child_blkno;
+	int error, i;
+	struct xfs_buf *child_bp;
+	struct xfs_da_node_entry *btree;
+	struct xfs_da3_icnode_hdr ichdr;
+
+	/*
+	 * Since this code is recursive (gasp!) we must protect ourselves.
+	 */
+	if (level > XFS_DA_NODE_MAXDEPTH) {
+		xfs_trans_brelse(*trans, bp);	/* no locks for later trans */
+		return XFS_ERROR(EIO);
+	}
+
+	node = bp->b_addr;
+	xfs_da3_node_hdr_from_disk(&ichdr, node);
+	parent_blkno = bp->b_bn;
+	if (!ichdr.count) {
+		xfs_trans_brelse(*trans, bp);
+		return 0;
+	}
+	btree = xfs_da3_node_tree_p(node);
+	child_fsb = be32_to_cpu(btree[0].before);
+	xfs_trans_brelse(*trans, bp);	/* no locks for later trans */
+
+	/*
+	 * If this is the node level just above the leaves, simply loop
+	 * over the leaves removing all of them. If this is higher up
+	 * in the tree, recurse downward.
+	 */
+	for (i = 0; i < ichdr.count; i++) {
+		/*
+		 * Read the subsidiary block to see what we have to work with.
+		 * Don't do this in a transaction. This is a depth-first
+		 * traversal of the tree so we may deal with many blocks
+		 * before we come back to this one.
+		 */
+		error = xfs_da3_node_read(*trans, dp, child_fsb, -2, &child_bp,
+					  XFS_ATTR_FORK);
+		if (error)
+			return(error);
+		if (child_bp) {
+						/* save for re-read later */
+			child_blkno = XFS_BUF_ADDR(child_bp);
+
+			/*
+			 * Invalidate the subtree, however we have to.
+			 */
+			info = child_bp->b_addr;
+			switch (info->magic) {
+			case cpu_to_be16(XFS_DA_NODE_MAGIC):
+			case cpu_to_be16(XFS_DA3_NODE_MAGIC):
+				error = xfs_attr3_node_inactive(trans, dp,
+							child_bp, level + 1);
+				break;
+			case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
+			case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
+				error = xfs_attr3_leaf_inactive(trans, dp,
+							child_bp);
+				break;
+			default:
+				error = XFS_ERROR(EIO);
+				xfs_trans_brelse(*trans, child_bp);
+				break;
+			}
+			if (error)
+				return error;
+
+			/*
+			 * Remove the subsidiary block from the cache
+			 * and from the log.
+			 */
+			error = xfs_da_get_buf(*trans, dp, 0, child_blkno,
+				&child_bp, XFS_ATTR_FORK);
+			if (error)
+				return error;
+			xfs_trans_binval(*trans, child_bp);
+		}
+
+		/*
+		 * If we're not done, re-read the parent to get the next
+		 * child block number.
+		 */
+		if (i + 1 < ichdr.count) {
+			error = xfs_da3_node_read(*trans, dp, 0, parent_blkno,
+						  &bp, XFS_ATTR_FORK);
+			if (error)
+				return error;
+			child_fsb = be32_to_cpu(btree[i + 1].before);
+			xfs_trans_brelse(*trans, bp);
+		}
+		/*
+		 * Atomically commit the whole invalidate stuff.
+		 */
+		error = xfs_trans_roll(trans, dp);
+		if (error)
+			return error;
+	}
+
+	return 0;
+}
+
+/*
+ * Indiscriminately delete the entire attribute fork
+ *
+ * Recurse (gasp!) through the attribute nodes until we find leaves.
+ * We're doing a depth-first traversal in order to invalidate everything.
+ */
+int
+xfs_attr3_root_inactive(
+	struct xfs_trans **trans,
+	struct xfs_inode *dp)
+{
+	struct xfs_da_blkinfo *info;
+	struct xfs_buf *bp;
+	xfs_daddr_t blkno;
+	int error;
+
+	/*
+	 * Read block 0 to see what we have to work with.
+	 * We only get here if we have extents, since we remove
+	 * the extents in reverse order the extent containing
+	 * block 0 must still be there.
+	 */
+	error = xfs_da3_node_read(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK);
+	if (error)
+		return error;
+	blkno = bp->b_bn;
+
+	/*
+	 * Invalidate the tree, even if the "tree" is only a single leaf block.
+	 * This is a depth-first traversal!
+	 */
+	info = bp->b_addr;
+	switch (info->magic) {
+	case cpu_to_be16(XFS_DA_NODE_MAGIC):
+	case cpu_to_be16(XFS_DA3_NODE_MAGIC):
+		error = xfs_attr3_node_inactive(trans, dp, bp, 1);
+		break;
+	case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
+	case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
+		error = xfs_attr3_leaf_inactive(trans, dp, bp);
+		break;
+	default:
+		error = XFS_ERROR(EIO);
+		xfs_trans_brelse(*trans, bp);
+		break;
+	}
+	if (error)
+		return error;
+
+	/*
+	 * Invalidate the incore copy of the root block.
+	 */
+	error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK);
+	if (error)
+		return error;
+	xfs_trans_binval(*trans, bp);	/* remove from cache */
+	/*
+	 * Commit the invalidate and start the next transaction.
+	 */
+	error = xfs_trans_roll(trans, dp);
+
+	return error;
+}
+
+int
+xfs_attr_inactive(xfs_inode_t *dp)
+{
+	xfs_trans_t *trans;
+	xfs_mount_t *mp;
+	int error;
+
+	mp = dp->i_mount;
+	ASSERT(! XFS_NOT_DQATTACHED(mp, dp));
+
+	xfs_ilock(dp, XFS_ILOCK_SHARED);
+	if (!xfs_inode_hasattr(dp) ||
+	    dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+		xfs_iunlock(dp, XFS_ILOCK_SHARED);
+		return 0;
+	}
+	xfs_iunlock(dp, XFS_ILOCK_SHARED);
+
+	/*
+	 * Start our first transaction of the day.
+	 *
+	 * All future transactions during this code must be "chained" off
+	 * this one via the trans_dup() call. All transactions will contain
+	 * the inode, and the inode will always be marked with trans_ihold().
+	 * Since the inode will be locked in all transactions, we must log
+	 * the inode in every transaction to let it float upward through
+	 * the log.
+	 */
+	trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL);
+	if ((error = xfs_trans_reserve(trans, 0, XFS_ATTRINVAL_LOG_RES(mp), 0,
+				       XFS_TRANS_PERM_LOG_RES,
+				       XFS_ATTRINVAL_LOG_COUNT))) {
+		xfs_trans_cancel(trans, 0);
+		return(error);
+	}
+	xfs_ilock(dp, XFS_ILOCK_EXCL);
+
+	/*
+	 * No need to make quota reservations here. We expect to release some
+	 * blocks, not allocate, in the common case.
+	 */
+	xfs_trans_ijoin(trans, dp, 0);
+
+	/*
+	 * Decide on what work routines to call based on the inode size.
+	 */
+	if (!xfs_inode_hasattr(dp) ||
+	    dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+		error = 0;
+		goto out;
+	}
+	error = xfs_attr3_root_inactive(&trans, dp);
+	if (error)
+		goto out;
+
+	error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
+	if (error)
+		goto out;
+
+	error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES);
+	xfs_iunlock(dp, XFS_ILOCK_EXCL);
+
+	return(error);
+
+out:
+	xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
+	xfs_iunlock(dp, XFS_ILOCK_EXCL);
+	return(error);
+}
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 90c033e41eb9..fe0ade9a74ba 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -79,16 +79,6 @@ STATIC int xfs_attr3_leaf_figure_balance(xfs_da_state_t *state,
 					int *number_usedbytes_in_blk1);
 
 /*
- * Routines used for shrinking the Btree.
- */
-STATIC int xfs_attr3_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp,
-				   struct xfs_buf *bp, int level);
-STATIC int xfs_attr3_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp,
-				   struct xfs_buf *bp);
-STATIC int xfs_attr3_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
-				    xfs_dablk_t blkno, int blkcnt);
-
-/*
  * Utility routines.
  */
 STATIC void xfs_attr3_leaf_moveents(struct xfs_attr_leafblock *src_leaf,
@@ -2712,345 +2702,3 @@ xfs_attr3_leaf_flipflags(
 
 	return error;
 }
-
-/*========================================================================
- * Indiscriminately delete the entire attribute fork
- *========================================================================*/
-
-/*
- * Recurse (gasp!) through the attribute nodes until we find leaves.
- * We're doing a depth-first traversal in order to invalidate everything.
- */
-int
-xfs_attr3_root_inactive(
-	struct xfs_trans **trans,
-	struct xfs_inode *dp)
-{
-	struct xfs_da_blkinfo *info;
-	struct xfs_buf *bp;
-	xfs_daddr_t blkno;
-	int error;
-
-	/*
-	 * Read block 0 to see what we have to work with.
-	 * We only get here if we have extents, since we remove
-	 * the extents in reverse order the extent containing
-	 * block 0 must still be there.
-	 */
-	error = xfs_da3_node_read(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK);
-	if (error)
-		return error;
-	blkno = bp->b_bn;
-
-	/*
-	 * Invalidate the tree, even if the "tree" is only a single leaf block.
-	 * This is a depth-first traversal!
-	 */
-	info = bp->b_addr;
-	switch (info->magic) {
-	case cpu_to_be16(XFS_DA_NODE_MAGIC):
-	case cpu_to_be16(XFS_DA3_NODE_MAGIC):
-		error = xfs_attr3_node_inactive(trans, dp, bp, 1);
-		break;
-	case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
-	case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
-		error = xfs_attr3_leaf_inactive(trans, dp, bp);
-		break;
-	default:
-		error = XFS_ERROR(EIO);
-		xfs_trans_brelse(*trans, bp);
-		break;
-	}
-	if (error)
-		return error;
-
-	/*
-	 * Invalidate the incore copy of the root block.
-	 */
-	error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK);
-	if (error)
-		return error;
-	xfs_trans_binval(*trans, bp);	/* remove from cache */
-	/*
-	 * Commit the invalidate and start the next transaction.
-	 */
-	error = xfs_trans_roll(trans, dp);
-
-	return error;
-}
-
-/*
- * Recurse (gasp!) through the attribute nodes until we find leaves.
- * We're doing a depth-first traversal in order to invalidate everything.
- */
-STATIC int
-xfs_attr3_node_inactive(
-	struct xfs_trans **trans,
-	struct xfs_inode *dp,
-	struct xfs_buf *bp,
-	int level)
-{
-	xfs_da_blkinfo_t *info;
-	xfs_da_intnode_t *node;
-	xfs_dablk_t child_fsb;
-	xfs_daddr_t parent_blkno, child_blkno;
-	int error, i;
-	struct xfs_buf *child_bp;
-	struct xfs_da_node_entry *btree;
-	struct xfs_da3_icnode_hdr ichdr;
-
-	/*
-	 * Since this code is recursive (gasp!) we must protect ourselves.
-	 */
-	if (level > XFS_DA_NODE_MAXDEPTH) {
-		xfs_trans_brelse(*trans, bp);	/* no locks for later trans */
-		return XFS_ERROR(EIO);
-	}
-
-	node = bp->b_addr;
-	xfs_da3_node_hdr_from_disk(&ichdr, node);
-	parent_blkno = bp->b_bn;
-	if (!ichdr.count) {
-		xfs_trans_brelse(*trans, bp);
-		return 0;
-	}
-	btree = xfs_da3_node_tree_p(node);
-	child_fsb = be32_to_cpu(btree[0].before);
-	xfs_trans_brelse(*trans, bp);	/* no locks for later trans */
-
-	/*
-	 * If this is the node level just above the leaves, simply loop
-	 * over the leaves removing all of them. If this is higher up
-	 * in the tree, recurse downward.
-	 */
-	for (i = 0; i < ichdr.count; i++) {
-		/*
-		 * Read the subsidiary block to see what we have to work with.
-		 * Don't do this in a transaction. This is a depth-first
-		 * traversal of the tree so we may deal with many blocks
-		 * before we come back to this one.
-		 */
-		error = xfs_da3_node_read(*trans, dp, child_fsb, -2, &child_bp,
-					  XFS_ATTR_FORK);
-		if (error)
-			return(error);
-		if (child_bp) {
-						/* save for re-read later */
-			child_blkno = XFS_BUF_ADDR(child_bp);
-
-			/*
-			 * Invalidate the subtree, however we have to.
-			 */
-			info = child_bp->b_addr;
-			switch (info->magic) {
-			case cpu_to_be16(XFS_DA_NODE_MAGIC):
-			case cpu_to_be16(XFS_DA3_NODE_MAGIC):
-				error = xfs_attr3_node_inactive(trans, dp,
-							child_bp, level + 1);
-				break;
-			case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
-			case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
-				error = xfs_attr3_leaf_inactive(trans, dp,
-							child_bp);
-				break;
-			default:
-				error = XFS_ERROR(EIO);
-				xfs_trans_brelse(*trans, child_bp);
-				break;
-			}
-			if (error)
-				return error;
-
-			/*
-			 * Remove the subsidiary block from the cache
-			 * and from the log.
-			 */
-			error = xfs_da_get_buf(*trans, dp, 0, child_blkno,
-				&child_bp, XFS_ATTR_FORK);
-			if (error)
-				return error;
-			xfs_trans_binval(*trans, child_bp);
-		}
-
-		/*
-		 * If we're not done, re-read the parent to get the next
-		 * child block number.
-		 */
-		if (i + 1 < ichdr.count) {
-			error = xfs_da3_node_read(*trans, dp, 0, parent_blkno,
-						  &bp, XFS_ATTR_FORK);
-			if (error)
-				return error;
-			child_fsb = be32_to_cpu(btree[i + 1].before);
-			xfs_trans_brelse(*trans, bp);
-		}
-		/*
-		 * Atomically commit the whole invalidate stuff.
-		 */
-		error = xfs_trans_roll(trans, dp);
-		if (error)
-			return error;
-	}
-
-	return 0;
-}
-
-/*
- * Invalidate all of the "remote" value regions pointed to by a particular
- * leaf block.
- * Note that we must release the lock on the buffer so that we are not
- * caught holding something that the logging code wants to flush to disk.
- */
-STATIC int
-xfs_attr3_leaf_inactive(
-	struct xfs_trans **trans,
-	struct xfs_inode *dp,
-	struct xfs_buf *bp)
-{
-	struct xfs_attr_leafblock *leaf;
-	struct xfs_attr3_icleaf_hdr ichdr;
-	struct xfs_attr_leaf_entry *entry;
-	struct xfs_attr_leaf_name_remote *name_rmt;
-	struct xfs_attr_inactive_list *list;
-	struct xfs_attr_inactive_list *lp;
-	int error;
-	int count;
-	int size;
-	int tmp;
-	int i;
-
-	leaf = bp->b_addr;
-	xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
-
-	/*
-	 * Count the number of "remote" value extents.
-	 */
-	count = 0;
-	entry = xfs_attr3_leaf_entryp(leaf);
-	for (i = 0; i < ichdr.count; entry++, i++) {
-		if (be16_to_cpu(entry->nameidx) &&
-		    ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
-			name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
-			if (name_rmt->valueblk)
-				count++;
-		}
-	}
-
-	/*
-	 * If there are no "remote" values, we're done.
-	 */
-	if (count == 0) {
-		xfs_trans_brelse(*trans, bp);
-		return 0;
-	}
-
-	/*
-	 * Allocate storage for a list of all the "remote" value extents.
-	 */
-	size = count * sizeof(xfs_attr_inactive_list_t);
-	list = kmem_alloc(size, KM_SLEEP);
-
-	/*
-	 * Identify each of the "remote" value extents.
-	 */
-	lp = list;
-	entry = xfs_attr3_leaf_entryp(leaf);
-	for (i = 0; i < ichdr.count; entry++, i++) {
-		if (be16_to_cpu(entry->nameidx) &&
-		    ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
-			name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
-			if (name_rmt->valueblk) {
-				lp->valueblk = be32_to_cpu(name_rmt->valueblk);
-				lp->valuelen = xfs_attr3_rmt_blocks(dp->i_mount,
-						be32_to_cpu(name_rmt->valuelen));
-				lp++;
-			}
-		}
-	}
-	xfs_trans_brelse(*trans, bp);	/* unlock for trans. in freextent() */
-
-	/*
-	 * Invalidate each of the "remote" value extents.
-	 */
-	error = 0;
-	for (lp = list, i = 0; i < count; i++, lp++) {
-		tmp = xfs_attr3_leaf_freextent(trans, dp,
-				lp->valueblk, lp->valuelen);
-
-		if (error == 0)
-			error = tmp;	/* save only the 1st errno */
-	}
-
-	kmem_free(list);
-	return error;
-}
-
-/*
- * Look at all the extents for this logical region,
- * invalidate any buffers that are incore/in transactions.
- */
-STATIC int
-xfs_attr3_leaf_freextent(
-	struct xfs_trans **trans,
-	struct xfs_inode *dp,
-	xfs_dablk_t blkno,
-	int blkcnt)
-{
-	struct xfs_bmbt_irec map;
-	struct xfs_buf *bp;
-	xfs_dablk_t tblkno;
-	xfs_daddr_t dblkno;
-	int tblkcnt;
-	int dblkcnt;
-	int nmap;
-	int error;
-
-	/*
-	 * Roll through the "value", invalidating the attribute value's
-	 * blocks.
-	 */
-	tblkno = blkno;
-	tblkcnt = blkcnt;
-	while (tblkcnt > 0) {
-		/*
-		 * Try to remember where we decided to put the value.
-		 */
-		nmap = 1;
-		error = xfs_bmapi_read(dp, (xfs_fileoff_t)tblkno, tblkcnt,
-				       &map, &nmap, XFS_BMAPI_ATTRFORK);
-		if (error) {
-			return(error);
-		}
-		ASSERT(nmap == 1);
-		ASSERT(map.br_startblock != DELAYSTARTBLOCK);
-
-		/*
-		 * If it's a hole, these are already unmapped
-		 * so there's nothing to invalidate.
-		 */
-		if (map.br_startblock != HOLESTARTBLOCK) {
-
-			dblkno = XFS_FSB_TO_DADDR(dp->i_mount,
-						  map.br_startblock);
-			dblkcnt = XFS_FSB_TO_BB(dp->i_mount,
-						map.br_blockcount);
-			bp = xfs_trans_get_buf(*trans,
-					dp->i_mount->m_ddev_targp,
-					dblkno, dblkcnt, 0);
-			if (!bp)
-				return ENOMEM;
-			xfs_trans_binval(*trans, bp);
-			/*
-			 * Roll to next transaction.
-			 */
-			error = xfs_trans_roll(trans, dp);
-			if (error)
-				return (error);
-		}
-
-		tblkno += map.br_blockcount;
-		tblkcnt -= map.br_blockcount;
-	}
-
-	return(0);
-}