author     Linus Torvalds <torvalds@linux-foundation.org>   2012-07-30 16:37:53 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-07-30 16:37:53 -0400
commit     37cd9600a9e20359b0283983c9e3a55d84347168 (patch)
tree       fea12ce0ecbaf417b0d835b3cbee14e973103fad
parent     95b18e69950ca7fd9acfa55964e929f58bec9379 (diff)
parent     9a57fa8ee7c29e11c2a29ce058573ba99157eda7 (diff)
Merge tag 'for-linus-v3.6-rc1' of git://oss.sgi.com/xfs/xfs
Pull xfs update from Ben Myers:
 "Numerous cleanups and several bug fixes.  Here are some highlights:

   - Discontiguous directory buffer support
   - Inode allocator refactoring
   - Removal of the IO lock in inode reclaim
   - Implementation of .update_time
   - Fix for handling of EOF in xfs_vm_writepage
   - Fix for races in xfsaild, and idle mode is re-enabled
   - Fix for a crash in xfs_buf completion handlers on unmount."

Fix up trivial conflicts in fs/xfs/{xfs_buf.c,xfs_log.c,xfs_log_priv.h}
due to duplicate patches that had already been merged for 3.5.

* tag 'for-linus-v3.6-rc1' of git://oss.sgi.com/xfs/xfs: (44 commits)
  xfs: wait for the write the superblock on unmount
  xfs: re-enable xfsaild idle mode and fix associated races
  xfs: remove iolock lock classes
  xfs: avoid the iolock in xfs_free_eofblocks for evicted inodes
  xfs: do not take the iolock in xfs_inactive
  xfs: remove xfs_inactive_attrs
  xfs: clean up xfs_inactive
  xfs: do not read the AGI buffer in xfs_dialloc until nessecary
  xfs: refactor xfs_ialloc_ag_select
  xfs: add a short cut to xfs_dialloc for the non-NULL agbp case
  xfs: remove the alloc_done argument to xfs_dialloc
  xfs: split xfs_dialloc
  xfs: remove xfs_ialloc_find_free
  Prefix IO_XX flags with XFS_IO_XX to avoid namespace colision.
  xfs: remove xfs_inotobp
  xfs: merge xfs_itobp into xfs_imap_to_bp
  xfs: handle EOF correctly in xfs_vm_writepage
  xfs: implement ->update_time
  xfs: fix comment typo of struct xfs_da_blkinfo.
  xfs: do not call xfs_bdstrat_cb in xfs_buf_iodone_callbacks
  ...
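Of the highlights above, the ->update_time item is the one that touches a VFS-visible interface: rather than the core VFS writing i_atime/i_mtime/i_ctime into the inode directly, a filesystem can now supply an inode operation and perform the update its own way, which is what lets XFS log the timestamps through a transaction. As orientation only, a minimal implementation of the 3.6-era hook looks roughly like the sketch below; the function name is made up, and this is not XFS's actual code (that lives in the fs/xfs/xfs_iops.c change counted in the diffstat).

#include <linux/fs.h>

/*
 * Illustrative sketch of a ->update_time inode operation for the
 * 3.6-era VFS hook.  XFS's real implementation instead joins the
 * inode to a transaction and logs the timestamp update.
 */
static int example_update_time(struct inode *inode, struct timespec *time,
			       int flags)
{
	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_VERSION)
		inode_inc_iversion(inode);
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;
	mark_inode_dirty_sync(inode);
	return 0;
}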
---
-rw-r--r--  fs/xfs/xfs_alloc_btree.h | 14
-rw-r--r--  fs/xfs/xfs_aops.c | 79
-rw-r--r--  fs/xfs/xfs_aops.h | 14
-rw-r--r--  fs/xfs/xfs_attr.c | 78
-rw-r--r--  fs/xfs/xfs_attr_leaf.c | 255
-rw-r--r--  fs/xfs/xfs_attr_leaf.h | 21
-rw-r--r--  fs/xfs/xfs_bmap.c | 2
-rw-r--r--  fs/xfs/xfs_buf.c | 240
-rw-r--r--  fs/xfs/xfs_buf.h | 116
-rw-r--r--  fs/xfs/xfs_buf_item.c | 345
-rw-r--r--  fs/xfs/xfs_buf_item.h | 38
-rw-r--r--  fs/xfs/xfs_da_btree.c | 823
-rw-r--r--  fs/xfs/xfs_da_btree.h | 38
-rw-r--r--  fs/xfs/xfs_dinode.h | 2
-rw-r--r--  fs/xfs/xfs_dir2.c | 4
-rw-r--r--  fs/xfs/xfs_dir2_block.c | 118
-rw-r--r--  fs/xfs/xfs_dir2_data.c | 50
-rw-r--r--  fs/xfs/xfs_dir2_leaf.c | 621
-rw-r--r--  fs/xfs/xfs_dir2_node.c | 236
-rw-r--r--  fs/xfs/xfs_dir2_priv.h | 46
-rw-r--r--  fs/xfs/xfs_dir2_sf.c | 4
-rw-r--r--  fs/xfs/xfs_file.c | 19
-rw-r--r--  fs/xfs/xfs_ialloc.c | 446
-rw-r--r--  fs/xfs/xfs_ialloc.h | 2
-rw-r--r--  fs/xfs/xfs_iget.c | 15
-rw-r--r--  fs/xfs/xfs_inode.c | 208
-rw-r--r--  fs/xfs/xfs_inode.h | 13
-rw-r--r--  fs/xfs/xfs_iomap.c | 6
-rw-r--r--  fs/xfs/xfs_iops.c | 45
-rw-r--r--  fs/xfs/xfs_itable.c | 2
-rw-r--r--  fs/xfs/xfs_log.c | 223
-rw-r--r--  fs/xfs/xfs_log_priv.h | 18
-rw-r--r--  fs/xfs/xfs_log_recover.c | 142
-rw-r--r--  fs/xfs/xfs_mount.c | 11
-rw-r--r--  fs/xfs/xfs_mount.h | 3
-rw-r--r--  fs/xfs/xfs_qm.c | 2
-rw-r--r--  fs/xfs/xfs_super.c | 88
-rw-r--r--  fs/xfs/xfs_sync.c | 13
-rw-r--r--  fs/xfs/xfs_trace.h | 2
-rw-r--r--  fs/xfs/xfs_trans.h | 50
-rw-r--r--  fs/xfs/xfs_trans_ail.c | 35
-rw-r--r--  fs/xfs/xfs_trans_buf.c | 68
-rw-r--r--  fs/xfs/xfs_trans_priv.h | 1
-rw-r--r--  fs/xfs/xfs_types.h | 14
-rw-r--r--  fs/xfs/xfs_utils.c | 17
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 285
46 files changed, 2457 insertions(+), 2415 deletions(-)
diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/xfs_alloc_btree.h
index a6caa0022c9b..359fb86ed876 100644
--- a/fs/xfs/xfs_alloc_btree.h
+++ b/fs/xfs/xfs_alloc_btree.h
@@ -51,20 +51,6 @@ typedef struct xfs_alloc_rec_incore {
51typedef __be32 xfs_alloc_ptr_t; 51typedef __be32 xfs_alloc_ptr_t;
52 52
53/* 53/*
54 * Minimum and maximum blocksize and sectorsize.
55 * The blocksize upper limit is pretty much arbitrary.
56 * The sectorsize upper limit is due to sizeof(sb_sectsize).
57 */
58#define XFS_MIN_BLOCKSIZE_LOG 9 /* i.e. 512 bytes */
59#define XFS_MAX_BLOCKSIZE_LOG 16 /* i.e. 65536 bytes */
60#define XFS_MIN_BLOCKSIZE (1 << XFS_MIN_BLOCKSIZE_LOG)
61#define XFS_MAX_BLOCKSIZE (1 << XFS_MAX_BLOCKSIZE_LOG)
62#define XFS_MIN_SECTORSIZE_LOG 9 /* i.e. 512 bytes */
63#define XFS_MAX_SECTORSIZE_LOG 15 /* i.e. 32768 bytes */
64#define XFS_MIN_SECTORSIZE (1 << XFS_MIN_SECTORSIZE_LOG)
65#define XFS_MAX_SECTORSIZE (1 << XFS_MAX_SECTORSIZE_LOG)
66
67/*
68 * Block numbers in the AG: 54 * Block numbers in the AG:
69 * SB is sector 0, AGF is sector 1, AGI is sector 2, AGFL is sector 3. 55 * SB is sector 0, AGF is sector 1, AGI is sector 2, AGFL is sector 3.
70 */ 56 */
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 8dad722c0041..15052ff916ec 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -179,7 +179,7 @@ xfs_finish_ioend(
179 if (atomic_dec_and_test(&ioend->io_remaining)) { 179 if (atomic_dec_and_test(&ioend->io_remaining)) {
180 struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount; 180 struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
181 181
182 if (ioend->io_type == IO_UNWRITTEN) 182 if (ioend->io_type == XFS_IO_UNWRITTEN)
183 queue_work(mp->m_unwritten_workqueue, &ioend->io_work); 183 queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
184 else if (ioend->io_append_trans) 184 else if (ioend->io_append_trans)
185 queue_work(mp->m_data_workqueue, &ioend->io_work); 185 queue_work(mp->m_data_workqueue, &ioend->io_work);
@@ -210,7 +210,7 @@ xfs_end_io(
210 * For unwritten extents we need to issue transactions to convert a 210 * For unwritten extents we need to issue transactions to convert a
211 * range to normal written extens after the data I/O has finished. 211 * range to normal written extens after the data I/O has finished.
212 */ 212 */
213 if (ioend->io_type == IO_UNWRITTEN) { 213 if (ioend->io_type == XFS_IO_UNWRITTEN) {
214 /* 214 /*
215 * For buffered I/O we never preallocate a transaction when 215 * For buffered I/O we never preallocate a transaction when
216 * doing the unwritten extent conversion, but for direct I/O 216 * doing the unwritten extent conversion, but for direct I/O
@@ -312,7 +312,7 @@ xfs_map_blocks(
312 if (XFS_FORCED_SHUTDOWN(mp)) 312 if (XFS_FORCED_SHUTDOWN(mp))
313 return -XFS_ERROR(EIO); 313 return -XFS_ERROR(EIO);
314 314
315 if (type == IO_UNWRITTEN) 315 if (type == XFS_IO_UNWRITTEN)
316 bmapi_flags |= XFS_BMAPI_IGSTATE; 316 bmapi_flags |= XFS_BMAPI_IGSTATE;
317 317
318 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { 318 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
@@ -323,10 +323,10 @@ xfs_map_blocks(
323 323
324 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || 324 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
325 (ip->i_df.if_flags & XFS_IFEXTENTS)); 325 (ip->i_df.if_flags & XFS_IFEXTENTS));
326 ASSERT(offset <= mp->m_maxioffset); 326 ASSERT(offset <= mp->m_super->s_maxbytes);
327 327
328 if (offset + count > mp->m_maxioffset) 328 if (offset + count > mp->m_super->s_maxbytes)
329 count = mp->m_maxioffset - offset; 329 count = mp->m_super->s_maxbytes - offset;
330 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); 330 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
331 offset_fsb = XFS_B_TO_FSBT(mp, offset); 331 offset_fsb = XFS_B_TO_FSBT(mp, offset);
332 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, 332 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
@@ -336,7 +336,7 @@ xfs_map_blocks(
336 if (error) 336 if (error)
337 return -XFS_ERROR(error); 337 return -XFS_ERROR(error);
338 338
339 if (type == IO_DELALLOC && 339 if (type == XFS_IO_DELALLOC &&
340 (!nimaps || isnullstartblock(imap->br_startblock))) { 340 (!nimaps || isnullstartblock(imap->br_startblock))) {
341 error = xfs_iomap_write_allocate(ip, offset, count, imap); 341 error = xfs_iomap_write_allocate(ip, offset, count, imap);
342 if (!error) 342 if (!error)
@@ -345,7 +345,7 @@ xfs_map_blocks(
345 } 345 }
346 346
347#ifdef DEBUG 347#ifdef DEBUG
348 if (type == IO_UNWRITTEN) { 348 if (type == XFS_IO_UNWRITTEN) {
349 ASSERT(nimaps); 349 ASSERT(nimaps);
350 ASSERT(imap->br_startblock != HOLESTARTBLOCK); 350 ASSERT(imap->br_startblock != HOLESTARTBLOCK);
351 ASSERT(imap->br_startblock != DELAYSTARTBLOCK); 351 ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
@@ -634,11 +634,11 @@ xfs_check_page_type(
634 bh = head = page_buffers(page); 634 bh = head = page_buffers(page);
635 do { 635 do {
636 if (buffer_unwritten(bh)) 636 if (buffer_unwritten(bh))
637 acceptable += (type == IO_UNWRITTEN); 637 acceptable += (type == XFS_IO_UNWRITTEN);
638 else if (buffer_delay(bh)) 638 else if (buffer_delay(bh))
639 acceptable += (type == IO_DELALLOC); 639 acceptable += (type == XFS_IO_DELALLOC);
640 else if (buffer_dirty(bh) && buffer_mapped(bh)) 640 else if (buffer_dirty(bh) && buffer_mapped(bh))
641 acceptable += (type == IO_OVERWRITE); 641 acceptable += (type == XFS_IO_OVERWRITE);
642 else 642 else
643 break; 643 break;
644 } while ((bh = bh->b_this_page) != head); 644 } while ((bh = bh->b_this_page) != head);
@@ -721,11 +721,11 @@ xfs_convert_page(
721 if (buffer_unwritten(bh) || buffer_delay(bh) || 721 if (buffer_unwritten(bh) || buffer_delay(bh) ||
722 buffer_mapped(bh)) { 722 buffer_mapped(bh)) {
723 if (buffer_unwritten(bh)) 723 if (buffer_unwritten(bh))
724 type = IO_UNWRITTEN; 724 type = XFS_IO_UNWRITTEN;
725 else if (buffer_delay(bh)) 725 else if (buffer_delay(bh))
726 type = IO_DELALLOC; 726 type = XFS_IO_DELALLOC;
727 else 727 else
728 type = IO_OVERWRITE; 728 type = XFS_IO_OVERWRITE;
729 729
730 if (!xfs_imap_valid(inode, imap, offset)) { 730 if (!xfs_imap_valid(inode, imap, offset)) {
731 done = 1; 731 done = 1;
@@ -733,7 +733,7 @@ xfs_convert_page(
733 } 733 }
734 734
735 lock_buffer(bh); 735 lock_buffer(bh);
736 if (type != IO_OVERWRITE) 736 if (type != XFS_IO_OVERWRITE)
737 xfs_map_at_offset(inode, bh, imap, offset); 737 xfs_map_at_offset(inode, bh, imap, offset);
738 xfs_add_to_ioend(inode, bh, offset, type, 738 xfs_add_to_ioend(inode, bh, offset, type,
739 ioendp, done); 739 ioendp, done);
@@ -831,7 +831,7 @@ xfs_aops_discard_page(
831 struct buffer_head *bh, *head; 831 struct buffer_head *bh, *head;
832 loff_t offset = page_offset(page); 832 loff_t offset = page_offset(page);
833 833
834 if (!xfs_check_page_type(page, IO_DELALLOC)) 834 if (!xfs_check_page_type(page, XFS_IO_DELALLOC))
835 goto out_invalidate; 835 goto out_invalidate;
836 836
837 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 837 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
@@ -927,11 +927,26 @@ xfs_vm_writepage(
927 end_index = offset >> PAGE_CACHE_SHIFT; 927 end_index = offset >> PAGE_CACHE_SHIFT;
928 last_index = (offset - 1) >> PAGE_CACHE_SHIFT; 928 last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
929 if (page->index >= end_index) { 929 if (page->index >= end_index) {
930 if ((page->index >= end_index + 1) || 930 unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);
931 !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) { 931
932 /*
933 * Just skip the page if it is fully outside i_size, e.g. due
934 * to a truncate operation that is in progress.
935 */
936 if (page->index >= end_index + 1 || offset_into_page == 0) {
932 unlock_page(page); 937 unlock_page(page);
933 return 0; 938 return 0;
934 } 939 }
940
941 /*
942 * The page straddles i_size. It must be zeroed out on each
943 * and every writepage invocation because it may be mmapped.
944 * "A file is mapped in multiples of the page size. For a file
945 * that is not a multiple of the page size, the remaining
946 * memory is zeroed when mapped, and writes to that region are
947 * not written out to the file."
948 */
949 zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
935 } 950 }
936 951
937 end_offset = min_t(unsigned long long, 952 end_offset = min_t(unsigned long long,
@@ -941,7 +956,7 @@ xfs_vm_writepage(
941 956
942 bh = head = page_buffers(page); 957 bh = head = page_buffers(page);
943 offset = page_offset(page); 958 offset = page_offset(page);
944 type = IO_OVERWRITE; 959 type = XFS_IO_OVERWRITE;
945 960
946 if (wbc->sync_mode == WB_SYNC_NONE) 961 if (wbc->sync_mode == WB_SYNC_NONE)
947 nonblocking = 1; 962 nonblocking = 1;
@@ -966,18 +981,18 @@ xfs_vm_writepage(
966 } 981 }
967 982
968 if (buffer_unwritten(bh)) { 983 if (buffer_unwritten(bh)) {
969 if (type != IO_UNWRITTEN) { 984 if (type != XFS_IO_UNWRITTEN) {
970 type = IO_UNWRITTEN; 985 type = XFS_IO_UNWRITTEN;
971 imap_valid = 0; 986 imap_valid = 0;
972 } 987 }
973 } else if (buffer_delay(bh)) { 988 } else if (buffer_delay(bh)) {
974 if (type != IO_DELALLOC) { 989 if (type != XFS_IO_DELALLOC) {
975 type = IO_DELALLOC; 990 type = XFS_IO_DELALLOC;
976 imap_valid = 0; 991 imap_valid = 0;
977 } 992 }
978 } else if (buffer_uptodate(bh)) { 993 } else if (buffer_uptodate(bh)) {
979 if (type != IO_OVERWRITE) { 994 if (type != XFS_IO_OVERWRITE) {
980 type = IO_OVERWRITE; 995 type = XFS_IO_OVERWRITE;
981 imap_valid = 0; 996 imap_valid = 0;
982 } 997 }
983 } else { 998 } else {
@@ -1013,7 +1028,7 @@ xfs_vm_writepage(
1013 } 1028 }
1014 if (imap_valid) { 1029 if (imap_valid) {
1015 lock_buffer(bh); 1030 lock_buffer(bh);
1016 if (type != IO_OVERWRITE) 1031 if (type != XFS_IO_OVERWRITE)
1017 xfs_map_at_offset(inode, bh, &imap, offset); 1032 xfs_map_at_offset(inode, bh, &imap, offset);
1018 xfs_add_to_ioend(inode, bh, offset, type, &ioend, 1033 xfs_add_to_ioend(inode, bh, offset, type, &ioend,
1019 new_ioend); 1034 new_ioend);
@@ -1054,7 +1069,7 @@ xfs_vm_writepage(
1054 * Reserve log space if we might write beyond the on-disk 1069 * Reserve log space if we might write beyond the on-disk
1055 * inode size. 1070 * inode size.
1056 */ 1071 */
1057 if (ioend->io_type != IO_UNWRITTEN && 1072 if (ioend->io_type != XFS_IO_UNWRITTEN &&
1058 xfs_ioend_is_append(ioend)) { 1073 xfs_ioend_is_append(ioend)) {
1059 err = xfs_setfilesize_trans_alloc(ioend); 1074 err = xfs_setfilesize_trans_alloc(ioend);
1060 if (err) 1075 if (err)
@@ -1162,9 +1177,9 @@ __xfs_get_blocks(
1162 lockmode = xfs_ilock_map_shared(ip); 1177 lockmode = xfs_ilock_map_shared(ip);
1163 } 1178 }
1164 1179
1165 ASSERT(offset <= mp->m_maxioffset); 1180 ASSERT(offset <= mp->m_super->s_maxbytes);
1166 if (offset + size > mp->m_maxioffset) 1181 if (offset + size > mp->m_super->s_maxbytes)
1167 size = mp->m_maxioffset - offset; 1182 size = mp->m_super->s_maxbytes - offset;
1168 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size); 1183 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
1169 offset_fsb = XFS_B_TO_FSBT(mp, offset); 1184 offset_fsb = XFS_B_TO_FSBT(mp, offset);
1170 1185
@@ -1351,7 +1366,7 @@ xfs_end_io_direct_write(
1351 ioend->io_iocb = iocb; 1366 ioend->io_iocb = iocb;
1352 ioend->io_result = ret; 1367 ioend->io_result = ret;
1353 if (private && size > 0) 1368 if (private && size > 0)
1354 ioend->io_type = IO_UNWRITTEN; 1369 ioend->io_type = XFS_IO_UNWRITTEN;
1355 1370
1356 if (is_async) { 1371 if (is_async) {
1357 ioend->io_isasync = 1; 1372 ioend->io_isasync = 1;
@@ -1383,7 +1398,7 @@ xfs_vm_direct_IO(
1383 * and converts at least on unwritten extent we will cancel 1398 * and converts at least on unwritten extent we will cancel
1384 * the still clean transaction after the I/O has finished. 1399 * the still clean transaction after the I/O has finished.
1385 */ 1400 */
1386 iocb->private = ioend = xfs_alloc_ioend(inode, IO_DIRECT); 1401 iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
1387 if (offset + size > XFS_I(inode)->i_d.di_size) { 1402 if (offset + size > XFS_I(inode)->i_d.di_size) {
1388 ret = xfs_setfilesize_trans_alloc(ioend); 1403 ret = xfs_setfilesize_trans_alloc(ioend);
1389 if (ret) 1404 if (ret)
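The xfs_vm_writepage hunk above is the "handle EOF correctly" fix called out in the pull message. Stripped of the diff context, the added logic reads as follows (offset is i_size_read(inode); PAGE_CACHE_SIZE and zero_user_segment are the 3.6-era page cache helpers used in the hunk):

	if (page->index >= end_index) {
		unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);

		/* Page fully beyond EOF, e.g. due to a racing truncate: skip it. */
		if (page->index >= end_index + 1 || offset_into_page == 0) {
			unlock_page(page);
			return 0;
		}

		/*
		 * The page straddles i_size.  Zero the region past EOF on
		 * every writepage call, because the page may be mmapped and
		 * mmap guarantees that the tail of a partial page reads back
		 * as zeroes.
		 */
		zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
	}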
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index 84eafbcb0d9d..c325abb8d61a 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -24,17 +24,17 @@ extern mempool_t *xfs_ioend_pool;
24 * Types of I/O for bmap clustering and I/O completion tracking. 24 * Types of I/O for bmap clustering and I/O completion tracking.
25 */ 25 */
26enum { 26enum {
27 IO_DIRECT = 0, /* special case for direct I/O ioends */ 27 XFS_IO_DIRECT = 0, /* special case for direct I/O ioends */
28 IO_DELALLOC, /* mapping covers delalloc region */ 28 XFS_IO_DELALLOC, /* covers delalloc region */
29 IO_UNWRITTEN, /* mapping covers allocated but uninitialized data */ 29 XFS_IO_UNWRITTEN, /* covers allocated but uninitialized data */
30 IO_OVERWRITE, /* mapping covers already allocated extent */ 30 XFS_IO_OVERWRITE, /* covers already allocated extent */
31}; 31};
32 32
33#define XFS_IO_TYPES \ 33#define XFS_IO_TYPES \
34 { 0, "" }, \ 34 { 0, "" }, \
35 { IO_DELALLOC, "delalloc" }, \ 35 { XFS_IO_DELALLOC, "delalloc" }, \
36 { IO_UNWRITTEN, "unwritten" }, \ 36 { XFS_IO_UNWRITTEN, "unwritten" }, \
37 { IO_OVERWRITE, "overwrite" } 37 { XFS_IO_OVERWRITE, "overwrite" }
38 38
39/* 39/*
40 * xfs_ioend struct manages large extent writes for XFS. 40 * xfs_ioend struct manages large extent writes for XFS.
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index a17ff01b5adf..0ca1f0be62d2 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -893,7 +893,7 @@ STATIC int
893xfs_attr_leaf_addname(xfs_da_args_t *args) 893xfs_attr_leaf_addname(xfs_da_args_t *args)
894{ 894{
895 xfs_inode_t *dp; 895 xfs_inode_t *dp;
896 xfs_dabuf_t *bp; 896 struct xfs_buf *bp;
897 int retval, error, committed, forkoff; 897 int retval, error, committed, forkoff;
898 898
899 trace_xfs_attr_leaf_addname(args); 899 trace_xfs_attr_leaf_addname(args);
@@ -915,11 +915,11 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
915 */ 915 */
916 retval = xfs_attr_leaf_lookup_int(bp, args); 916 retval = xfs_attr_leaf_lookup_int(bp, args);
917 if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) { 917 if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) {
918 xfs_da_brelse(args->trans, bp); 918 xfs_trans_brelse(args->trans, bp);
919 return(retval); 919 return(retval);
920 } else if (retval == EEXIST) { 920 } else if (retval == EEXIST) {
921 if (args->flags & ATTR_CREATE) { /* pure create op */ 921 if (args->flags & ATTR_CREATE) { /* pure create op */
922 xfs_da_brelse(args->trans, bp); 922 xfs_trans_brelse(args->trans, bp);
923 return(retval); 923 return(retval);
924 } 924 }
925 925
@@ -937,7 +937,6 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
937 * if required. 937 * if required.
938 */ 938 */
939 retval = xfs_attr_leaf_add(bp, args); 939 retval = xfs_attr_leaf_add(bp, args);
940 xfs_da_buf_done(bp);
941 if (retval == ENOSPC) { 940 if (retval == ENOSPC) {
942 /* 941 /*
943 * Promote the attribute list to the Btree format, then 942 * Promote the attribute list to the Btree format, then
@@ -1065,8 +1064,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
1065 */ 1064 */
1066 if (committed) 1065 if (committed)
1067 xfs_trans_ijoin(args->trans, dp, 0); 1066 xfs_trans_ijoin(args->trans, dp, 0);
1068 } else 1067 }
1069 xfs_da_buf_done(bp);
1070 1068
1071 /* 1069 /*
1072 * Commit the remove and start the next trans in series. 1070 * Commit the remove and start the next trans in series.
@@ -1092,7 +1090,7 @@ STATIC int
1092xfs_attr_leaf_removename(xfs_da_args_t *args) 1090xfs_attr_leaf_removename(xfs_da_args_t *args)
1093{ 1091{
1094 xfs_inode_t *dp; 1092 xfs_inode_t *dp;
1095 xfs_dabuf_t *bp; 1093 struct xfs_buf *bp;
1096 int error, committed, forkoff; 1094 int error, committed, forkoff;
1097 1095
1098 trace_xfs_attr_leaf_removename(args); 1096 trace_xfs_attr_leaf_removename(args);
@@ -1111,7 +1109,7 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
1111 ASSERT(bp != NULL); 1109 ASSERT(bp != NULL);
1112 error = xfs_attr_leaf_lookup_int(bp, args); 1110 error = xfs_attr_leaf_lookup_int(bp, args);
1113 if (error == ENOATTR) { 1111 if (error == ENOATTR) {
1114 xfs_da_brelse(args->trans, bp); 1112 xfs_trans_brelse(args->trans, bp);
1115 return(error); 1113 return(error);
1116 } 1114 }
1117 1115
@@ -1141,8 +1139,7 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
1141 */ 1139 */
1142 if (committed) 1140 if (committed)
1143 xfs_trans_ijoin(args->trans, dp, 0); 1141 xfs_trans_ijoin(args->trans, dp, 0);
1144 } else 1142 }
1145 xfs_da_buf_done(bp);
1146 return(0); 1143 return(0);
1147} 1144}
1148 1145
@@ -1155,7 +1152,7 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
1155STATIC int 1152STATIC int
1156xfs_attr_leaf_get(xfs_da_args_t *args) 1153xfs_attr_leaf_get(xfs_da_args_t *args)
1157{ 1154{
1158 xfs_dabuf_t *bp; 1155 struct xfs_buf *bp;
1159 int error; 1156 int error;
1160 1157
1161 args->blkno = 0; 1158 args->blkno = 0;
@@ -1167,11 +1164,11 @@ xfs_attr_leaf_get(xfs_da_args_t *args)
1167 1164
1168 error = xfs_attr_leaf_lookup_int(bp, args); 1165 error = xfs_attr_leaf_lookup_int(bp, args);
1169 if (error != EEXIST) { 1166 if (error != EEXIST) {
1170 xfs_da_brelse(args->trans, bp); 1167 xfs_trans_brelse(args->trans, bp);
1171 return(error); 1168 return(error);
1172 } 1169 }
1173 error = xfs_attr_leaf_getvalue(bp, args); 1170 error = xfs_attr_leaf_getvalue(bp, args);
1174 xfs_da_brelse(args->trans, bp); 1171 xfs_trans_brelse(args->trans, bp);
1175 if (!error && (args->rmtblkno > 0) && !(args->flags & ATTR_KERNOVAL)) { 1172 if (!error && (args->rmtblkno > 0) && !(args->flags & ATTR_KERNOVAL)) {
1176 error = xfs_attr_rmtval_get(args); 1173 error = xfs_attr_rmtval_get(args);
1177 } 1174 }
@@ -1186,23 +1183,23 @@ xfs_attr_leaf_list(xfs_attr_list_context_t *context)
1186{ 1183{
1187 xfs_attr_leafblock_t *leaf; 1184 xfs_attr_leafblock_t *leaf;
1188 int error; 1185 int error;
1189 xfs_dabuf_t *bp; 1186 struct xfs_buf *bp;
1190 1187
1191 context->cursor->blkno = 0; 1188 context->cursor->blkno = 0;
1192 error = xfs_da_read_buf(NULL, context->dp, 0, -1, &bp, XFS_ATTR_FORK); 1189 error = xfs_da_read_buf(NULL, context->dp, 0, -1, &bp, XFS_ATTR_FORK);
1193 if (error) 1190 if (error)
1194 return XFS_ERROR(error); 1191 return XFS_ERROR(error);
1195 ASSERT(bp != NULL); 1192 ASSERT(bp != NULL);
1196 leaf = bp->data; 1193 leaf = bp->b_addr;
1197 if (unlikely(leaf->hdr.info.magic != cpu_to_be16(XFS_ATTR_LEAF_MAGIC))) { 1194 if (unlikely(leaf->hdr.info.magic != cpu_to_be16(XFS_ATTR_LEAF_MAGIC))) {
1198 XFS_CORRUPTION_ERROR("xfs_attr_leaf_list", XFS_ERRLEVEL_LOW, 1195 XFS_CORRUPTION_ERROR("xfs_attr_leaf_list", XFS_ERRLEVEL_LOW,
1199 context->dp->i_mount, leaf); 1196 context->dp->i_mount, leaf);
1200 xfs_da_brelse(NULL, bp); 1197 xfs_trans_brelse(NULL, bp);
1201 return XFS_ERROR(EFSCORRUPTED); 1198 return XFS_ERROR(EFSCORRUPTED);
1202 } 1199 }
1203 1200
1204 error = xfs_attr_leaf_list_int(bp, context); 1201 error = xfs_attr_leaf_list_int(bp, context);
1205 xfs_da_brelse(NULL, bp); 1202 xfs_trans_brelse(NULL, bp);
1206 return XFS_ERROR(error); 1203 return XFS_ERROR(error);
1207} 1204}
1208 1205
@@ -1489,7 +1486,7 @@ xfs_attr_node_removename(xfs_da_args_t *args)
1489 xfs_da_state_t *state; 1486 xfs_da_state_t *state;
1490 xfs_da_state_blk_t *blk; 1487 xfs_da_state_blk_t *blk;
1491 xfs_inode_t *dp; 1488 xfs_inode_t *dp;
1492 xfs_dabuf_t *bp; 1489 struct xfs_buf *bp;
1493 int retval, error, committed, forkoff; 1490 int retval, error, committed, forkoff;
1494 1491
1495 trace_xfs_attr_node_removename(args); 1492 trace_xfs_attr_node_removename(args);
@@ -1601,14 +1598,13 @@ xfs_attr_node_removename(xfs_da_args_t *args)
1601 */ 1598 */
1602 ASSERT(state->path.active == 1); 1599 ASSERT(state->path.active == 1);
1603 ASSERT(state->path.blk[0].bp); 1600 ASSERT(state->path.blk[0].bp);
1604 xfs_da_buf_done(state->path.blk[0].bp);
1605 state->path.blk[0].bp = NULL; 1601 state->path.blk[0].bp = NULL;
1606 1602
1607 error = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp, 1603 error = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp,
1608 XFS_ATTR_FORK); 1604 XFS_ATTR_FORK);
1609 if (error) 1605 if (error)
1610 goto out; 1606 goto out;
1611 ASSERT((((xfs_attr_leafblock_t *)bp->data)->hdr.info.magic) == 1607 ASSERT((((xfs_attr_leafblock_t *)bp->b_addr)->hdr.info.magic) ==
1612 cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 1608 cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1613 1609
1614 if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { 1610 if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
@@ -1635,7 +1631,7 @@ xfs_attr_node_removename(xfs_da_args_t *args)
1635 if (committed) 1631 if (committed)
1636 xfs_trans_ijoin(args->trans, dp, 0); 1632 xfs_trans_ijoin(args->trans, dp, 0);
1637 } else 1633 } else
1638 xfs_da_brelse(args->trans, bp); 1634 xfs_trans_brelse(args->trans, bp);
1639 } 1635 }
1640 error = 0; 1636 error = 0;
1641 1637
@@ -1665,8 +1661,7 @@ xfs_attr_fillstate(xfs_da_state_t *state)
1665 ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH)); 1661 ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
1666 for (blk = path->blk, level = 0; level < path->active; blk++, level++) { 1662 for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
1667 if (blk->bp) { 1663 if (blk->bp) {
1668 blk->disk_blkno = xfs_da_blkno(blk->bp); 1664 blk->disk_blkno = XFS_BUF_ADDR(blk->bp);
1669 xfs_da_buf_done(blk->bp);
1670 blk->bp = NULL; 1665 blk->bp = NULL;
1671 } else { 1666 } else {
1672 blk->disk_blkno = 0; 1667 blk->disk_blkno = 0;
@@ -1681,8 +1676,7 @@ xfs_attr_fillstate(xfs_da_state_t *state)
1681 ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH)); 1676 ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
1682 for (blk = path->blk, level = 0; level < path->active; blk++, level++) { 1677 for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
1683 if (blk->bp) { 1678 if (blk->bp) {
1684 blk->disk_blkno = xfs_da_blkno(blk->bp); 1679 blk->disk_blkno = XFS_BUF_ADDR(blk->bp);
1685 xfs_da_buf_done(blk->bp);
1686 blk->bp = NULL; 1680 blk->bp = NULL;
1687 } else { 1681 } else {
1688 blk->disk_blkno = 0; 1682 blk->disk_blkno = 0;
@@ -1792,7 +1786,7 @@ xfs_attr_node_get(xfs_da_args_t *args)
1792 * If not in a transaction, we have to release all the buffers. 1786 * If not in a transaction, we have to release all the buffers.
1793 */ 1787 */
1794 for (i = 0; i < state->path.active; i++) { 1788 for (i = 0; i < state->path.active; i++) {
1795 xfs_da_brelse(args->trans, state->path.blk[i].bp); 1789 xfs_trans_brelse(args->trans, state->path.blk[i].bp);
1796 state->path.blk[i].bp = NULL; 1790 state->path.blk[i].bp = NULL;
1797 } 1791 }
1798 1792
@@ -1808,7 +1802,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
1808 xfs_da_intnode_t *node; 1802 xfs_da_intnode_t *node;
1809 xfs_da_node_entry_t *btree; 1803 xfs_da_node_entry_t *btree;
1810 int error, i; 1804 int error, i;
1811 xfs_dabuf_t *bp; 1805 struct xfs_buf *bp;
1812 1806
1813 cursor = context->cursor; 1807 cursor = context->cursor;
1814 cursor->initted = 1; 1808 cursor->initted = 1;
@@ -1825,30 +1819,30 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
1825 if ((error != 0) && (error != EFSCORRUPTED)) 1819 if ((error != 0) && (error != EFSCORRUPTED))
1826 return(error); 1820 return(error);
1827 if (bp) { 1821 if (bp) {
1828 node = bp->data; 1822 node = bp->b_addr;
1829 switch (be16_to_cpu(node->hdr.info.magic)) { 1823 switch (be16_to_cpu(node->hdr.info.magic)) {
1830 case XFS_DA_NODE_MAGIC: 1824 case XFS_DA_NODE_MAGIC:
1831 trace_xfs_attr_list_wrong_blk(context); 1825 trace_xfs_attr_list_wrong_blk(context);
1832 xfs_da_brelse(NULL, bp); 1826 xfs_trans_brelse(NULL, bp);
1833 bp = NULL; 1827 bp = NULL;
1834 break; 1828 break;
1835 case XFS_ATTR_LEAF_MAGIC: 1829 case XFS_ATTR_LEAF_MAGIC:
1836 leaf = bp->data; 1830 leaf = bp->b_addr;
1837 if (cursor->hashval > be32_to_cpu(leaf->entries[ 1831 if (cursor->hashval > be32_to_cpu(leaf->entries[
1838 be16_to_cpu(leaf->hdr.count)-1].hashval)) { 1832 be16_to_cpu(leaf->hdr.count)-1].hashval)) {
1839 trace_xfs_attr_list_wrong_blk(context); 1833 trace_xfs_attr_list_wrong_blk(context);
1840 xfs_da_brelse(NULL, bp); 1834 xfs_trans_brelse(NULL, bp);
1841 bp = NULL; 1835 bp = NULL;
1842 } else if (cursor->hashval <= 1836 } else if (cursor->hashval <=
1843 be32_to_cpu(leaf->entries[0].hashval)) { 1837 be32_to_cpu(leaf->entries[0].hashval)) {
1844 trace_xfs_attr_list_wrong_blk(context); 1838 trace_xfs_attr_list_wrong_blk(context);
1845 xfs_da_brelse(NULL, bp); 1839 xfs_trans_brelse(NULL, bp);
1846 bp = NULL; 1840 bp = NULL;
1847 } 1841 }
1848 break; 1842 break;
1849 default: 1843 default:
1850 trace_xfs_attr_list_wrong_blk(context); 1844 trace_xfs_attr_list_wrong_blk(context);
1851 xfs_da_brelse(NULL, bp); 1845 xfs_trans_brelse(NULL, bp);
1852 bp = NULL; 1846 bp = NULL;
1853 } 1847 }
1854 } 1848 }
@@ -1873,7 +1867,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
1873 context->dp->i_mount); 1867 context->dp->i_mount);
1874 return(XFS_ERROR(EFSCORRUPTED)); 1868 return(XFS_ERROR(EFSCORRUPTED));
1875 } 1869 }
1876 node = bp->data; 1870 node = bp->b_addr;
1877 if (node->hdr.info.magic == 1871 if (node->hdr.info.magic ==
1878 cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) 1872 cpu_to_be16(XFS_ATTR_LEAF_MAGIC))
1879 break; 1873 break;
@@ -1883,7 +1877,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
1883 XFS_ERRLEVEL_LOW, 1877 XFS_ERRLEVEL_LOW,
1884 context->dp->i_mount, 1878 context->dp->i_mount,
1885 node); 1879 node);
1886 xfs_da_brelse(NULL, bp); 1880 xfs_trans_brelse(NULL, bp);
1887 return(XFS_ERROR(EFSCORRUPTED)); 1881 return(XFS_ERROR(EFSCORRUPTED));
1888 } 1882 }
1889 btree = node->btree; 1883 btree = node->btree;
@@ -1898,10 +1892,10 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
1898 } 1892 }
1899 } 1893 }
1900 if (i == be16_to_cpu(node->hdr.count)) { 1894 if (i == be16_to_cpu(node->hdr.count)) {
1901 xfs_da_brelse(NULL, bp); 1895 xfs_trans_brelse(NULL, bp);
1902 return(0); 1896 return(0);
1903 } 1897 }
1904 xfs_da_brelse(NULL, bp); 1898 xfs_trans_brelse(NULL, bp);
1905 } 1899 }
1906 } 1900 }
1907 ASSERT(bp != NULL); 1901 ASSERT(bp != NULL);
@@ -1912,24 +1906,24 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
1912 * adding the information. 1906 * adding the information.
1913 */ 1907 */
1914 for (;;) { 1908 for (;;) {
1915 leaf = bp->data; 1909 leaf = bp->b_addr;
1916 if (unlikely(leaf->hdr.info.magic != 1910 if (unlikely(leaf->hdr.info.magic !=
1917 cpu_to_be16(XFS_ATTR_LEAF_MAGIC))) { 1911 cpu_to_be16(XFS_ATTR_LEAF_MAGIC))) {
1918 XFS_CORRUPTION_ERROR("xfs_attr_node_list(4)", 1912 XFS_CORRUPTION_ERROR("xfs_attr_node_list(4)",
1919 XFS_ERRLEVEL_LOW, 1913 XFS_ERRLEVEL_LOW,
1920 context->dp->i_mount, leaf); 1914 context->dp->i_mount, leaf);
1921 xfs_da_brelse(NULL, bp); 1915 xfs_trans_brelse(NULL, bp);
1922 return(XFS_ERROR(EFSCORRUPTED)); 1916 return(XFS_ERROR(EFSCORRUPTED));
1923 } 1917 }
1924 error = xfs_attr_leaf_list_int(bp, context); 1918 error = xfs_attr_leaf_list_int(bp, context);
1925 if (error) { 1919 if (error) {
1926 xfs_da_brelse(NULL, bp); 1920 xfs_trans_brelse(NULL, bp);
1927 return error; 1921 return error;
1928 } 1922 }
1929 if (context->seen_enough || leaf->hdr.info.forw == 0) 1923 if (context->seen_enough || leaf->hdr.info.forw == 0)
1930 break; 1924 break;
1931 cursor->blkno = be32_to_cpu(leaf->hdr.info.forw); 1925 cursor->blkno = be32_to_cpu(leaf->hdr.info.forw);
1932 xfs_da_brelse(NULL, bp); 1926 xfs_trans_brelse(NULL, bp);
1933 error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1, 1927 error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1,
1934 &bp, XFS_ATTR_FORK); 1928 &bp, XFS_ATTR_FORK);
1935 if (error) 1929 if (error)
@@ -1941,7 +1935,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
1941 return(XFS_ERROR(EFSCORRUPTED)); 1935 return(XFS_ERROR(EFSCORRUPTED));
1942 } 1936 }
1943 } 1937 }
1944 xfs_da_brelse(NULL, bp); 1938 xfs_trans_brelse(NULL, bp);
1945 return(0); 1939 return(0);
1946} 1940}
1947 1941
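Every xfs_attr.c change above (and the xfs_attr_leaf.c changes that follow) applies the same mechanical conversion from the discontiguous-buffer series: the xfs_dabuf_t wrapper is gone, and callers work on struct xfs_buf directly through the generic transaction buffer helpers. Schematically, using only names that appear in these hunks (declarations trimmed, error handling elided):

	struct xfs_buf		*bp;
	xfs_attr_leafblock_t	*leaf;

	error = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp, XFS_ATTR_FORK);
	leaf = bp->b_addr;	/* was bp->data on the old xfs_dabuf_t wrapper */
	ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));

	/* Either: modify the leaf and log it through the transaction ... */
	xfs_trans_log_buf(args->trans, bp, 0, XFS_LBSIZE(args->dp->i_mount) - 1);

	/*
	 * ... or: release a buffer that was only read.  xfs_da_buf_done()
	 * has no replacement; it is simply gone.
	 */
	xfs_trans_brelse(args->trans, bp);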
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 7d89d800f517..d330111ca738 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -54,10 +54,10 @@
54 * Routines used for growing the Btree. 54 * Routines used for growing the Btree.
55 */ 55 */
56STATIC int xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t which_block, 56STATIC int xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t which_block,
57 xfs_dabuf_t **bpp); 57 struct xfs_buf **bpp);
58STATIC int xfs_attr_leaf_add_work(xfs_dabuf_t *leaf_buffer, xfs_da_args_t *args, 58STATIC int xfs_attr_leaf_add_work(struct xfs_buf *leaf_buffer,
59 int freemap_index); 59 xfs_da_args_t *args, int freemap_index);
60STATIC void xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *leaf_buffer); 60STATIC void xfs_attr_leaf_compact(xfs_trans_t *tp, struct xfs_buf *leaf_buffer);
61STATIC void xfs_attr_leaf_rebalance(xfs_da_state_t *state, 61STATIC void xfs_attr_leaf_rebalance(xfs_da_state_t *state,
62 xfs_da_state_blk_t *blk1, 62 xfs_da_state_blk_t *blk1,
63 xfs_da_state_blk_t *blk2); 63 xfs_da_state_blk_t *blk2);
@@ -71,9 +71,9 @@ STATIC int xfs_attr_leaf_figure_balance(xfs_da_state_t *state,
71 * Routines used for shrinking the Btree. 71 * Routines used for shrinking the Btree.
72 */ 72 */
73STATIC int xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, 73STATIC int xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp,
74 xfs_dabuf_t *bp, int level); 74 struct xfs_buf *bp, int level);
75STATIC int xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, 75STATIC int xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp,
76 xfs_dabuf_t *bp); 76 struct xfs_buf *bp);
77STATIC int xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp, 77STATIC int xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
78 xfs_dablk_t blkno, int blkcnt); 78 xfs_dablk_t blkno, int blkcnt);
79 79
@@ -480,7 +480,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
480 char *tmpbuffer; 480 char *tmpbuffer;
481 int error, i, size; 481 int error, i, size;
482 xfs_dablk_t blkno; 482 xfs_dablk_t blkno;
483 xfs_dabuf_t *bp; 483 struct xfs_buf *bp;
484 xfs_ifork_t *ifp; 484 xfs_ifork_t *ifp;
485 485
486 trace_xfs_attr_sf_to_leaf(args); 486 trace_xfs_attr_sf_to_leaf(args);
@@ -550,8 +550,6 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
550 error = 0; 550 error = 0;
551 551
552out: 552out:
553 if(bp)
554 xfs_da_buf_done(bp);
555 kmem_free(tmpbuffer); 553 kmem_free(tmpbuffer);
556 return(error); 554 return(error);
557} 555}
@@ -737,14 +735,16 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
737 * a shortform attribute list. 735 * a shortform attribute list.
738 */ 736 */
739int 737int
740xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp) 738xfs_attr_shortform_allfit(
739 struct xfs_buf *bp,
740 struct xfs_inode *dp)
741{ 741{
742 xfs_attr_leafblock_t *leaf; 742 xfs_attr_leafblock_t *leaf;
743 xfs_attr_leaf_entry_t *entry; 743 xfs_attr_leaf_entry_t *entry;
744 xfs_attr_leaf_name_local_t *name_loc; 744 xfs_attr_leaf_name_local_t *name_loc;
745 int bytes, i; 745 int bytes, i;
746 746
747 leaf = bp->data; 747 leaf = bp->b_addr;
748 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 748 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
749 749
750 entry = &leaf->entries[0]; 750 entry = &leaf->entries[0];
@@ -774,7 +774,10 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp)
774 * Convert a leaf attribute list to shortform attribute list 774 * Convert a leaf attribute list to shortform attribute list
775 */ 775 */
776int 776int
777xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff) 777xfs_attr_leaf_to_shortform(
778 struct xfs_buf *bp,
779 xfs_da_args_t *args,
780 int forkoff)
778{ 781{
779 xfs_attr_leafblock_t *leaf; 782 xfs_attr_leafblock_t *leaf;
780 xfs_attr_leaf_entry_t *entry; 783 xfs_attr_leaf_entry_t *entry;
@@ -791,10 +794,10 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
791 ASSERT(tmpbuffer != NULL); 794 ASSERT(tmpbuffer != NULL);
792 795
793 ASSERT(bp != NULL); 796 ASSERT(bp != NULL);
794 memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount)); 797 memcpy(tmpbuffer, bp->b_addr, XFS_LBSIZE(dp->i_mount));
795 leaf = (xfs_attr_leafblock_t *)tmpbuffer; 798 leaf = (xfs_attr_leafblock_t *)tmpbuffer;
796 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 799 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
797 memset(bp->data, 0, XFS_LBSIZE(dp->i_mount)); 800 memset(bp->b_addr, 0, XFS_LBSIZE(dp->i_mount));
798 801
799 /* 802 /*
800 * Clean out the prior contents of the attribute list. 803 * Clean out the prior contents of the attribute list.
@@ -855,7 +858,7 @@ xfs_attr_leaf_to_node(xfs_da_args_t *args)
855 xfs_attr_leafblock_t *leaf; 858 xfs_attr_leafblock_t *leaf;
856 xfs_da_intnode_t *node; 859 xfs_da_intnode_t *node;
857 xfs_inode_t *dp; 860 xfs_inode_t *dp;
858 xfs_dabuf_t *bp1, *bp2; 861 struct xfs_buf *bp1, *bp2;
859 xfs_dablk_t blkno; 862 xfs_dablk_t blkno;
860 int error; 863 int error;
861 864
@@ -877,10 +880,9 @@ xfs_attr_leaf_to_node(xfs_da_args_t *args)
877 if (error) 880 if (error)
878 goto out; 881 goto out;
879 ASSERT(bp2 != NULL); 882 ASSERT(bp2 != NULL);
880 memcpy(bp2->data, bp1->data, XFS_LBSIZE(dp->i_mount)); 883 memcpy(bp2->b_addr, bp1->b_addr, XFS_LBSIZE(dp->i_mount));
881 xfs_da_buf_done(bp1);
882 bp1 = NULL; 884 bp1 = NULL;
883 xfs_da_log_buf(args->trans, bp2, 0, XFS_LBSIZE(dp->i_mount) - 1); 885 xfs_trans_log_buf(args->trans, bp2, 0, XFS_LBSIZE(dp->i_mount) - 1);
884 886
885 /* 887 /*
886 * Set up the new root node. 888 * Set up the new root node.
@@ -888,21 +890,17 @@ xfs_attr_leaf_to_node(xfs_da_args_t *args)
888 error = xfs_da_node_create(args, 0, 1, &bp1, XFS_ATTR_FORK); 890 error = xfs_da_node_create(args, 0, 1, &bp1, XFS_ATTR_FORK);
889 if (error) 891 if (error)
890 goto out; 892 goto out;
891 node = bp1->data; 893 node = bp1->b_addr;
892 leaf = bp2->data; 894 leaf = bp2->b_addr;
893 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 895 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
894 /* both on-disk, don't endian-flip twice */ 896 /* both on-disk, don't endian-flip twice */
895 node->btree[0].hashval = 897 node->btree[0].hashval =
896 leaf->entries[be16_to_cpu(leaf->hdr.count)-1 ].hashval; 898 leaf->entries[be16_to_cpu(leaf->hdr.count)-1 ].hashval;
897 node->btree[0].before = cpu_to_be32(blkno); 899 node->btree[0].before = cpu_to_be32(blkno);
898 node->hdr.count = cpu_to_be16(1); 900 node->hdr.count = cpu_to_be16(1);
899 xfs_da_log_buf(args->trans, bp1, 0, XFS_LBSIZE(dp->i_mount) - 1); 901 xfs_trans_log_buf(args->trans, bp1, 0, XFS_LBSIZE(dp->i_mount) - 1);
900 error = 0; 902 error = 0;
901out: 903out:
902 if (bp1)
903 xfs_da_buf_done(bp1);
904 if (bp2)
905 xfs_da_buf_done(bp2);
906 return(error); 904 return(error);
907} 905}
908 906
@@ -916,12 +914,15 @@ out:
916 * or a leaf in a node attribute list. 914 * or a leaf in a node attribute list.
917 */ 915 */
918STATIC int 916STATIC int
919xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp) 917xfs_attr_leaf_create(
918 xfs_da_args_t *args,
919 xfs_dablk_t blkno,
920 struct xfs_buf **bpp)
920{ 921{
921 xfs_attr_leafblock_t *leaf; 922 xfs_attr_leafblock_t *leaf;
922 xfs_attr_leaf_hdr_t *hdr; 923 xfs_attr_leaf_hdr_t *hdr;
923 xfs_inode_t *dp; 924 xfs_inode_t *dp;
924 xfs_dabuf_t *bp; 925 struct xfs_buf *bp;
925 int error; 926 int error;
926 927
927 trace_xfs_attr_leaf_create(args); 928 trace_xfs_attr_leaf_create(args);
@@ -933,7 +934,7 @@ xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp)
933 if (error) 934 if (error)
934 return(error); 935 return(error);
935 ASSERT(bp != NULL); 936 ASSERT(bp != NULL);
936 leaf = bp->data; 937 leaf = bp->b_addr;
937 memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount)); 938 memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount));
938 hdr = &leaf->hdr; 939 hdr = &leaf->hdr;
939 hdr->info.magic = cpu_to_be16(XFS_ATTR_LEAF_MAGIC); 940 hdr->info.magic = cpu_to_be16(XFS_ATTR_LEAF_MAGIC);
@@ -947,7 +948,7 @@ xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp)
947 hdr->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr->firstused) - 948 hdr->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr->firstused) -
948 sizeof(xfs_attr_leaf_hdr_t)); 949 sizeof(xfs_attr_leaf_hdr_t));
949 950
950 xfs_da_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1); 951 xfs_trans_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1);
951 952
952 *bpp = bp; 953 *bpp = bp;
953 return(0); 954 return(0);
@@ -1014,7 +1015,9 @@ xfs_attr_leaf_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
1014 * Add a name to the leaf attribute list structure. 1015 * Add a name to the leaf attribute list structure.
1015 */ 1016 */
1016int 1017int
1017xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args) 1018xfs_attr_leaf_add(
1019 struct xfs_buf *bp,
1020 struct xfs_da_args *args)
1018{ 1021{
1019 xfs_attr_leafblock_t *leaf; 1022 xfs_attr_leafblock_t *leaf;
1020 xfs_attr_leaf_hdr_t *hdr; 1023 xfs_attr_leaf_hdr_t *hdr;
@@ -1023,7 +1026,7 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args)
1023 1026
1024 trace_xfs_attr_leaf_add(args); 1027 trace_xfs_attr_leaf_add(args);
1025 1028
1026 leaf = bp->data; 1029 leaf = bp->b_addr;
1027 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 1030 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1028 ASSERT((args->index >= 0) 1031 ASSERT((args->index >= 0)
1029 && (args->index <= be16_to_cpu(leaf->hdr.count))); 1032 && (args->index <= be16_to_cpu(leaf->hdr.count)));
@@ -1085,7 +1088,10 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args)
1085 * Add a name to a leaf attribute list structure. 1088 * Add a name to a leaf attribute list structure.
1086 */ 1089 */
1087STATIC int 1090STATIC int
1088xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex) 1091xfs_attr_leaf_add_work(
1092 struct xfs_buf *bp,
1093 xfs_da_args_t *args,
1094 int mapindex)
1089{ 1095{
1090 xfs_attr_leafblock_t *leaf; 1096 xfs_attr_leafblock_t *leaf;
1091 xfs_attr_leaf_hdr_t *hdr; 1097 xfs_attr_leaf_hdr_t *hdr;
@@ -1096,7 +1102,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
1096 xfs_mount_t *mp; 1102 xfs_mount_t *mp;
1097 int tmp, i; 1103 int tmp, i;
1098 1104
1099 leaf = bp->data; 1105 leaf = bp->b_addr;
1100 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 1106 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1101 hdr = &leaf->hdr; 1107 hdr = &leaf->hdr;
1102 ASSERT((mapindex >= 0) && (mapindex < XFS_ATTR_LEAF_MAPSIZE)); 1108 ASSERT((mapindex >= 0) && (mapindex < XFS_ATTR_LEAF_MAPSIZE));
@@ -1110,7 +1116,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
1110 tmp = be16_to_cpu(hdr->count) - args->index; 1116 tmp = be16_to_cpu(hdr->count) - args->index;
1111 tmp *= sizeof(xfs_attr_leaf_entry_t); 1117 tmp *= sizeof(xfs_attr_leaf_entry_t);
1112 memmove((char *)(entry+1), (char *)entry, tmp); 1118 memmove((char *)(entry+1), (char *)entry, tmp);
1113 xfs_da_log_buf(args->trans, bp, 1119 xfs_trans_log_buf(args->trans, bp,
1114 XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry))); 1120 XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
1115 } 1121 }
1116 be16_add_cpu(&hdr->count, 1); 1122 be16_add_cpu(&hdr->count, 1);
@@ -1142,7 +1148,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
1142 args->index2++; 1148 args->index2++;
1143 } 1149 }
1144 } 1150 }
1145 xfs_da_log_buf(args->trans, bp, 1151 xfs_trans_log_buf(args->trans, bp,
1146 XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry))); 1152 XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
1147 ASSERT((args->index == 0) || 1153 ASSERT((args->index == 0) ||
1148 (be32_to_cpu(entry->hashval) >= be32_to_cpu((entry-1)->hashval))); 1154 (be32_to_cpu(entry->hashval) >= be32_to_cpu((entry-1)->hashval)));
@@ -1174,7 +1180,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
1174 args->rmtblkno = 1; 1180 args->rmtblkno = 1;
1175 args->rmtblkcnt = XFS_B_TO_FSB(mp, args->valuelen); 1181 args->rmtblkcnt = XFS_B_TO_FSB(mp, args->valuelen);
1176 } 1182 }
1177 xfs_da_log_buf(args->trans, bp, 1183 xfs_trans_log_buf(args->trans, bp,
1178 XFS_DA_LOGRANGE(leaf, xfs_attr_leaf_name(leaf, args->index), 1184 XFS_DA_LOGRANGE(leaf, xfs_attr_leaf_name(leaf, args->index),
1179 xfs_attr_leaf_entsize(leaf, args->index))); 1185 xfs_attr_leaf_entsize(leaf, args->index)));
1180 1186
@@ -1198,7 +1204,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
1198 } 1204 }
1199 } 1205 }
1200 be16_add_cpu(&hdr->usedbytes, xfs_attr_leaf_entsize(leaf, args->index)); 1206 be16_add_cpu(&hdr->usedbytes, xfs_attr_leaf_entsize(leaf, args->index));
1201 xfs_da_log_buf(args->trans, bp, 1207 xfs_trans_log_buf(args->trans, bp,
1202 XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr))); 1208 XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr)));
1203 return(0); 1209 return(0);
1204} 1210}
@@ -1207,7 +1213,9 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
1207 * Garbage collect a leaf attribute list block by copying it to a new buffer. 1213 * Garbage collect a leaf attribute list block by copying it to a new buffer.
1208 */ 1214 */
1209STATIC void 1215STATIC void
1210xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp) 1216xfs_attr_leaf_compact(
1217 struct xfs_trans *trans,
1218 struct xfs_buf *bp)
1211{ 1219{
1212 xfs_attr_leafblock_t *leaf_s, *leaf_d; 1220 xfs_attr_leafblock_t *leaf_s, *leaf_d;
1213 xfs_attr_leaf_hdr_t *hdr_s, *hdr_d; 1221 xfs_attr_leaf_hdr_t *hdr_s, *hdr_d;
@@ -1217,14 +1225,14 @@ xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp)
1217 mp = trans->t_mountp; 1225 mp = trans->t_mountp;
1218 tmpbuffer = kmem_alloc(XFS_LBSIZE(mp), KM_SLEEP); 1226 tmpbuffer = kmem_alloc(XFS_LBSIZE(mp), KM_SLEEP);
1219 ASSERT(tmpbuffer != NULL); 1227 ASSERT(tmpbuffer != NULL);
1220 memcpy(tmpbuffer, bp->data, XFS_LBSIZE(mp)); 1228 memcpy(tmpbuffer, bp->b_addr, XFS_LBSIZE(mp));
1221 memset(bp->data, 0, XFS_LBSIZE(mp)); 1229 memset(bp->b_addr, 0, XFS_LBSIZE(mp));
1222 1230
1223 /* 1231 /*
1224 * Copy basic information 1232 * Copy basic information
1225 */ 1233 */
1226 leaf_s = (xfs_attr_leafblock_t *)tmpbuffer; 1234 leaf_s = (xfs_attr_leafblock_t *)tmpbuffer;
1227 leaf_d = bp->data; 1235 leaf_d = bp->b_addr;
1228 hdr_s = &leaf_s->hdr; 1236 hdr_s = &leaf_s->hdr;
1229 hdr_d = &leaf_d->hdr; 1237 hdr_d = &leaf_d->hdr;
1230 hdr_d->info = hdr_s->info; /* struct copy */ 1238 hdr_d->info = hdr_s->info; /* struct copy */
@@ -1247,7 +1255,7 @@ xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp)
1247 */ 1255 */
1248 xfs_attr_leaf_moveents(leaf_s, 0, leaf_d, 0, 1256 xfs_attr_leaf_moveents(leaf_s, 0, leaf_d, 0,
1249 be16_to_cpu(hdr_s->count), mp); 1257 be16_to_cpu(hdr_s->count), mp);
1250 xfs_da_log_buf(trans, bp, 0, XFS_LBSIZE(mp) - 1); 1258 xfs_trans_log_buf(trans, bp, 0, XFS_LBSIZE(mp) - 1);
1251 1259
1252 kmem_free(tmpbuffer); 1260 kmem_free(tmpbuffer);
1253} 1261}
@@ -1279,8 +1287,8 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
1279 */ 1287 */
1280 ASSERT(blk1->magic == XFS_ATTR_LEAF_MAGIC); 1288 ASSERT(blk1->magic == XFS_ATTR_LEAF_MAGIC);
1281 ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC); 1289 ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC);
1282 leaf1 = blk1->bp->data; 1290 leaf1 = blk1->bp->b_addr;
1283 leaf2 = blk2->bp->data; 1291 leaf2 = blk2->bp->b_addr;
1284 ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 1292 ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1285 ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 1293 ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1286 args = state->args; 1294 args = state->args;
@@ -1298,8 +1306,8 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
1298 tmp_blk = blk1; 1306 tmp_blk = blk1;
1299 blk1 = blk2; 1307 blk1 = blk2;
1300 blk2 = tmp_blk; 1308 blk2 = tmp_blk;
1301 leaf1 = blk1->bp->data; 1309 leaf1 = blk1->bp->b_addr;
1302 leaf2 = blk2->bp->data; 1310 leaf2 = blk2->bp->b_addr;
1303 swap = 1; 1311 swap = 1;
1304 } 1312 }
1305 hdr1 = &leaf1->hdr; 1313 hdr1 = &leaf1->hdr;
@@ -1346,8 +1354,8 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
1346 xfs_attr_leaf_moveents(leaf1, be16_to_cpu(hdr1->count) - count, 1354 xfs_attr_leaf_moveents(leaf1, be16_to_cpu(hdr1->count) - count,
1347 leaf2, 0, count, state->mp); 1355 leaf2, 0, count, state->mp);
1348 1356
1349 xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1); 1357 xfs_trans_log_buf(args->trans, blk1->bp, 0, state->blocksize-1);
1350 xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1); 1358 xfs_trans_log_buf(args->trans, blk2->bp, 0, state->blocksize-1);
1351 } else if (count > be16_to_cpu(hdr1->count)) { 1359 } else if (count > be16_to_cpu(hdr1->count)) {
1352 /* 1360 /*
1353 * I assert that since all callers pass in an empty 1361 * I assert that since all callers pass in an empty
@@ -1378,8 +1386,8 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
1378 xfs_attr_leaf_moveents(leaf2, 0, leaf1, 1386 xfs_attr_leaf_moveents(leaf2, 0, leaf1,
1379 be16_to_cpu(hdr1->count), count, state->mp); 1387 be16_to_cpu(hdr1->count), count, state->mp);
1380 1388
1381 xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1); 1389 xfs_trans_log_buf(args->trans, blk1->bp, 0, state->blocksize-1);
1382 xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1); 1390 xfs_trans_log_buf(args->trans, blk2->bp, 0, state->blocksize-1);
1383 } 1391 }
1384 1392
1385 /* 1393 /*
@@ -1448,8 +1456,8 @@ xfs_attr_leaf_figure_balance(xfs_da_state_t *state,
1448 /* 1456 /*
1449 * Set up environment. 1457 * Set up environment.
1450 */ 1458 */
1451 leaf1 = blk1->bp->data; 1459 leaf1 = blk1->bp->b_addr;
1452 leaf2 = blk2->bp->data; 1460 leaf2 = blk2->bp->b_addr;
1453 hdr1 = &leaf1->hdr; 1461 hdr1 = &leaf1->hdr;
1454 hdr2 = &leaf2->hdr; 1462 hdr2 = &leaf2->hdr;
1455 foundit = 0; 1463 foundit = 0;
@@ -1551,7 +1559,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
1551 xfs_da_blkinfo_t *info; 1559 xfs_da_blkinfo_t *info;
1552 int count, bytes, forward, error, retval, i; 1560 int count, bytes, forward, error, retval, i;
1553 xfs_dablk_t blkno; 1561 xfs_dablk_t blkno;
1554 xfs_dabuf_t *bp; 1562 struct xfs_buf *bp;
1555 1563
1556 /* 1564 /*
1557 * Check for the degenerate case of the block being over 50% full. 1565 * Check for the degenerate case of the block being over 50% full.
@@ -1559,7 +1567,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
1559 * to coalesce with a sibling. 1567 * to coalesce with a sibling.
1560 */ 1568 */
1561 blk = &state->path.blk[ state->path.active-1 ]; 1569 blk = &state->path.blk[ state->path.active-1 ];
1562 info = blk->bp->data; 1570 info = blk->bp->b_addr;
1563 ASSERT(info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 1571 ASSERT(info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1564 leaf = (xfs_attr_leafblock_t *)info; 1572 leaf = (xfs_attr_leafblock_t *)info;
1565 count = be16_to_cpu(leaf->hdr.count); 1573 count = be16_to_cpu(leaf->hdr.count);
@@ -1622,13 +1630,13 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
1622 count = be16_to_cpu(leaf->hdr.count); 1630 count = be16_to_cpu(leaf->hdr.count);
1623 bytes = state->blocksize - (state->blocksize>>2); 1631 bytes = state->blocksize - (state->blocksize>>2);
1624 bytes -= be16_to_cpu(leaf->hdr.usedbytes); 1632 bytes -= be16_to_cpu(leaf->hdr.usedbytes);
1625 leaf = bp->data; 1633 leaf = bp->b_addr;
1626 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 1634 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1627 count += be16_to_cpu(leaf->hdr.count); 1635 count += be16_to_cpu(leaf->hdr.count);
1628 bytes -= be16_to_cpu(leaf->hdr.usedbytes); 1636 bytes -= be16_to_cpu(leaf->hdr.usedbytes);
1629 bytes -= count * sizeof(xfs_attr_leaf_entry_t); 1637 bytes -= count * sizeof(xfs_attr_leaf_entry_t);
1630 bytes -= sizeof(xfs_attr_leaf_hdr_t); 1638 bytes -= sizeof(xfs_attr_leaf_hdr_t);
1631 xfs_da_brelse(state->args->trans, bp); 1639 xfs_trans_brelse(state->args->trans, bp);
1632 if (bytes >= 0) 1640 if (bytes >= 0)
1633 break; /* fits with at least 25% to spare */ 1641 break; /* fits with at least 25% to spare */
1634 } 1642 }
@@ -1666,7 +1674,9 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
1666 * If two leaves are 37% full, when combined they will leave 25% free. 1674 * If two leaves are 37% full, when combined they will leave 25% free.
1667 */ 1675 */
1668int 1676int
1669xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) 1677xfs_attr_leaf_remove(
1678 struct xfs_buf *bp,
1679 xfs_da_args_t *args)
1670{ 1680{
1671 xfs_attr_leafblock_t *leaf; 1681 xfs_attr_leafblock_t *leaf;
1672 xfs_attr_leaf_hdr_t *hdr; 1682 xfs_attr_leaf_hdr_t *hdr;
@@ -1676,7 +1686,7 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
1676 int tablesize, tmp, i; 1686 int tablesize, tmp, i;
1677 xfs_mount_t *mp; 1687 xfs_mount_t *mp;
1678 1688
1679 leaf = bp->data; 1689 leaf = bp->b_addr;
1680 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 1690 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1681 hdr = &leaf->hdr; 1691 hdr = &leaf->hdr;
1682 mp = args->trans->t_mountp; 1692 mp = args->trans->t_mountp;
@@ -1769,7 +1779,7 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
1769 */ 1779 */
1770 memset(xfs_attr_leaf_name(leaf, args->index), 0, entsize); 1780 memset(xfs_attr_leaf_name(leaf, args->index), 0, entsize);
1771 be16_add_cpu(&hdr->usedbytes, -entsize); 1781 be16_add_cpu(&hdr->usedbytes, -entsize);
1772 xfs_da_log_buf(args->trans, bp, 1782 xfs_trans_log_buf(args->trans, bp,
1773 XFS_DA_LOGRANGE(leaf, xfs_attr_leaf_name(leaf, args->index), 1783 XFS_DA_LOGRANGE(leaf, xfs_attr_leaf_name(leaf, args->index),
1774 entsize)); 1784 entsize));
1775 1785
@@ -1777,7 +1787,7 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
1777 * sizeof(xfs_attr_leaf_entry_t); 1787 * sizeof(xfs_attr_leaf_entry_t);
1778 memmove((char *)entry, (char *)(entry+1), tmp); 1788 memmove((char *)entry, (char *)(entry+1), tmp);
1779 be16_add_cpu(&hdr->count, -1); 1789 be16_add_cpu(&hdr->count, -1);
1780 xfs_da_log_buf(args->trans, bp, 1790 xfs_trans_log_buf(args->trans, bp,
1781 XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry))); 1791 XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
1782 entry = &leaf->entries[be16_to_cpu(hdr->count)]; 1792 entry = &leaf->entries[be16_to_cpu(hdr->count)];
1783 memset((char *)entry, 0, sizeof(xfs_attr_leaf_entry_t)); 1793 memset((char *)entry, 0, sizeof(xfs_attr_leaf_entry_t));
@@ -1807,7 +1817,7 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
1807 } else { 1817 } else {
1808 hdr->holes = 1; /* mark as needing compaction */ 1818 hdr->holes = 1; /* mark as needing compaction */
1809 } 1819 }
1810 xfs_da_log_buf(args->trans, bp, 1820 xfs_trans_log_buf(args->trans, bp,
1811 XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr))); 1821 XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr)));
1812 1822
1813 /* 1823 /*
@@ -1840,8 +1850,8 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1840 mp = state->mp; 1850 mp = state->mp;
1841 ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC); 1851 ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC);
1842 ASSERT(save_blk->magic == XFS_ATTR_LEAF_MAGIC); 1852 ASSERT(save_blk->magic == XFS_ATTR_LEAF_MAGIC);
1843 drop_leaf = drop_blk->bp->data; 1853 drop_leaf = drop_blk->bp->b_addr;
1844 save_leaf = save_blk->bp->data; 1854 save_leaf = save_blk->bp->b_addr;
1845 ASSERT(drop_leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 1855 ASSERT(drop_leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1846 ASSERT(save_leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 1856 ASSERT(save_leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1847 drop_hdr = &drop_leaf->hdr; 1857 drop_hdr = &drop_leaf->hdr;
@@ -1906,7 +1916,7 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1906 kmem_free(tmpbuffer); 1916 kmem_free(tmpbuffer);
1907 } 1917 }
1908 1918
1909 xfs_da_log_buf(state->args->trans, save_blk->bp, 0, 1919 xfs_trans_log_buf(state->args->trans, save_blk->bp, 0,
1910 state->blocksize - 1); 1920 state->blocksize - 1);
1911 1921
1912 /* 1922 /*
@@ -1934,7 +1944,9 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1934 * Don't change the args->value unless we find the attribute. 1944 * Don't change the args->value unless we find the attribute.
1935 */ 1945 */
1936int 1946int
1937xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args) 1947xfs_attr_leaf_lookup_int(
1948 struct xfs_buf *bp,
1949 xfs_da_args_t *args)
1938{ 1950{
1939 xfs_attr_leafblock_t *leaf; 1951 xfs_attr_leafblock_t *leaf;
1940 xfs_attr_leaf_entry_t *entry; 1952 xfs_attr_leaf_entry_t *entry;
@@ -1945,7 +1957,7 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
1945 1957
1946 trace_xfs_attr_leaf_lookup(args); 1958 trace_xfs_attr_leaf_lookup(args);
1947 1959
1948 leaf = bp->data; 1960 leaf = bp->b_addr;
1949 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 1961 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1950 ASSERT(be16_to_cpu(leaf->hdr.count) 1962 ASSERT(be16_to_cpu(leaf->hdr.count)
1951 < (XFS_LBSIZE(args->dp->i_mount)/8)); 1963 < (XFS_LBSIZE(args->dp->i_mount)/8));
@@ -2041,7 +2053,9 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
2041 * list structure. 2053 * list structure.
2042 */ 2054 */
2043int 2055int
2044xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args) 2056xfs_attr_leaf_getvalue(
2057 struct xfs_buf *bp,
2058 xfs_da_args_t *args)
2045{ 2059{
2046 int valuelen; 2060 int valuelen;
2047 xfs_attr_leafblock_t *leaf; 2061 xfs_attr_leafblock_t *leaf;
@@ -2049,7 +2063,7 @@ xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args)
2049 xfs_attr_leaf_name_local_t *name_loc; 2063 xfs_attr_leaf_name_local_t *name_loc;
2050 xfs_attr_leaf_name_remote_t *name_rmt; 2064 xfs_attr_leaf_name_remote_t *name_rmt;
2051 2065
2052 leaf = bp->data; 2066 leaf = bp->b_addr;
2053 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 2067 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
2054 ASSERT(be16_to_cpu(leaf->hdr.count) 2068 ASSERT(be16_to_cpu(leaf->hdr.count)
2055 < (XFS_LBSIZE(args->dp->i_mount)/8)); 2069 < (XFS_LBSIZE(args->dp->i_mount)/8));
@@ -2247,12 +2261,14 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
2247 * Return 0 unless leaf2 should go before leaf1. 2261 * Return 0 unless leaf2 should go before leaf1.
2248 */ 2262 */
2249int 2263int
2250xfs_attr_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp) 2264xfs_attr_leaf_order(
2265 struct xfs_buf *leaf1_bp,
2266 struct xfs_buf *leaf2_bp)
2251{ 2267{
2252 xfs_attr_leafblock_t *leaf1, *leaf2; 2268 xfs_attr_leafblock_t *leaf1, *leaf2;
2253 2269
2254 leaf1 = leaf1_bp->data; 2270 leaf1 = leaf1_bp->b_addr;
2255 leaf2 = leaf2_bp->data; 2271 leaf2 = leaf2_bp->b_addr;
2256 ASSERT((leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) && 2272 ASSERT((leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) &&
2257 (leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC))); 2273 (leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)));
2258 if ((be16_to_cpu(leaf1->hdr.count) > 0) && 2274 if ((be16_to_cpu(leaf1->hdr.count) > 0) &&
@@ -2272,11 +2288,13 @@ xfs_attr_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp)
2272 * Pick up the last hashvalue from a leaf block. 2288 * Pick up the last hashvalue from a leaf block.
2273 */ 2289 */
2274xfs_dahash_t 2290xfs_dahash_t
2275xfs_attr_leaf_lasthash(xfs_dabuf_t *bp, int *count) 2291xfs_attr_leaf_lasthash(
2292 struct xfs_buf *bp,
2293 int *count)
2276{ 2294{
2277 xfs_attr_leafblock_t *leaf; 2295 xfs_attr_leafblock_t *leaf;
2278 2296
2279 leaf = bp->data; 2297 leaf = bp->b_addr;
2280 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 2298 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
2281 if (count) 2299 if (count)
2282 *count = be16_to_cpu(leaf->hdr.count); 2300 *count = be16_to_cpu(leaf->hdr.count);
@@ -2337,7 +2355,9 @@ xfs_attr_leaf_newentsize(int namelen, int valuelen, int blocksize, int *local)
2337 * Copy out attribute list entries for attr_list(), for leaf attribute lists. 2355 * Copy out attribute list entries for attr_list(), for leaf attribute lists.
2338 */ 2356 */
2339int 2357int
2340xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) 2358xfs_attr_leaf_list_int(
2359 struct xfs_buf *bp,
2360 xfs_attr_list_context_t *context)
2341{ 2361{
2342 attrlist_cursor_kern_t *cursor; 2362 attrlist_cursor_kern_t *cursor;
2343 xfs_attr_leafblock_t *leaf; 2363 xfs_attr_leafblock_t *leaf;
@@ -2345,7 +2365,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
2345 int retval, i; 2365 int retval, i;
2346 2366
2347 ASSERT(bp != NULL); 2367 ASSERT(bp != NULL);
2348 leaf = bp->data; 2368 leaf = bp->b_addr;
2349 cursor = context->cursor; 2369 cursor = context->cursor;
2350 cursor->initted = 1; 2370 cursor->initted = 1;
2351 2371
@@ -2463,7 +2483,7 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args)
2463 xfs_attr_leafblock_t *leaf; 2483 xfs_attr_leafblock_t *leaf;
2464 xfs_attr_leaf_entry_t *entry; 2484 xfs_attr_leaf_entry_t *entry;
2465 xfs_attr_leaf_name_remote_t *name_rmt; 2485 xfs_attr_leaf_name_remote_t *name_rmt;
2466 xfs_dabuf_t *bp; 2486 struct xfs_buf *bp;
2467 int error; 2487 int error;
2468#ifdef DEBUG 2488#ifdef DEBUG
2469 xfs_attr_leaf_name_local_t *name_loc; 2489 xfs_attr_leaf_name_local_t *name_loc;
@@ -2482,7 +2502,7 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args)
2482 } 2502 }
2483 ASSERT(bp != NULL); 2503 ASSERT(bp != NULL);
2484 2504
2485 leaf = bp->data; 2505 leaf = bp->b_addr;
2486 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 2506 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
2487 ASSERT(args->index < be16_to_cpu(leaf->hdr.count)); 2507 ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
2488 ASSERT(args->index >= 0); 2508 ASSERT(args->index >= 0);
@@ -2505,7 +2525,7 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args)
2505#endif /* DEBUG */ 2525#endif /* DEBUG */
2506 2526
2507 entry->flags &= ~XFS_ATTR_INCOMPLETE; 2527 entry->flags &= ~XFS_ATTR_INCOMPLETE;
2508 xfs_da_log_buf(args->trans, bp, 2528 xfs_trans_log_buf(args->trans, bp,
2509 XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry))); 2529 XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
2510 2530
2511 if (args->rmtblkno) { 2531 if (args->rmtblkno) {
@@ -2513,10 +2533,9 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args)
2513 name_rmt = xfs_attr_leaf_name_remote(leaf, args->index); 2533 name_rmt = xfs_attr_leaf_name_remote(leaf, args->index);
2514 name_rmt->valueblk = cpu_to_be32(args->rmtblkno); 2534 name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
2515 name_rmt->valuelen = cpu_to_be32(args->valuelen); 2535 name_rmt->valuelen = cpu_to_be32(args->valuelen);
2516 xfs_da_log_buf(args->trans, bp, 2536 xfs_trans_log_buf(args->trans, bp,
2517 XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt))); 2537 XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
2518 } 2538 }
2519 xfs_da_buf_done(bp);
2520 2539
2521 /* 2540 /*
2522 * Commit the flag value change and start the next trans in series. 2541 * Commit the flag value change and start the next trans in series.
@@ -2533,7 +2552,7 @@ xfs_attr_leaf_setflag(xfs_da_args_t *args)
2533 xfs_attr_leafblock_t *leaf; 2552 xfs_attr_leafblock_t *leaf;
2534 xfs_attr_leaf_entry_t *entry; 2553 xfs_attr_leaf_entry_t *entry;
2535 xfs_attr_leaf_name_remote_t *name_rmt; 2554 xfs_attr_leaf_name_remote_t *name_rmt;
2536 xfs_dabuf_t *bp; 2555 struct xfs_buf *bp;
2537 int error; 2556 int error;
2538 2557
2539 trace_xfs_attr_leaf_setflag(args); 2558 trace_xfs_attr_leaf_setflag(args);
@@ -2548,7 +2567,7 @@ xfs_attr_leaf_setflag(xfs_da_args_t *args)
2548 } 2567 }
2549 ASSERT(bp != NULL); 2568 ASSERT(bp != NULL);
2550 2569
2551 leaf = bp->data; 2570 leaf = bp->b_addr;
2552 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 2571 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
2553 ASSERT(args->index < be16_to_cpu(leaf->hdr.count)); 2572 ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
2554 ASSERT(args->index >= 0); 2573 ASSERT(args->index >= 0);
@@ -2556,16 +2575,15 @@ xfs_attr_leaf_setflag(xfs_da_args_t *args)
2556 2575
2557 ASSERT((entry->flags & XFS_ATTR_INCOMPLETE) == 0); 2576 ASSERT((entry->flags & XFS_ATTR_INCOMPLETE) == 0);
2558 entry->flags |= XFS_ATTR_INCOMPLETE; 2577 entry->flags |= XFS_ATTR_INCOMPLETE;
2559 xfs_da_log_buf(args->trans, bp, 2578 xfs_trans_log_buf(args->trans, bp,
2560 XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry))); 2579 XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
2561 if ((entry->flags & XFS_ATTR_LOCAL) == 0) { 2580 if ((entry->flags & XFS_ATTR_LOCAL) == 0) {
2562 name_rmt = xfs_attr_leaf_name_remote(leaf, args->index); 2581 name_rmt = xfs_attr_leaf_name_remote(leaf, args->index);
2563 name_rmt->valueblk = 0; 2582 name_rmt->valueblk = 0;
2564 name_rmt->valuelen = 0; 2583 name_rmt->valuelen = 0;
2565 xfs_da_log_buf(args->trans, bp, 2584 xfs_trans_log_buf(args->trans, bp,
2566 XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt))); 2585 XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
2567 } 2586 }
2568 xfs_da_buf_done(bp);
2569 2587
2570 /* 2588 /*
2571 * Commit the flag value change and start the next trans in series. 2589 * Commit the flag value change and start the next trans in series.
@@ -2586,7 +2604,7 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args)
2586 xfs_attr_leafblock_t *leaf1, *leaf2; 2604 xfs_attr_leafblock_t *leaf1, *leaf2;
2587 xfs_attr_leaf_entry_t *entry1, *entry2; 2605 xfs_attr_leaf_entry_t *entry1, *entry2;
2588 xfs_attr_leaf_name_remote_t *name_rmt; 2606 xfs_attr_leaf_name_remote_t *name_rmt;
2589 xfs_dabuf_t *bp1, *bp2; 2607 struct xfs_buf *bp1, *bp2;
2590 int error; 2608 int error;
2591#ifdef DEBUG 2609#ifdef DEBUG
2592 xfs_attr_leaf_name_local_t *name_loc; 2610 xfs_attr_leaf_name_local_t *name_loc;
@@ -2620,13 +2638,13 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args)
2620 bp2 = bp1; 2638 bp2 = bp1;
2621 } 2639 }
2622 2640
2623 leaf1 = bp1->data; 2641 leaf1 = bp1->b_addr;
2624 ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 2642 ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
2625 ASSERT(args->index < be16_to_cpu(leaf1->hdr.count)); 2643 ASSERT(args->index < be16_to_cpu(leaf1->hdr.count));
2626 ASSERT(args->index >= 0); 2644 ASSERT(args->index >= 0);
2627 entry1 = &leaf1->entries[ args->index ]; 2645 entry1 = &leaf1->entries[ args->index ];
2628 2646
2629 leaf2 = bp2->data; 2647 leaf2 = bp2->b_addr;
2630 ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 2648 ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
2631 ASSERT(args->index2 < be16_to_cpu(leaf2->hdr.count)); 2649 ASSERT(args->index2 < be16_to_cpu(leaf2->hdr.count));
2632 ASSERT(args->index2 >= 0); 2650 ASSERT(args->index2 >= 0);
@@ -2660,30 +2678,27 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args)
2660 ASSERT((entry2->flags & XFS_ATTR_INCOMPLETE) == 0); 2678 ASSERT((entry2->flags & XFS_ATTR_INCOMPLETE) == 0);
2661 2679
2662 entry1->flags &= ~XFS_ATTR_INCOMPLETE; 2680 entry1->flags &= ~XFS_ATTR_INCOMPLETE;
2663 xfs_da_log_buf(args->trans, bp1, 2681 xfs_trans_log_buf(args->trans, bp1,
2664 XFS_DA_LOGRANGE(leaf1, entry1, sizeof(*entry1))); 2682 XFS_DA_LOGRANGE(leaf1, entry1, sizeof(*entry1)));
2665 if (args->rmtblkno) { 2683 if (args->rmtblkno) {
2666 ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0); 2684 ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0);
2667 name_rmt = xfs_attr_leaf_name_remote(leaf1, args->index); 2685 name_rmt = xfs_attr_leaf_name_remote(leaf1, args->index);
2668 name_rmt->valueblk = cpu_to_be32(args->rmtblkno); 2686 name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
2669 name_rmt->valuelen = cpu_to_be32(args->valuelen); 2687 name_rmt->valuelen = cpu_to_be32(args->valuelen);
2670 xfs_da_log_buf(args->trans, bp1, 2688 xfs_trans_log_buf(args->trans, bp1,
2671 XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt))); 2689 XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt)));
2672 } 2690 }
2673 2691
2674 entry2->flags |= XFS_ATTR_INCOMPLETE; 2692 entry2->flags |= XFS_ATTR_INCOMPLETE;
2675 xfs_da_log_buf(args->trans, bp2, 2693 xfs_trans_log_buf(args->trans, bp2,
2676 XFS_DA_LOGRANGE(leaf2, entry2, sizeof(*entry2))); 2694 XFS_DA_LOGRANGE(leaf2, entry2, sizeof(*entry2)));
2677 if ((entry2->flags & XFS_ATTR_LOCAL) == 0) { 2695 if ((entry2->flags & XFS_ATTR_LOCAL) == 0) {
2678 name_rmt = xfs_attr_leaf_name_remote(leaf2, args->index2); 2696 name_rmt = xfs_attr_leaf_name_remote(leaf2, args->index2);
2679 name_rmt->valueblk = 0; 2697 name_rmt->valueblk = 0;
2680 name_rmt->valuelen = 0; 2698 name_rmt->valuelen = 0;
2681 xfs_da_log_buf(args->trans, bp2, 2699 xfs_trans_log_buf(args->trans, bp2,
2682 XFS_DA_LOGRANGE(leaf2, name_rmt, sizeof(*name_rmt))); 2700 XFS_DA_LOGRANGE(leaf2, name_rmt, sizeof(*name_rmt)));
2683 } 2701 }
2684 xfs_da_buf_done(bp1);
2685 if (bp1 != bp2)
2686 xfs_da_buf_done(bp2);
2687 2702
2688 /* 2703 /*
2689 * Commit the flag value change and start the next trans in series. 2704 * Commit the flag value change and start the next trans in series.
@@ -2706,7 +2721,7 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp)
2706{ 2721{
2707 xfs_da_blkinfo_t *info; 2722 xfs_da_blkinfo_t *info;
2708 xfs_daddr_t blkno; 2723 xfs_daddr_t blkno;
2709 xfs_dabuf_t *bp; 2724 struct xfs_buf *bp;
2710 int error; 2725 int error;
2711 2726
2712 /* 2727 /*
@@ -2718,20 +2733,20 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp)
2718 error = xfs_da_read_buf(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK); 2733 error = xfs_da_read_buf(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK);
2719 if (error) 2734 if (error)
2720 return(error); 2735 return(error);
2721 blkno = xfs_da_blkno(bp); 2736 blkno = XFS_BUF_ADDR(bp);
2722 2737
2723 /* 2738 /*
2724 * Invalidate the tree, even if the "tree" is only a single leaf block. 2739 * Invalidate the tree, even if the "tree" is only a single leaf block.
2725 * This is a depth-first traversal! 2740 * This is a depth-first traversal!
2726 */ 2741 */
2727 info = bp->data; 2742 info = bp->b_addr;
2728 if (info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) { 2743 if (info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
2729 error = xfs_attr_node_inactive(trans, dp, bp, 1); 2744 error = xfs_attr_node_inactive(trans, dp, bp, 1);
2730 } else if (info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) { 2745 } else if (info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) {
2731 error = xfs_attr_leaf_inactive(trans, dp, bp); 2746 error = xfs_attr_leaf_inactive(trans, dp, bp);
2732 } else { 2747 } else {
2733 error = XFS_ERROR(EIO); 2748 error = XFS_ERROR(EIO);
2734 xfs_da_brelse(*trans, bp); 2749 xfs_trans_brelse(*trans, bp);
2735 } 2750 }
2736 if (error) 2751 if (error)
2737 return(error); 2752 return(error);
@@ -2742,7 +2757,7 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp)
2742 error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK); 2757 error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK);
2743 if (error) 2758 if (error)
2744 return(error); 2759 return(error);
2745 xfs_da_binval(*trans, bp); /* remove from cache */ 2760 xfs_trans_binval(*trans, bp); /* remove from cache */
2746 /* 2761 /*
2747 * Commit the invalidate and start the next transaction. 2762 * Commit the invalidate and start the next transaction.
2748 */ 2763 */
@@ -2756,34 +2771,37 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp)
2756 * We're doing a depth-first traversal in order to invalidate everything. 2771 * We're doing a depth-first traversal in order to invalidate everything.
2757 */ 2772 */
2758STATIC int 2773STATIC int
2759xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp, 2774xfs_attr_node_inactive(
2760 int level) 2775 struct xfs_trans **trans,
2776 struct xfs_inode *dp,
2777 struct xfs_buf *bp,
2778 int level)
2761{ 2779{
2762 xfs_da_blkinfo_t *info; 2780 xfs_da_blkinfo_t *info;
2763 xfs_da_intnode_t *node; 2781 xfs_da_intnode_t *node;
2764 xfs_dablk_t child_fsb; 2782 xfs_dablk_t child_fsb;
2765 xfs_daddr_t parent_blkno, child_blkno; 2783 xfs_daddr_t parent_blkno, child_blkno;
2766 int error, count, i; 2784 int error, count, i;
2767 xfs_dabuf_t *child_bp; 2785 struct xfs_buf *child_bp;
2768 2786
2769 /* 2787 /*
2770 * Since this code is recursive (gasp!) we must protect ourselves. 2788 * Since this code is recursive (gasp!) we must protect ourselves.
2771 */ 2789 */
2772 if (level > XFS_DA_NODE_MAXDEPTH) { 2790 if (level > XFS_DA_NODE_MAXDEPTH) {
2773 xfs_da_brelse(*trans, bp); /* no locks for later trans */ 2791 xfs_trans_brelse(*trans, bp); /* no locks for later trans */
2774 return(XFS_ERROR(EIO)); 2792 return(XFS_ERROR(EIO));
2775 } 2793 }
2776 2794
2777 node = bp->data; 2795 node = bp->b_addr;
2778 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 2796 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
2779 parent_blkno = xfs_da_blkno(bp); /* save for re-read later */ 2797 parent_blkno = XFS_BUF_ADDR(bp); /* save for re-read later */
2780 count = be16_to_cpu(node->hdr.count); 2798 count = be16_to_cpu(node->hdr.count);
2781 if (!count) { 2799 if (!count) {
2782 xfs_da_brelse(*trans, bp); 2800 xfs_trans_brelse(*trans, bp);
2783 return(0); 2801 return(0);
2784 } 2802 }
2785 child_fsb = be32_to_cpu(node->btree[0].before); 2803 child_fsb = be32_to_cpu(node->btree[0].before);
2786 xfs_da_brelse(*trans, bp); /* no locks for later trans */ 2804 xfs_trans_brelse(*trans, bp); /* no locks for later trans */
2787 2805
2788 /* 2806 /*
2789 * If this is the node level just above the leaves, simply loop 2807 * If this is the node level just above the leaves, simply loop
@@ -2803,12 +2821,12 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
2803 return(error); 2821 return(error);
2804 if (child_bp) { 2822 if (child_bp) {
2805 /* save for re-read later */ 2823 /* save for re-read later */
2806 child_blkno = xfs_da_blkno(child_bp); 2824 child_blkno = XFS_BUF_ADDR(child_bp);
2807 2825
2808 /* 2826 /*
2809 * Invalidate the subtree, however we have to. 2827 * Invalidate the subtree, however we have to.
2810 */ 2828 */
2811 info = child_bp->data; 2829 info = child_bp->b_addr;
2812 if (info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) { 2830 if (info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
2813 error = xfs_attr_node_inactive(trans, dp, 2831 error = xfs_attr_node_inactive(trans, dp,
2814 child_bp, level+1); 2832 child_bp, level+1);
@@ -2817,7 +2835,7 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
2817 child_bp); 2835 child_bp);
2818 } else { 2836 } else {
2819 error = XFS_ERROR(EIO); 2837 error = XFS_ERROR(EIO);
2820 xfs_da_brelse(*trans, child_bp); 2838 xfs_trans_brelse(*trans, child_bp);
2821 } 2839 }
2822 if (error) 2840 if (error)
2823 return(error); 2841 return(error);
@@ -2830,7 +2848,7 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
2830 &child_bp, XFS_ATTR_FORK); 2848 &child_bp, XFS_ATTR_FORK);
2831 if (error) 2849 if (error)
2832 return(error); 2850 return(error);
2833 xfs_da_binval(*trans, child_bp); 2851 xfs_trans_binval(*trans, child_bp);
2834 } 2852 }
2835 2853
2836 /* 2854 /*
@@ -2843,7 +2861,7 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
2843 if (error) 2861 if (error)
2844 return(error); 2862 return(error);
2845 child_fsb = be32_to_cpu(node->btree[i+1].before); 2863 child_fsb = be32_to_cpu(node->btree[i+1].before);
2846 xfs_da_brelse(*trans, bp); 2864 xfs_trans_brelse(*trans, bp);
2847 } 2865 }
2848 /* 2866 /*
2849 * Atomically commit the whole invalidate stuff. 2867 * Atomically commit the whole invalidate stuff.
@@ -2863,7 +2881,10 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
2863 * caught holding something that the logging code wants to flush to disk. 2881 * caught holding something that the logging code wants to flush to disk.
2864 */ 2882 */
2865STATIC int 2883STATIC int
2866xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp) 2884xfs_attr_leaf_inactive(
2885 struct xfs_trans **trans,
2886 struct xfs_inode *dp,
2887 struct xfs_buf *bp)
2867{ 2888{
2868 xfs_attr_leafblock_t *leaf; 2889 xfs_attr_leafblock_t *leaf;
2869 xfs_attr_leaf_entry_t *entry; 2890 xfs_attr_leaf_entry_t *entry;
@@ -2871,7 +2892,7 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp)
2871 xfs_attr_inactive_list_t *list, *lp; 2892 xfs_attr_inactive_list_t *list, *lp;
2872 int error, count, size, tmp, i; 2893 int error, count, size, tmp, i;
2873 2894
2874 leaf = bp->data; 2895 leaf = bp->b_addr;
2875 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 2896 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
2876 2897
2877 /* 2898 /*
@@ -2892,7 +2913,7 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp)
2892 * If there are no "remote" values, we're done. 2913 * If there are no "remote" values, we're done.
2893 */ 2914 */
2894 if (count == 0) { 2915 if (count == 0) {
2895 xfs_da_brelse(*trans, bp); 2916 xfs_trans_brelse(*trans, bp);
2896 return(0); 2917 return(0);
2897 } 2918 }
2898 2919
@@ -2919,7 +2940,7 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp)
2919 } 2940 }
2920 } 2941 }
2921 } 2942 }
2922 xfs_da_brelse(*trans, bp); /* unlock for trans. in freextent() */ 2943 xfs_trans_brelse(*trans, bp); /* unlock for trans. in freextent() */
2923 2944
2924 /* 2945 /*
2925 * Invalidate each of the "remote" value extents. 2946 * Invalidate each of the "remote" value extents.
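Editor's note: the xfs_attr_leaf.c hunks above are all instances of one mechanical conversion. With the xfs_dabuf wrapper gone, the attr leaf code takes a plain struct xfs_buf, reads block contents through bp->b_addr instead of bp->data, obtains the disk address with XFS_BUF_ADDR() instead of xfs_da_blkno(), and logs/releases/invalidates buffers through xfs_trans_log_buf()/xfs_trans_brelse()/xfs_trans_binval() rather than the xfs_da_* wrappers, which also removes the xfs_da_buf_done() calls. A minimal sketch of the new idiom follows; mark_entry_incomplete() is a hypothetical helper, not part of this patch, and only strings together calls that appear in the diff above.

    /*
     * Hypothetical helper (illustration only): mark one attr leaf entry
     * incomplete and log the change with the post-conversion interfaces.
     */
    static void
    mark_entry_incomplete(struct xfs_trans *tp, struct xfs_buf *bp, int index)
    {
        xfs_attr_leafblock_t *leaf = bp->b_addr;    /* was bp->data */
        xfs_attr_leaf_entry_t *entry = &leaf->entries[index];

        ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));

        entry->flags |= XFS_ATTR_INCOMPLETE;

        /* xfs_da_log_buf() becomes the transaction helper ... */
        xfs_trans_log_buf(tp, bp,
                          XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));

        /* ... and there is no xfs_da_buf_done(bp) to drop any more. */
    }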
diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h
index 9c7d22fdcf4d..dea17722945e 100644
--- a/fs/xfs/xfs_attr_leaf.h
+++ b/fs/xfs/xfs_attr_leaf.h
@@ -31,7 +31,6 @@
31struct attrlist; 31struct attrlist;
32struct attrlist_cursor_kern; 32struct attrlist_cursor_kern;
33struct xfs_attr_list_context; 33struct xfs_attr_list_context;
34struct xfs_dabuf;
35struct xfs_da_args; 34struct xfs_da_args;
36struct xfs_da_state; 35struct xfs_da_state;
37struct xfs_da_state_blk; 36struct xfs_da_state_blk;
@@ -215,7 +214,7 @@ int xfs_attr_shortform_getvalue(struct xfs_da_args *args);
215int xfs_attr_shortform_to_leaf(struct xfs_da_args *args); 214int xfs_attr_shortform_to_leaf(struct xfs_da_args *args);
216int xfs_attr_shortform_remove(struct xfs_da_args *args); 215int xfs_attr_shortform_remove(struct xfs_da_args *args);
217int xfs_attr_shortform_list(struct xfs_attr_list_context *context); 216int xfs_attr_shortform_list(struct xfs_attr_list_context *context);
218int xfs_attr_shortform_allfit(struct xfs_dabuf *bp, struct xfs_inode *dp); 217int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
219int xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes); 218int xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes);
220 219
221 220
@@ -223,7 +222,7 @@ int xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes);
223 * Internal routines when attribute fork size == XFS_LBSIZE(mp). 222 * Internal routines when attribute fork size == XFS_LBSIZE(mp).
224 */ 223 */
225int xfs_attr_leaf_to_node(struct xfs_da_args *args); 224int xfs_attr_leaf_to_node(struct xfs_da_args *args);
226int xfs_attr_leaf_to_shortform(struct xfs_dabuf *bp, 225int xfs_attr_leaf_to_shortform(struct xfs_buf *bp,
227 struct xfs_da_args *args, int forkoff); 226 struct xfs_da_args *args, int forkoff);
228int xfs_attr_leaf_clearflag(struct xfs_da_args *args); 227int xfs_attr_leaf_clearflag(struct xfs_da_args *args);
229int xfs_attr_leaf_setflag(struct xfs_da_args *args); 228int xfs_attr_leaf_setflag(struct xfs_da_args *args);
@@ -235,14 +234,14 @@ int xfs_attr_leaf_flipflags(xfs_da_args_t *args);
235int xfs_attr_leaf_split(struct xfs_da_state *state, 234int xfs_attr_leaf_split(struct xfs_da_state *state,
236 struct xfs_da_state_blk *oldblk, 235 struct xfs_da_state_blk *oldblk,
237 struct xfs_da_state_blk *newblk); 236 struct xfs_da_state_blk *newblk);
238int xfs_attr_leaf_lookup_int(struct xfs_dabuf *leaf, 237int xfs_attr_leaf_lookup_int(struct xfs_buf *leaf,
239 struct xfs_da_args *args); 238 struct xfs_da_args *args);
240int xfs_attr_leaf_getvalue(struct xfs_dabuf *bp, struct xfs_da_args *args); 239int xfs_attr_leaf_getvalue(struct xfs_buf *bp, struct xfs_da_args *args);
241int xfs_attr_leaf_add(struct xfs_dabuf *leaf_buffer, 240int xfs_attr_leaf_add(struct xfs_buf *leaf_buffer,
242 struct xfs_da_args *args); 241 struct xfs_da_args *args);
243int xfs_attr_leaf_remove(struct xfs_dabuf *leaf_buffer, 242int xfs_attr_leaf_remove(struct xfs_buf *leaf_buffer,
244 struct xfs_da_args *args); 243 struct xfs_da_args *args);
245int xfs_attr_leaf_list_int(struct xfs_dabuf *bp, 244int xfs_attr_leaf_list_int(struct xfs_buf *bp,
246 struct xfs_attr_list_context *context); 245 struct xfs_attr_list_context *context);
247 246
248/* 247/*
@@ -257,9 +256,9 @@ int xfs_attr_root_inactive(struct xfs_trans **trans, struct xfs_inode *dp);
257/* 256/*
258 * Utility routines. 257 * Utility routines.
259 */ 258 */
260xfs_dahash_t xfs_attr_leaf_lasthash(struct xfs_dabuf *bp, int *count); 259xfs_dahash_t xfs_attr_leaf_lasthash(struct xfs_buf *bp, int *count);
261int xfs_attr_leaf_order(struct xfs_dabuf *leaf1_bp, 260int xfs_attr_leaf_order(struct xfs_buf *leaf1_bp,
262 struct xfs_dabuf *leaf2_bp); 261 struct xfs_buf *leaf2_bp);
263int xfs_attr_leaf_newentsize(int namelen, int valuelen, int blocksize, 262int xfs_attr_leaf_newentsize(int namelen, int valuelen, int blocksize,
264 int *local); 263 int *local);
265#endif /* __XFS_ATTR_LEAF_H__ */ 264#endif /* __XFS_ATTR_LEAF_H__ */
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 58b815ec8c91..848ffa77707b 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -5517,7 +5517,7 @@ xfs_getbmap(
5517 if (xfs_get_extsz_hint(ip) || 5517 if (xfs_get_extsz_hint(ip) ||
5518 ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){ 5518 ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
5519 prealloced = 1; 5519 prealloced = 1;
5520 fixlen = XFS_MAXIOFFSET(mp); 5520 fixlen = mp->m_super->s_maxbytes;
5521 } else { 5521 } else {
5522 prealloced = 0; 5522 prealloced = 0;
5523 fixlen = XFS_ISIZE(ip); 5523 fixlen = XFS_ISIZE(ip);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 269b35c084da..d7a9dd735e1e 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -164,14 +164,49 @@ xfs_buf_stale(
164 ASSERT(atomic_read(&bp->b_hold) >= 1); 164 ASSERT(atomic_read(&bp->b_hold) >= 1);
165} 165}
166 166
167static int
168xfs_buf_get_maps(
169 struct xfs_buf *bp,
170 int map_count)
171{
172 ASSERT(bp->b_maps == NULL);
173 bp->b_map_count = map_count;
174
175 if (map_count == 1) {
176 bp->b_maps = &bp->b_map;
177 return 0;
178 }
179
180 bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
181 KM_NOFS);
182 if (!bp->b_maps)
183 return ENOMEM;
184 return 0;
185}
186
187/*
188 * Frees b_maps if it was allocated.
189 */
190static void
191xfs_buf_free_maps(
192 struct xfs_buf *bp)
193{
194 if (bp->b_maps != &bp->b_map) {
195 kmem_free(bp->b_maps);
196 bp->b_maps = NULL;
197 }
198}
199
167struct xfs_buf * 200struct xfs_buf *
168xfs_buf_alloc( 201_xfs_buf_alloc(
169 struct xfs_buftarg *target, 202 struct xfs_buftarg *target,
170 xfs_daddr_t blkno, 203 struct xfs_buf_map *map,
171 size_t numblks, 204 int nmaps,
172 xfs_buf_flags_t flags) 205 xfs_buf_flags_t flags)
173{ 206{
174 struct xfs_buf *bp; 207 struct xfs_buf *bp;
208 int error;
209 int i;
175 210
176 bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS); 211 bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
177 if (unlikely(!bp)) 212 if (unlikely(!bp))
@@ -192,16 +227,28 @@ xfs_buf_alloc(
192 sema_init(&bp->b_sema, 0); /* held, no waiters */ 227 sema_init(&bp->b_sema, 0); /* held, no waiters */
193 XB_SET_OWNER(bp); 228 XB_SET_OWNER(bp);
194 bp->b_target = target; 229 bp->b_target = target;
230 bp->b_flags = flags;
195 231
196 /* 232 /*
197 * Set length and io_length to the same value initially. 233 * Set length and io_length to the same value initially.
198 * I/O routines should use io_length, which will be the same in 234 * I/O routines should use io_length, which will be the same in
199 * most cases but may be reset (e.g. XFS recovery). 235 * most cases but may be reset (e.g. XFS recovery).
200 */ 236 */
201 bp->b_length = numblks; 237 error = xfs_buf_get_maps(bp, nmaps);
202 bp->b_io_length = numblks; 238 if (error) {
203 bp->b_flags = flags; 239 kmem_zone_free(xfs_buf_zone, bp);
204 bp->b_bn = blkno; 240 return NULL;
241 }
242
243 bp->b_bn = map[0].bm_bn;
244 bp->b_length = 0;
245 for (i = 0; i < nmaps; i++) {
246 bp->b_maps[i].bm_bn = map[i].bm_bn;
247 bp->b_maps[i].bm_len = map[i].bm_len;
248 bp->b_length += map[i].bm_len;
249 }
250 bp->b_io_length = bp->b_length;
251
205 atomic_set(&bp->b_pin_count, 0); 252 atomic_set(&bp->b_pin_count, 0);
206 init_waitqueue_head(&bp->b_waiters); 253 init_waitqueue_head(&bp->b_waiters);
207 254
@@ -280,6 +327,7 @@ xfs_buf_free(
280 } else if (bp->b_flags & _XBF_KMEM) 327 } else if (bp->b_flags & _XBF_KMEM)
281 kmem_free(bp->b_addr); 328 kmem_free(bp->b_addr);
282 _xfs_buf_free_pages(bp); 329 _xfs_buf_free_pages(bp);
330 xfs_buf_free_maps(bp);
283 kmem_zone_free(xfs_buf_zone, bp); 331 kmem_zone_free(xfs_buf_zone, bp);
284} 332}
285 333
@@ -327,8 +375,9 @@ xfs_buf_allocate_memory(
327 } 375 }
328 376
329use_alloc_page: 377use_alloc_page:
330 start = BBTOB(bp->b_bn) >> PAGE_SHIFT; 378 start = BBTOB(bp->b_map.bm_bn) >> PAGE_SHIFT;
331 end = (BBTOB(bp->b_bn + bp->b_length) + PAGE_SIZE - 1) >> PAGE_SHIFT; 379 end = (BBTOB(bp->b_map.bm_bn + bp->b_length) + PAGE_SIZE - 1)
380 >> PAGE_SHIFT;
332 page_count = end - start; 381 page_count = end - start;
333 error = _xfs_buf_get_pages(bp, page_count, flags); 382 error = _xfs_buf_get_pages(bp, page_count, flags);
334 if (unlikely(error)) 383 if (unlikely(error))
@@ -425,8 +474,8 @@ _xfs_buf_map_pages(
425xfs_buf_t * 474xfs_buf_t *
426_xfs_buf_find( 475_xfs_buf_find(
427 struct xfs_buftarg *btp, 476 struct xfs_buftarg *btp,
428 xfs_daddr_t blkno, 477 struct xfs_buf_map *map,
429 size_t numblks, 478 int nmaps,
430 xfs_buf_flags_t flags, 479 xfs_buf_flags_t flags,
431 xfs_buf_t *new_bp) 480 xfs_buf_t *new_bp)
432{ 481{
@@ -435,7 +484,12 @@ _xfs_buf_find(
435 struct rb_node **rbp; 484 struct rb_node **rbp;
436 struct rb_node *parent; 485 struct rb_node *parent;
437 xfs_buf_t *bp; 486 xfs_buf_t *bp;
487 xfs_daddr_t blkno = map[0].bm_bn;
488 int numblks = 0;
489 int i;
438 490
491 for (i = 0; i < nmaps; i++)
492 numblks += map[i].bm_len;
439 numbytes = BBTOB(numblks); 493 numbytes = BBTOB(numblks);
440 494
441 /* Check for IOs smaller than the sector size / not sector aligned */ 495 /* Check for IOs smaller than the sector size / not sector aligned */
@@ -527,31 +581,31 @@ found:
527 * more hits than misses. 581 * more hits than misses.
528 */ 582 */
529struct xfs_buf * 583struct xfs_buf *
530xfs_buf_get( 584xfs_buf_get_map(
531 xfs_buftarg_t *target, 585 struct xfs_buftarg *target,
532 xfs_daddr_t blkno, 586 struct xfs_buf_map *map,
533 size_t numblks, 587 int nmaps,
534 xfs_buf_flags_t flags) 588 xfs_buf_flags_t flags)
535{ 589{
536 struct xfs_buf *bp; 590 struct xfs_buf *bp;
537 struct xfs_buf *new_bp; 591 struct xfs_buf *new_bp;
538 int error = 0; 592 int error = 0;
539 593
540 bp = _xfs_buf_find(target, blkno, numblks, flags, NULL); 594 bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
541 if (likely(bp)) 595 if (likely(bp))
542 goto found; 596 goto found;
543 597
544 new_bp = xfs_buf_alloc(target, blkno, numblks, flags); 598 new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
545 if (unlikely(!new_bp)) 599 if (unlikely(!new_bp))
546 return NULL; 600 return NULL;
547 601
548 error = xfs_buf_allocate_memory(new_bp, flags); 602 error = xfs_buf_allocate_memory(new_bp, flags);
549 if (error) { 603 if (error) {
550 kmem_zone_free(xfs_buf_zone, new_bp); 604 xfs_buf_free(new_bp);
551 return NULL; 605 return NULL;
552 } 606 }
553 607
554 bp = _xfs_buf_find(target, blkno, numblks, flags, new_bp); 608 bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
555 if (!bp) { 609 if (!bp) {
556 xfs_buf_free(new_bp); 610 xfs_buf_free(new_bp);
557 return NULL; 611 return NULL;
@@ -560,8 +614,6 @@ xfs_buf_get(
560 if (bp != new_bp) 614 if (bp != new_bp)
561 xfs_buf_free(new_bp); 615 xfs_buf_free(new_bp);
562 616
563 bp->b_io_length = bp->b_length;
564
565found: 617found:
566 if (!bp->b_addr) { 618 if (!bp->b_addr) {
567 error = _xfs_buf_map_pages(bp, flags); 619 error = _xfs_buf_map_pages(bp, flags);
@@ -584,7 +636,7 @@ _xfs_buf_read(
584 xfs_buf_flags_t flags) 636 xfs_buf_flags_t flags)
585{ 637{
586 ASSERT(!(flags & XBF_WRITE)); 638 ASSERT(!(flags & XBF_WRITE));
587 ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL); 639 ASSERT(bp->b_map.bm_bn != XFS_BUF_DADDR_NULL);
588 640
589 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD); 641 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
590 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); 642 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
@@ -596,17 +648,17 @@ _xfs_buf_read(
596} 648}
597 649
598xfs_buf_t * 650xfs_buf_t *
599xfs_buf_read( 651xfs_buf_read_map(
600 xfs_buftarg_t *target, 652 struct xfs_buftarg *target,
601 xfs_daddr_t blkno, 653 struct xfs_buf_map *map,
602 size_t numblks, 654 int nmaps,
603 xfs_buf_flags_t flags) 655 xfs_buf_flags_t flags)
604{ 656{
605 xfs_buf_t *bp; 657 struct xfs_buf *bp;
606 658
607 flags |= XBF_READ; 659 flags |= XBF_READ;
608 660
609 bp = xfs_buf_get(target, blkno, numblks, flags); 661 bp = xfs_buf_get_map(target, map, nmaps, flags);
610 if (bp) { 662 if (bp) {
611 trace_xfs_buf_read(bp, flags, _RET_IP_); 663 trace_xfs_buf_read(bp, flags, _RET_IP_);
612 664
@@ -634,15 +686,15 @@ xfs_buf_read(
634 * safe manner. 686 * safe manner.
635 */ 687 */
636void 688void
637xfs_buf_readahead( 689xfs_buf_readahead_map(
638 xfs_buftarg_t *target, 690 struct xfs_buftarg *target,
639 xfs_daddr_t blkno, 691 struct xfs_buf_map *map,
640 size_t numblks) 692 int nmaps)
641{ 693{
642 if (bdi_read_congested(target->bt_bdi)) 694 if (bdi_read_congested(target->bt_bdi))
643 return; 695 return;
644 696
645 xfs_buf_read(target, blkno, numblks, 697 xfs_buf_read_map(target, map, nmaps,
646 XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD); 698 XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
647} 699}
648 700
@@ -665,8 +717,10 @@ xfs_buf_read_uncached(
665 return NULL; 717 return NULL;
666 718
667 /* set up the buffer for a read IO */ 719 /* set up the buffer for a read IO */
668 XFS_BUF_SET_ADDR(bp, daddr); 720 ASSERT(bp->b_map_count == 1);
669 XFS_BUF_READ(bp); 721 bp->b_bn = daddr;
722 bp->b_maps[0].bm_bn = daddr;
723 bp->b_flags |= XBF_READ;
670 724
671 xfsbdstrat(target->bt_mount, bp); 725 xfsbdstrat(target->bt_mount, bp);
672 error = xfs_buf_iowait(bp); 726 error = xfs_buf_iowait(bp);
@@ -694,7 +748,11 @@ xfs_buf_set_empty(
694 bp->b_addr = NULL; 748 bp->b_addr = NULL;
695 bp->b_length = numblks; 749 bp->b_length = numblks;
696 bp->b_io_length = numblks; 750 bp->b_io_length = numblks;
751
752 ASSERT(bp->b_map_count == 1);
697 bp->b_bn = XFS_BUF_DADDR_NULL; 753 bp->b_bn = XFS_BUF_DADDR_NULL;
754 bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
755 bp->b_maps[0].bm_len = bp->b_length;
698} 756}
699 757
700static inline struct page * 758static inline struct page *
@@ -758,9 +816,10 @@ xfs_buf_get_uncached(
758{ 816{
759 unsigned long page_count; 817 unsigned long page_count;
760 int error, i; 818 int error, i;
761 xfs_buf_t *bp; 819 struct xfs_buf *bp;
820 DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
762 821
763 bp = xfs_buf_alloc(target, XFS_BUF_DADDR_NULL, numblks, 0); 822 bp = _xfs_buf_alloc(target, &map, 1, 0);
764 if (unlikely(bp == NULL)) 823 if (unlikely(bp == NULL))
765 goto fail; 824 goto fail;
766 825
@@ -791,6 +850,7 @@ xfs_buf_get_uncached(
791 __free_page(bp->b_pages[i]); 850 __free_page(bp->b_pages[i]);
792 _xfs_buf_free_pages(bp); 851 _xfs_buf_free_pages(bp);
793 fail_free_buf: 852 fail_free_buf:
853 xfs_buf_free_maps(bp);
794 kmem_zone_free(xfs_buf_zone, bp); 854 kmem_zone_free(xfs_buf_zone, bp);
795 fail: 855 fail:
796 return NULL; 856 return NULL;
@@ -1144,36 +1204,39 @@ xfs_buf_bio_end_io(
1144 bio_put(bio); 1204 bio_put(bio);
1145} 1205}
1146 1206
1147STATIC void 1207static void
1148_xfs_buf_ioapply( 1208xfs_buf_ioapply_map(
1149 xfs_buf_t *bp) 1209 struct xfs_buf *bp,
1210 int map,
1211 int *buf_offset,
1212 int *count,
1213 int rw)
1150{ 1214{
1151 int rw, map_i, total_nr_pages, nr_pages; 1215 int page_index;
1152 struct bio *bio; 1216 int total_nr_pages = bp->b_page_count;
1153 int offset = bp->b_offset; 1217 int nr_pages;
1154 int size = BBTOB(bp->b_io_length); 1218 struct bio *bio;
1155 sector_t sector = bp->b_bn; 1219 sector_t sector = bp->b_maps[map].bm_bn;
1220 int size;
1221 int offset;
1156 1222
1157 total_nr_pages = bp->b_page_count; 1223 total_nr_pages = bp->b_page_count;
1158 map_i = 0;
1159 1224
1160 if (bp->b_flags & XBF_WRITE) { 1225 /* skip the pages in the buffer before the start offset */
1161 if (bp->b_flags & XBF_SYNCIO) 1226 page_index = 0;
1162 rw = WRITE_SYNC; 1227 offset = *buf_offset;
1163 else 1228 while (offset >= PAGE_SIZE) {
1164 rw = WRITE; 1229 page_index++;
1165 if (bp->b_flags & XBF_FUA) 1230 offset -= PAGE_SIZE;
1166 rw |= REQ_FUA;
1167 if (bp->b_flags & XBF_FLUSH)
1168 rw |= REQ_FLUSH;
1169 } else if (bp->b_flags & XBF_READ_AHEAD) {
1170 rw = READA;
1171 } else {
1172 rw = READ;
1173 } 1231 }
1174 1232
1175 /* we only use the buffer cache for meta-data */ 1233 /*
1176 rw |= REQ_META; 1234 * Limit the IO size to the length of the current vector, and update the
1235 * remaining IO count for the next time around.
1236 */
1237 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
1238 *count -= size;
1239 *buf_offset += size;
1177 1240
1178next_chunk: 1241next_chunk:
1179 atomic_inc(&bp->b_io_remaining); 1242 atomic_inc(&bp->b_io_remaining);
@@ -1188,13 +1251,14 @@ next_chunk:
1188 bio->bi_private = bp; 1251 bio->bi_private = bp;
1189 1252
1190 1253
1191 for (; size && nr_pages; nr_pages--, map_i++) { 1254 for (; size && nr_pages; nr_pages--, page_index++) {
1192 int rbytes, nbytes = PAGE_SIZE - offset; 1255 int rbytes, nbytes = PAGE_SIZE - offset;
1193 1256
1194 if (nbytes > size) 1257 if (nbytes > size)
1195 nbytes = size; 1258 nbytes = size;
1196 1259
1197 rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset); 1260 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
1261 offset);
1198 if (rbytes < nbytes) 1262 if (rbytes < nbytes)
1199 break; 1263 break;
1200 1264
@@ -1216,6 +1280,54 @@ next_chunk:
1216 xfs_buf_ioerror(bp, EIO); 1280 xfs_buf_ioerror(bp, EIO);
1217 bio_put(bio); 1281 bio_put(bio);
1218 } 1282 }
1283
1284}
1285
1286STATIC void
1287_xfs_buf_ioapply(
1288 struct xfs_buf *bp)
1289{
1290 struct blk_plug plug;
1291 int rw;
1292 int offset;
1293 int size;
1294 int i;
1295
1296 if (bp->b_flags & XBF_WRITE) {
1297 if (bp->b_flags & XBF_SYNCIO)
1298 rw = WRITE_SYNC;
1299 else
1300 rw = WRITE;
1301 if (bp->b_flags & XBF_FUA)
1302 rw |= REQ_FUA;
1303 if (bp->b_flags & XBF_FLUSH)
1304 rw |= REQ_FLUSH;
1305 } else if (bp->b_flags & XBF_READ_AHEAD) {
1306 rw = READA;
1307 } else {
1308 rw = READ;
1309 }
1310
1311 /* we only use the buffer cache for meta-data */
1312 rw |= REQ_META;
1313
1314 /*
1315 * Walk all the vectors issuing IO on them. Set up the initial offset
1316 * into the buffer and the desired IO size before we start -
1317 * _xfs_buf_ioapply_vec() will modify them appropriately for each
1318 * subsequent call.
1319 */
1320 offset = bp->b_offset;
1321 size = BBTOB(bp->b_io_length);
1322 blk_start_plug(&plug);
1323 for (i = 0; i < bp->b_map_count; i++) {
1324 xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
1325 if (bp->b_error)
1326 break;
1327 if (size <= 0)
1328 break; /* all done */
1329 }
1330 blk_finish_plug(&plug);
1219} 1331}
1220 1332
1221void 1333void
@@ -1557,7 +1669,7 @@ xfs_buf_cmp(
1557 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list); 1669 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
1558 xfs_daddr_t diff; 1670 xfs_daddr_t diff;
1559 1671
1560 diff = ap->b_bn - bp->b_bn; 1672 diff = ap->b_map.bm_bn - bp->b_map.bm_bn;
1561 if (diff < 0) 1673 if (diff < 0)
1562 return -1; 1674 return -1;
1563 if (diff > 0) 1675 if (diff > 0)
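Editor's note: the xfs_buf.c changes above introduce compound buffers. _xfs_buf_alloc(), _xfs_buf_find() and the new *_map() entry points take an array of struct xfs_buf_map (bm_bn/bm_len pairs), b_length becomes the sum of the map lengths, and _xfs_buf_ioapply() walks the maps issuing bios per map entry under a single block plug. The sketch below is not taken from the patch; it only shows how a caller could read an object spanning two discontiguous extents as one buffer. btp, the daddr/len values and the error handling are placeholders.

    /* Illustration only: read two discontiguous extents as one buffer. */
    struct xfs_buf_map map[2] = {
        { .bm_bn = daddr1, .bm_len = len1 },    /* first extent */
        { .bm_bn = daddr2, .bm_len = len2 },    /* second extent */
    };
    struct xfs_buf *bp;

    bp = xfs_buf_read_map(btp, map, 2, 0);
    if (!bp)
        return ENOMEM;      /* placeholder error handling */

    /*
     * bp->b_length is now len1 + len2, bp->b_bn caches map[0].bm_bn for
     * the rbtree lookup, and the IO was issued per map entry.
     */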
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 79344c48008e..d03b73b9604e 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -58,6 +58,7 @@ typedef enum {
58#define _XBF_PAGES (1 << 20)/* backed by refcounted pages */ 58#define _XBF_PAGES (1 << 20)/* backed by refcounted pages */
59#define _XBF_KMEM (1 << 21)/* backed by heap memory */ 59#define _XBF_KMEM (1 << 21)/* backed by heap memory */
60#define _XBF_DELWRI_Q (1 << 22)/* buffer on a delwri queue */ 60#define _XBF_DELWRI_Q (1 << 22)/* buffer on a delwri queue */
61#define _XBF_COMPOUND (1 << 23)/* compound buffer */
61 62
62typedef unsigned int xfs_buf_flags_t; 63typedef unsigned int xfs_buf_flags_t;
63 64
@@ -75,7 +76,8 @@ typedef unsigned int xfs_buf_flags_t;
75 { XBF_UNMAPPED, "UNMAPPED" }, /* ditto */\ 76 { XBF_UNMAPPED, "UNMAPPED" }, /* ditto */\
76 { _XBF_PAGES, "PAGES" }, \ 77 { _XBF_PAGES, "PAGES" }, \
77 { _XBF_KMEM, "KMEM" }, \ 78 { _XBF_KMEM, "KMEM" }, \
78 { _XBF_DELWRI_Q, "DELWRI_Q" } 79 { _XBF_DELWRI_Q, "DELWRI_Q" }, \
80 { _XBF_COMPOUND, "COMPOUND" }
79 81
80typedef struct xfs_buftarg { 82typedef struct xfs_buftarg {
81 dev_t bt_dev; 83 dev_t bt_dev;
@@ -98,6 +100,14 @@ typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);
98 100
99#define XB_PAGES 2 101#define XB_PAGES 2
100 102
103struct xfs_buf_map {
104 xfs_daddr_t bm_bn; /* block number for I/O */
105 int bm_len; /* size of I/O */
106};
107
108#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
109 struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
110
101typedef struct xfs_buf { 111typedef struct xfs_buf {
102 /* 112 /*
103 * first cacheline holds all the fields needed for an uncontended cache 113 * first cacheline holds all the fields needed for an uncontended cache
@@ -107,7 +117,7 @@ typedef struct xfs_buf {
107 * fast-path on locking. 117 * fast-path on locking.
108 */ 118 */
109 struct rb_node b_rbnode; /* rbtree node */ 119 struct rb_node b_rbnode; /* rbtree node */
110 xfs_daddr_t b_bn; /* block number for I/O */ 120 xfs_daddr_t b_bn; /* block number of buffer */
111 int b_length; /* size of buffer in BBs */ 121 int b_length; /* size of buffer in BBs */
112 atomic_t b_hold; /* reference count */ 122 atomic_t b_hold; /* reference count */
113 atomic_t b_lru_ref; /* lru reclaim ref count */ 123 atomic_t b_lru_ref; /* lru reclaim ref count */
@@ -127,12 +137,16 @@ typedef struct xfs_buf {
127 struct xfs_trans *b_transp; 137 struct xfs_trans *b_transp;
128 struct page **b_pages; /* array of page pointers */ 138 struct page **b_pages; /* array of page pointers */
129 struct page *b_page_array[XB_PAGES]; /* inline pages */ 139 struct page *b_page_array[XB_PAGES]; /* inline pages */
140 struct xfs_buf_map *b_maps; /* compound buffer map */
141 struct xfs_buf_map b_map; /* inline compound buffer map */
142 int b_map_count;
130 int b_io_length; /* IO size in BBs */ 143 int b_io_length; /* IO size in BBs */
131 atomic_t b_pin_count; /* pin count */ 144 atomic_t b_pin_count; /* pin count */
132 atomic_t b_io_remaining; /* #outstanding I/O requests */ 145 atomic_t b_io_remaining; /* #outstanding I/O requests */
133 unsigned int b_page_count; /* size of page array */ 146 unsigned int b_page_count; /* size of page array */
134 unsigned int b_offset; /* page offset in first page */ 147 unsigned int b_offset; /* page offset in first page */
135 unsigned short b_error; /* error code on I/O */ 148 unsigned short b_error; /* error code on I/O */
149
136#ifdef XFS_BUF_LOCK_TRACKING 150#ifdef XFS_BUF_LOCK_TRACKING
137 int b_last_holder; 151 int b_last_holder;
138#endif 152#endif
@@ -140,22 +154,78 @@ typedef struct xfs_buf {
140 154
141 155
142/* Finding and Reading Buffers */ 156/* Finding and Reading Buffers */
143struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target, xfs_daddr_t blkno, 157struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target,
144 size_t numblks, xfs_buf_flags_t flags, 158 struct xfs_buf_map *map, int nmaps,
145 struct xfs_buf *new_bp); 159 xfs_buf_flags_t flags, struct xfs_buf *new_bp);
146#define xfs_incore(buftarg,blkno,len,lockit) \ 160
147 _xfs_buf_find(buftarg, blkno ,len, lockit, NULL) 161static inline struct xfs_buf *
148 162xfs_incore(
149struct xfs_buf *xfs_buf_get(struct xfs_buftarg *target, xfs_daddr_t blkno, 163 struct xfs_buftarg *target,
150 size_t numblks, xfs_buf_flags_t flags); 164 xfs_daddr_t blkno,
151struct xfs_buf *xfs_buf_read(struct xfs_buftarg *target, xfs_daddr_t blkno, 165 size_t numblks,
152 size_t numblks, xfs_buf_flags_t flags); 166 xfs_buf_flags_t flags)
153void xfs_buf_readahead(struct xfs_buftarg *target, xfs_daddr_t blkno, 167{
154 size_t numblks); 168 DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
169 return _xfs_buf_find(target, &map, 1, flags, NULL);
170}
171
172struct xfs_buf *_xfs_buf_alloc(struct xfs_buftarg *target,
173 struct xfs_buf_map *map, int nmaps,
174 xfs_buf_flags_t flags);
175
176static inline struct xfs_buf *
177xfs_buf_alloc(
178 struct xfs_buftarg *target,
179 xfs_daddr_t blkno,
180 size_t numblks,
181 xfs_buf_flags_t flags)
182{
183 DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
184 return _xfs_buf_alloc(target, &map, 1, flags);
185}
186
187struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
188 struct xfs_buf_map *map, int nmaps,
189 xfs_buf_flags_t flags);
190struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target,
191 struct xfs_buf_map *map, int nmaps,
192 xfs_buf_flags_t flags);
193void xfs_buf_readahead_map(struct xfs_buftarg *target,
194 struct xfs_buf_map *map, int nmaps);
195
196static inline struct xfs_buf *
197xfs_buf_get(
198 struct xfs_buftarg *target,
199 xfs_daddr_t blkno,
200 size_t numblks,
201 xfs_buf_flags_t flags)
202{
203 DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
204 return xfs_buf_get_map(target, &map, 1, flags);
205}
206
207static inline struct xfs_buf *
208xfs_buf_read(
209 struct xfs_buftarg *target,
210 xfs_daddr_t blkno,
211 size_t numblks,
212 xfs_buf_flags_t flags)
213{
214 DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
215 return xfs_buf_read_map(target, &map, 1, flags);
216}
217
218static inline void
219xfs_buf_readahead(
220 struct xfs_buftarg *target,
221 xfs_daddr_t blkno,
222 size_t numblks)
223{
224 DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
225 return xfs_buf_readahead_map(target, &map, 1);
226}
155 227
156struct xfs_buf *xfs_buf_get_empty(struct xfs_buftarg *target, size_t numblks); 228struct xfs_buf *xfs_buf_get_empty(struct xfs_buftarg *target, size_t numblks);
157struct xfs_buf *xfs_buf_alloc(struct xfs_buftarg *target, xfs_daddr_t blkno,
158 size_t numblks, xfs_buf_flags_t flags);
159void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks); 229void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
160int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length); 230int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);
161 231
@@ -232,8 +302,18 @@ void xfs_buf_stale(struct xfs_buf *bp);
232#define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE) 302#define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE)
233#define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE) 303#define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE)
234 304
235#define XFS_BUF_ADDR(bp) ((bp)->b_bn) 305/*
236#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_bn = (xfs_daddr_t)(bno)) 306 * These macros use the IO block map rather than b_bn. b_bn is now really
307 * just for the buffer cache index for cached buffers. As IO does not use b_bn
308 * anymore, uncached buffers do not use b_bn at all and hence must modify the IO
309 * map directly. Uncached buffers are not allowed to be discontiguous, so this
310 * is safe to do.
311 *
312 * In future, uncached buffers will pass the block number directly to the io
313 * request function and hence these macros will go away at that point.
314 */
315#define XFS_BUF_ADDR(bp) ((bp)->b_map.bm_bn)
316#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_map.bm_bn = (xfs_daddr_t)(bno))
237 317
238static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref) 318static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
239{ 319{
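Editor's note: for existing callers, the header keeps the old single-block entry points as static inlines that build a one-entry map on the stack via DEFINE_SINGLE_BUF_MAP() and call the *_map() variants, so call sites outside the buffer cache need not change. Purely as an illustration (blkno, numblks and btp are placeholders), the two forms below are equivalent:

    /* The legacy call ... */
    bp = xfs_buf_read(btp, blkno, numblks, XBF_TRYLOCK);

    /* ... is shorthand for building a single-entry map: */
    {
        DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

        bp = xfs_buf_read_map(btp, &map, 1, XBF_TRYLOCK);
    }

Note also that XFS_BUF_ADDR()/XFS_BUF_SET_ADDR() now operate on b_map.bm_bn rather than b_bn, as the comment in the hunk above explains: IO no longer looks at b_bn, and uncached buffers carry exactly one map.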
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index d9e451115f98..a8d0ed911196 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -153,33 +153,25 @@ STATIC void xfs_buf_do_callbacks(struct xfs_buf *bp);
153 * If the XFS_BLI_STALE flag has been set, then log nothing. 153 * If the XFS_BLI_STALE flag has been set, then log nothing.
154 */ 154 */
155STATIC uint 155STATIC uint
156xfs_buf_item_size( 156xfs_buf_item_size_segment(
157 struct xfs_log_item *lip) 157 struct xfs_buf_log_item *bip,
158 struct xfs_buf_log_format *blfp)
158{ 159{
159 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
160 struct xfs_buf *bp = bip->bli_buf; 160 struct xfs_buf *bp = bip->bli_buf;
161 uint nvecs; 161 uint nvecs;
162 int next_bit; 162 int next_bit;
163 int last_bit; 163 int last_bit;
164 164
165 ASSERT(atomic_read(&bip->bli_refcount) > 0); 165 last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
166 if (bip->bli_flags & XFS_BLI_STALE) { 166 if (last_bit == -1)
167 /* 167 return 0;
168 * The buffer is stale, so all we need to log 168
169 * is the buf log format structure with the 169 /*
170 * cancel flag in it. 170 * initial count for a dirty buffer is 2 vectors - the format structure
171 */ 171 * and the first dirty region.
172 trace_xfs_buf_item_size_stale(bip); 172 */
173 ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL); 173 nvecs = 2;
174 return 1;
175 }
176 174
177 ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
178 nvecs = 1;
179 last_bit = xfs_next_bit(bip->bli_format.blf_data_map,
180 bip->bli_format.blf_map_size, 0);
181 ASSERT(last_bit != -1);
182 nvecs++;
183 while (last_bit != -1) { 175 while (last_bit != -1) {
184 /* 176 /*
185 * This takes the bit number to start looking from and 177 * This takes the bit number to start looking from and
@@ -187,16 +179,15 @@ xfs_buf_item_size(
187 * if there are no more bits set or the start bit is 179 * if there are no more bits set or the start bit is
188 * beyond the end of the bitmap. 180 * beyond the end of the bitmap.
189 */ 181 */
190 next_bit = xfs_next_bit(bip->bli_format.blf_data_map, 182 next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
191 bip->bli_format.blf_map_size, 183 last_bit + 1);
192 last_bit + 1);
193 /* 184 /*
194 * If we run out of bits, leave the loop, 185 * If we run out of bits, leave the loop,
195 * else if we find a new set of bits bump the number of vecs, 186 * else if we find a new set of bits bump the number of vecs,
196 * else keep scanning the current set of bits. 187 * else keep scanning the current set of bits.
197 */ 188 */
198 if (next_bit == -1) { 189 if (next_bit == -1) {
199 last_bit = -1; 190 break;
200 } else if (next_bit != last_bit + 1) { 191 } else if (next_bit != last_bit + 1) {
201 last_bit = next_bit; 192 last_bit = next_bit;
202 nvecs++; 193 nvecs++;
@@ -210,22 +201,73 @@ xfs_buf_item_size(
210 } 201 }
211 } 202 }
212 203
213 trace_xfs_buf_item_size(bip);
214 return nvecs; 204 return nvecs;
215} 205}
216 206
217/* 207/*
218 * This is called to fill in the vector of log iovecs for the 208 * This returns the number of log iovecs needed to log the given buf log item.
219 * given log buf item. It fills the first entry with a buf log 209 *
220 * format structure, and the rest point to contiguous chunks 210 * It calculates this as 1 iovec for the buf log format structure and 1 for each
221 * within the buffer. 211 * stretch of non-contiguous chunks to be logged. Contiguous chunks are logged
212 * in a single iovec.
213 *
214 * Discontiguous buffers need a format structure per region that is being
215 * logged. This makes the changes in the buffer appear to log recovery as though
216 * they came from separate buffers, just like would occur if multiple buffers
217 * were used instead of a single discontiguous buffer. This enables
218 * discontiguous buffers to be in-memory constructs, completely transparent to
219 * what ends up on disk.
220 *
221 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
222 * format structures.
222 */ 223 */
223STATIC void 224STATIC uint
224xfs_buf_item_format( 225xfs_buf_item_size(
225 struct xfs_log_item *lip, 226 struct xfs_log_item *lip)
226 struct xfs_log_iovec *vecp)
227{ 227{
228 struct xfs_buf_log_item *bip = BUF_ITEM(lip); 228 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
229 uint nvecs;
230 int i;
231
232 ASSERT(atomic_read(&bip->bli_refcount) > 0);
233 if (bip->bli_flags & XFS_BLI_STALE) {
234 /*
235 * The buffer is stale, so all we need to log
236 * is the buf log format structure with the
237 * cancel flag in it.
238 */
239 trace_xfs_buf_item_size_stale(bip);
240 ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
241 return bip->bli_format_count;
242 }
243
244 ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
245
246 /*
247 * the vector count is based on the number of buffer vectors we have
248 * dirty bits in. This will only be greater than one when we have a
249 * compound buffer with more than one segment dirty. Hence for compound
250 * buffers we need to track which segment the dirty bits correspond to,
251 * and when we move from one segment to the next increment the vector
252 * count for the extra buf log format structure that will need to be
253 * written.
254 */
255 nvecs = 0;
256 for (i = 0; i < bip->bli_format_count; i++) {
257 nvecs += xfs_buf_item_size_segment(bip, &bip->bli_formats[i]);
258 }
259
260 trace_xfs_buf_item_size(bip);
261 return nvecs;
262}
263
264static struct xfs_log_iovec *
265xfs_buf_item_format_segment(
266 struct xfs_buf_log_item *bip,
267 struct xfs_log_iovec *vecp,
268 uint offset,
269 struct xfs_buf_log_format *blfp)
270{
229 struct xfs_buf *bp = bip->bli_buf; 271 struct xfs_buf *bp = bip->bli_buf;
230 uint base_size; 272 uint base_size;
231 uint nvecs; 273 uint nvecs;
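Editor's note: the per-segment size calculation in the hunk above boils down to counting runs of contiguous set bits in the dirty bitmap: a clean segment contributes nothing, a dirty one contributes one iovec for its buf log format structure plus one per run. The standalone sketch below is not kernel code; it uses a plain unsigned int in place of blf_data_map and ignores the extra split the real code makes at non-contiguous page boundaries, but it applies the same counting rule.

    #include <stdio.h>

    /*
     * One iovec for the format structure plus one per run of contiguous
     * set bits; 0 for a clean bitmap, mirroring xfs_buf_item_size_segment().
     */
    static unsigned int count_iovecs(unsigned int map, int nbits)
    {
        unsigned int nvecs = 0;
        int prev = 0;
        int i;

        for (i = 0; i < nbits; i++) {
            int bit = (map >> i) & 1;

            if (bit && !prev)   /* start of a new dirty run */
                nvecs++;
            prev = bit;
        }
        return nvecs ? nvecs + 1 : 0;
    }

    int main(void)
    {
        printf("%u\n", count_iovecs(0x73, 8));  /* runs 0-1 and 4-6 -> 3 */
        printf("%u\n", count_iovecs(0x00, 8));  /* clean -> 0 */
        return 0;
    }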
@@ -235,40 +277,22 @@ xfs_buf_item_format(
235 uint nbits; 277 uint nbits;
236 uint buffer_offset; 278 uint buffer_offset;
237 279
238 ASSERT(atomic_read(&bip->bli_refcount) > 0); 280 /* copy the flags across from the base format item */
239 ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || 281 blfp->blf_flags = bip->bli_format.blf_flags;
240 (bip->bli_flags & XFS_BLI_STALE));
241 282
242 /* 283 /*
243 * The size of the base structure is the size of the 284 * Base size is the actual size of the ondisk structure - it reflects
244 * declared structure plus the space for the extra words 285 * the actual size of the dirty bitmap rather than the size of the in
245 * of the bitmap. We subtract one from the map size, because 286 * memory structure.
246 * the first element of the bitmap is accounted for in the
247 * size of the base structure.
248 */ 287 */
249 base_size = 288 base_size = offsetof(struct xfs_buf_log_format, blf_data_map) +
250 (uint)(sizeof(xfs_buf_log_format_t) + 289 (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
251 ((bip->bli_format.blf_map_size - 1) * sizeof(uint))); 290 vecp->i_addr = blfp;
252 vecp->i_addr = &bip->bli_format;
253 vecp->i_len = base_size; 291 vecp->i_len = base_size;
254 vecp->i_type = XLOG_REG_TYPE_BFORMAT; 292 vecp->i_type = XLOG_REG_TYPE_BFORMAT;
255 vecp++; 293 vecp++;
256 nvecs = 1; 294 nvecs = 1;
257 295
258 /*
259 * If it is an inode buffer, transfer the in-memory state to the
260 * format flags and clear the in-memory state. We do not transfer
261 * this state if the inode buffer allocation has not yet been committed
262 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
263 * correct replay of the inode allocation.
264 */
265 if (bip->bli_flags & XFS_BLI_INODE_BUF) {
266 if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
267 xfs_log_item_in_current_chkpt(lip)))
268 bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
269 bip->bli_flags &= ~XFS_BLI_INODE_BUF;
270 }
271
272 if (bip->bli_flags & XFS_BLI_STALE) { 296 if (bip->bli_flags & XFS_BLI_STALE) {
273 /* 297 /*
274 * The buffer is stale, so all we need to log 298 * The buffer is stale, so all we need to log
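Editor's note: xfs_buf_item_format_segment() in the hunk above now derives the length of the format iovec from offsetof() plus the number of bitmap words actually in use, so the logged bytes match the on-disk layout rather than sizeof() of the in-memory structure. A standalone sketch of that pattern with a made-up record type (not the XFS structure):

    #include <stddef.h>
    #include <stdio.h>

    /* Made-up record: fixed header followed by a variable-use bitmap. */
    struct rec {
        unsigned short magic;
        unsigned short map_size;    /* words of map[] in use */
        unsigned int map[16];       /* in-memory array is oversized */
    };

    int main(void)
    {
        struct rec r = { .magic = 0xda7a, .map_size = 3 };

        /* Bytes to write out: header up to map[], plus only the words
         * actually used - not sizeof(struct rec). */
        size_t len = offsetof(struct rec, map) +
                     r.map_size * sizeof(r.map[0]);

        printf("sizeof=%zu logged=%zu\n", sizeof(struct rec), len);
        return 0;
    }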
@@ -276,16 +300,15 @@ xfs_buf_item_format(
276 * cancel flag in it. 300 * cancel flag in it.
277 */ 301 */
278 trace_xfs_buf_item_format_stale(bip); 302 trace_xfs_buf_item_format_stale(bip);
279 ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL); 303 ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
280 bip->bli_format.blf_size = nvecs; 304 blfp->blf_size = nvecs;
281 return; 305 return vecp;
282 } 306 }
283 307
284 /* 308 /*
285 * Fill in an iovec for each set of contiguous chunks. 309 * Fill in an iovec for each set of contiguous chunks.
286 */ 310 */
287 first_bit = xfs_next_bit(bip->bli_format.blf_data_map, 311 first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
288 bip->bli_format.blf_map_size, 0);
289 ASSERT(first_bit != -1); 312 ASSERT(first_bit != -1);
290 last_bit = first_bit; 313 last_bit = first_bit;
291 nbits = 1; 314 nbits = 1;
@@ -296,9 +319,8 @@ xfs_buf_item_format(
296 * if there are no more bits set or the start bit is 319 * if there are no more bits set or the start bit is
297 * beyond the end of the bitmap. 320 * beyond the end of the bitmap.
298 */ 321 */
299 next_bit = xfs_next_bit(bip->bli_format.blf_data_map, 322 next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
300 bip->bli_format.blf_map_size, 323 (uint)last_bit + 1);
301 (uint)last_bit + 1);
302 /* 324 /*
303 * If we run out of bits fill in the last iovec and get 325 * If we run out of bits fill in the last iovec and get
304 * out of the loop. 326 * out of the loop.
@@ -309,14 +331,14 @@ xfs_buf_item_format(
309 * keep counting and scanning. 331 * keep counting and scanning.
310 */ 332 */
311 if (next_bit == -1) { 333 if (next_bit == -1) {
312 buffer_offset = first_bit * XFS_BLF_CHUNK; 334 buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
313 vecp->i_addr = xfs_buf_offset(bp, buffer_offset); 335 vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
314 vecp->i_len = nbits * XFS_BLF_CHUNK; 336 vecp->i_len = nbits * XFS_BLF_CHUNK;
315 vecp->i_type = XLOG_REG_TYPE_BCHUNK; 337 vecp->i_type = XLOG_REG_TYPE_BCHUNK;
316 nvecs++; 338 nvecs++;
317 break; 339 break;
318 } else if (next_bit != last_bit + 1) { 340 } else if (next_bit != last_bit + 1) {
319 buffer_offset = first_bit * XFS_BLF_CHUNK; 341 buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
320 vecp->i_addr = xfs_buf_offset(bp, buffer_offset); 342 vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
321 vecp->i_len = nbits * XFS_BLF_CHUNK; 343 vecp->i_len = nbits * XFS_BLF_CHUNK;
322 vecp->i_type = XLOG_REG_TYPE_BCHUNK; 344 vecp->i_type = XLOG_REG_TYPE_BCHUNK;
@@ -325,14 +347,17 @@ xfs_buf_item_format(
325 first_bit = next_bit; 347 first_bit = next_bit;
326 last_bit = next_bit; 348 last_bit = next_bit;
327 nbits = 1; 349 nbits = 1;
328 } else if (xfs_buf_offset(bp, next_bit << XFS_BLF_SHIFT) != 350 } else if (xfs_buf_offset(bp, offset +
329 (xfs_buf_offset(bp, last_bit << XFS_BLF_SHIFT) + 351 (next_bit << XFS_BLF_SHIFT)) !=
352 (xfs_buf_offset(bp, offset +
353 (last_bit << XFS_BLF_SHIFT)) +
330 XFS_BLF_CHUNK)) { 354 XFS_BLF_CHUNK)) {
331 buffer_offset = first_bit * XFS_BLF_CHUNK; 355 buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
332 vecp->i_addr = xfs_buf_offset(bp, buffer_offset); 356 vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
333 vecp->i_len = nbits * XFS_BLF_CHUNK; 357 vecp->i_len = nbits * XFS_BLF_CHUNK;
334 vecp->i_type = XLOG_REG_TYPE_BCHUNK; 358 vecp->i_type = XLOG_REG_TYPE_BCHUNK;
335/* You would think we need to bump the nvecs here too, but we do not 359/*
360 * You would think we need to bump the nvecs here too, but we do not
336 * this number is used by recovery, and it gets confused by the boundary 361 * this number is used by recovery, and it gets confused by the boundary
337 * split here 362 * split here
338 * nvecs++; 363 * nvecs++;
@@ -347,6 +372,48 @@ xfs_buf_item_format(
347 } 372 }
348 } 373 }
349 bip->bli_format.blf_size = nvecs; 374 bip->bli_format.blf_size = nvecs;
375 return vecp;
376}
377
378/*
379 * This is called to fill in the vector of log iovecs for the
380 * given log buf item. It fills the first entry with a buf log
381 * format structure, and the rest point to contiguous chunks
382 * within the buffer.
383 */
384STATIC void
385xfs_buf_item_format(
386 struct xfs_log_item *lip,
387 struct xfs_log_iovec *vecp)
388{
389 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
390 struct xfs_buf *bp = bip->bli_buf;
391 uint offset = 0;
392 int i;
393
394 ASSERT(atomic_read(&bip->bli_refcount) > 0);
395 ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
396 (bip->bli_flags & XFS_BLI_STALE));
397
398 /*
399 * If it is an inode buffer, transfer the in-memory state to the
400 * format flags and clear the in-memory state. We do not transfer
401 * this state if the inode buffer allocation has not yet been committed
402 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
403 * correct replay of the inode allocation.
404 */
405 if (bip->bli_flags & XFS_BLI_INODE_BUF) {
406 if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
407 xfs_log_item_in_current_chkpt(lip)))
408 bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
409 bip->bli_flags &= ~XFS_BLI_INODE_BUF;
410 }
411
412 for (i = 0; i < bip->bli_format_count; i++) {
413 vecp = xfs_buf_item_format_segment(bip, vecp, offset,
414 &bip->bli_formats[i]);
415 offset += bp->b_maps[i].bm_len;
416 }
350 417
351 /* 418 /*
352 * Check to make sure everything is consistent. 419 * Check to make sure everything is consistent.
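The refactoring above splits the old xfs_buf_item_format() into a per-segment helper, xfs_buf_item_format_segment(): the header iovec is now sized from the in-use words of the dirty bitmap (offsetof plus blf_map_size words) rather than from sizeof(xfs_buf_log_format_t), and each contiguous run of set bits then gets its own buffer-chunk iovec. The sketch below illustrates only the run-detection idea in plain userspace C; the names, the byte-per-chunk "bitmap" and the chunk constant are simplified stand-ins, not the kernel structures.

	#include <stdio.h>

	#define CHUNK 128          /* stand-in for XFS_BLF_CHUNK */
	#define NBITS 16           /* toy map size, not blf_map_size */

	/* Emit one (offset, length) pair per contiguous run of set bits,
	 * mirroring how one iovec is built per run of dirty chunks. */
	static void emit_runs(const unsigned char *map)
	{
		int bit = 0;

		while (bit < NBITS) {
			int first, len;

			/* skip clean chunks */
			while (bit < NBITS && !map[bit])
				bit++;
			if (bit == NBITS)
				break;

			first = bit;
			while (bit < NBITS && map[bit])
				bit++;
			len = bit - first;

			printf("iovec: offset=%d len=%d\n",
			       first * CHUNK, len * CHUNK);
		}
	}

	int main(void)
	{
		/* chunks 1-2 and 5 are dirty */
		unsigned char dirty[NBITS] = { 0, 1, 1, 0, 0, 1 };

		emit_runs(dirty);
		return 0;
	}

Running it prints one offset/length pair for chunks 1-2 and another for chunk 5, the same shape the xfs_next_bit() scan in the hunk above produces.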
@@ -622,6 +689,35 @@ static const struct xfs_item_ops xfs_buf_item_ops = {
622 .iop_committing = xfs_buf_item_committing 689 .iop_committing = xfs_buf_item_committing
623}; 690};
624 691
692STATIC int
693xfs_buf_item_get_format(
694 struct xfs_buf_log_item *bip,
695 int count)
696{
697 ASSERT(bip->bli_formats == NULL);
698 bip->bli_format_count = count;
699
700 if (count == 1) {
701 bip->bli_formats = &bip->bli_format;
702 return 0;
703 }
704
705 bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
706 KM_SLEEP);
707 if (!bip->bli_formats)
708 return ENOMEM;
709 return 0;
710}
711
712STATIC void
713xfs_buf_item_free_format(
714 struct xfs_buf_log_item *bip)
715{
716 if (bip->bli_formats != &bip->bli_format) {
717 kmem_free(bip->bli_formats);
718 bip->bli_formats = NULL;
719 }
720}
625 721
626/* 722/*
627 * Allocate a new buf log item to go with the given buffer. 723 * Allocate a new buf log item to go with the given buffer.
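xfs_buf_item_get_format() and xfs_buf_item_free_format() above use the familiar embedded-singleton-or-allocated-array pattern: the common one-map case points bli_formats at the embedded bli_format and allocates nothing, and the free path only releases memory it actually allocated. A minimal userspace sketch of that pattern, with invented struct and field names rather than the real xfs_buf_log_item layout:

	#include <stdlib.h>

	struct fmt { int dummy; };          /* stand-in for xfs_buf_log_format */

	struct item {
		int         fmt_count;
		struct fmt *fmts;           /* points at 'embedded' or an array */
		struct fmt  embedded;
	};

	static int item_get_fmts(struct item *it, int count)
	{
		it->fmt_count = count;
		if (count == 1) {           /* common case: no allocation */
			it->fmts = &it->embedded;
			return 0;
		}
		it->fmts = calloc(count, sizeof(struct fmt));
		return it->fmts ? 0 : -1;
	}

	static void item_free_fmts(struct item *it)
	{
		if (it->fmts != &it->embedded) {   /* only free what we allocated */
			free(it->fmts);
			it->fmts = NULL;
		}
	}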
@@ -639,6 +735,8 @@ xfs_buf_item_init(
639 xfs_buf_log_item_t *bip; 735 xfs_buf_log_item_t *bip;
640 int chunks; 736 int chunks;
641 int map_size; 737 int map_size;
738 int error;
739 int i;
642 740
643 /* 741 /*
644 * Check to see if there is already a buf log item for 742 * Check to see if there is already a buf log item for
@@ -650,25 +748,33 @@ xfs_buf_item_init(
650 if (lip != NULL && lip->li_type == XFS_LI_BUF) 748 if (lip != NULL && lip->li_type == XFS_LI_BUF)
651 return; 749 return;
652 750
653 /* 751 bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
654 * chunks is the number of XFS_BLF_CHUNK size pieces
655 * the buffer can be divided into. Make sure not to
656 * truncate any pieces. map_size is the size of the
657 * bitmap needed to describe the chunks of the buffer.
658 */
659 chunks = (int)((BBTOB(bp->b_length) + (XFS_BLF_CHUNK - 1)) >>
660 XFS_BLF_SHIFT);
661 map_size = (int)((chunks + NBWORD) >> BIT_TO_WORD_SHIFT);
662
663 bip = (xfs_buf_log_item_t*)kmem_zone_zalloc(xfs_buf_item_zone,
664 KM_SLEEP);
665 xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops); 752 xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
666 bip->bli_buf = bp; 753 bip->bli_buf = bp;
667 xfs_buf_hold(bp); 754 xfs_buf_hold(bp);
668 bip->bli_format.blf_type = XFS_LI_BUF; 755
669 bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp); 756 /*
670 bip->bli_format.blf_len = (ushort)bp->b_length; 757 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
671 bip->bli_format.blf_map_size = map_size; 758 * can be divided into. Make sure not to truncate any pieces.
759 * map_size is the size of the bitmap needed to describe the
760 * chunks of the buffer.
761 *
762 * Discontiguous buffer support follows the layout of the underlying
763 * buffer. This makes the implementation as simple as possible.
764 */
765 error = xfs_buf_item_get_format(bip, bp->b_map_count);
766 ASSERT(error == 0);
767
768 for (i = 0; i < bip->bli_format_count; i++) {
769 chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
770 XFS_BLF_CHUNK);
771 map_size = DIV_ROUND_UP(chunks, NBWORD);
772
773 bip->bli_formats[i].blf_type = XFS_LI_BUF;
774 bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
775 bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
776 bip->bli_formats[i].blf_map_size = map_size;
777 }
672 778
673#ifdef XFS_TRANS_DEBUG 779#ifdef XFS_TRANS_DEBUG
674 /* 780 /*
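In the reworked xfs_buf_item_init() above, each map segment gets its own format header whose bitmap is sized by two round-up divisions: BBTOB(bm_len) bytes into XFS_BLF_CHUNK (128-byte) chunks, then chunks into NBWORD-bit words. A hedged worked example (assuming 512-byte basic blocks and 32-bit bitmap words) for a four-block segment:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned bm_len   = 4;                         /* segment length in 512B blocks */
		unsigned bytes    = bm_len * 512;              /* BBTOB(): 2048 bytes */
		unsigned chunks   = DIV_ROUND_UP(bytes, 128);  /* 16 dirty-trackable chunks */
		unsigned map_size = DIV_ROUND_UP(chunks, 32);  /* 1 bitmap word (NBWORD = 32) */

		printf("chunks=%u map_size=%u\n", chunks, map_size);
		return 0;
	}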
@@ -699,10 +805,11 @@ xfs_buf_item_init(
699 * item's bitmap. 805 * item's bitmap.
700 */ 806 */
701void 807void
702xfs_buf_item_log( 808xfs_buf_item_log_segment(
703 xfs_buf_log_item_t *bip, 809 struct xfs_buf_log_item *bip,
704 uint first, 810 uint first,
705 uint last) 811 uint last,
812 uint *map)
706{ 813{
707 uint first_bit; 814 uint first_bit;
708 uint last_bit; 815 uint last_bit;
@@ -715,12 +822,6 @@ xfs_buf_item_log(
715 uint mask; 822 uint mask;
716 823
717 /* 824 /*
718 * Mark the item as having some dirty data for
719 * quick reference in xfs_buf_item_dirty.
720 */
721 bip->bli_flags |= XFS_BLI_DIRTY;
722
723 /*
724 * Convert byte offsets to bit numbers. 825 * Convert byte offsets to bit numbers.
725 */ 826 */
726 first_bit = first >> XFS_BLF_SHIFT; 827 first_bit = first >> XFS_BLF_SHIFT;
@@ -736,7 +837,7 @@ xfs_buf_item_log(
736 * to set a bit in. 837 * to set a bit in.
737 */ 838 */
738 word_num = first_bit >> BIT_TO_WORD_SHIFT; 839 word_num = first_bit >> BIT_TO_WORD_SHIFT;
739 wordp = &(bip->bli_format.blf_data_map[word_num]); 840 wordp = &map[word_num];
740 841
741 /* 842 /*
742 * Calculate the starting bit in the first word. 843 * Calculate the starting bit in the first word.
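xfs_buf_item_log_segment() above still converts the caller's byte range into bit and word indices, only now against a caller-supplied map rather than bip->bli_format directly. A quick illustration of that shift arithmetic with toy values (the shift constants match the 128-byte chunk and 32-bit word sizes used throughout this patch):

	#include <stdio.h>

	#define XFS_BLF_SHIFT      7    /* 128-byte chunks */
	#define BIT_TO_WORD_SHIFT  5    /* 32 bits per bitmap word */

	int main(void)
	{
		unsigned first = 256, last = 511;                      /* byte range to mark dirty */
		unsigned first_bit = first >> XFS_BLF_SHIFT;           /* chunk 2 */
		unsigned last_bit  = last  >> XFS_BLF_SHIFT;           /* chunk 3 */
		unsigned word_num  = first_bit >> BIT_TO_WORD_SHIFT;   /* bitmap word 0 */

		printf("bits %u-%u, starting in word %u\n",
		       first_bit, last_bit, word_num);
		return 0;
	}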
@@ -783,6 +884,51 @@ xfs_buf_item_log(
783 xfs_buf_item_log_debug(bip, first, last); 884 xfs_buf_item_log_debug(bip, first, last);
784} 885}
785 886
887/*
888 * Mark bytes first through last inclusive as dirty in the buf
889 * item's bitmap.
890 */
891void
892xfs_buf_item_log(
893 xfs_buf_log_item_t *bip,
894 uint first,
895 uint last)
896{
897 int i;
898 uint start;
899 uint end;
900 struct xfs_buf *bp = bip->bli_buf;
901
902 /*
903 * Mark the item as having some dirty data for
904 * quick reference in xfs_buf_item_dirty.
905 */
906 bip->bli_flags |= XFS_BLI_DIRTY;
907
908 /*
909 * walk each buffer segment and mark them dirty appropriately.
910 */
911 start = 0;
912 for (i = 0; i < bip->bli_format_count; i++) {
913 if (start > last)
914 break;
915 end = start + BBTOB(bp->b_maps[i].bm_len);
916 if (first > end) {
917 start += BBTOB(bp->b_maps[i].bm_len);
918 continue;
919 }
920 if (first < start)
921 first = start;
922 if (end > last)
923 end = last;
924
925 xfs_buf_item_log_segment(bip, first, end,
926 &bip->bli_formats[i].blf_data_map[0]);
927
928 start += bp->b_maps[i].bm_len;
929 }
930}
931
786 932
787/* 933/*
788 * Return 1 if the buffer has some data that has been logged (at any 934 * Return 1 if the buffer has some data that has been logged (at any
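The new xfs_buf_item_log() above walks the buffer's map segments and clips the caller's byte range against each one before calling xfs_buf_item_log_segment(). A standalone sketch of that clipping walk, with a plain array of segment byte counts standing in for bp->b_maps and a printf standing in for the per-segment call:

	#include <stdio.h>

	/* Walk fixed-size segments and clip [first, last] against each one,
	 * mimicking the per-map loop in xfs_buf_item_log(). */
	static void log_range(const unsigned *seg_bytes, int nsegs,
			      unsigned first, unsigned last)
	{
		unsigned start = 0;
		int i;

		for (i = 0; i < nsegs; i++) {
			unsigned end = start + seg_bytes[i];

			if (start > last)
				break;                    /* past the dirty range */
			if (first >= end) {
				start = end;              /* range starts later */
				continue;
			}
			printf("segment %d: log bytes %u-%u\n", i,
			       first > start ? first : start,
			       last < end - 1 ? last : end - 1);
			start = end;
		}
	}

	int main(void)
	{
		unsigned segs[] = { 4096, 4096, 4096 };   /* three 4k segments */

		log_range(segs, 3, 2048, 9000);           /* spans segments 0-2 */
		return 0;
	}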
@@ -804,6 +950,7 @@ xfs_buf_item_free(
804 kmem_free(bip->bli_logged); 950 kmem_free(bip->bli_logged);
805#endif /* XFS_TRANS_DEBUG */ 951#endif /* XFS_TRANS_DEBUG */
806 952
953 xfs_buf_item_free_format(bip);
807 kmem_zone_free(xfs_buf_item_zone, bip); 954 kmem_zone_free(xfs_buf_item_zone, bip);
808} 955}
809 956
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index b6ecd2061e7c..6850f49f4af3 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -21,23 +21,6 @@
21extern kmem_zone_t *xfs_buf_item_zone; 21extern kmem_zone_t *xfs_buf_item_zone;
22 22
23/* 23/*
24 * This is the structure used to lay out a buf log item in the
25 * log. The data map describes which 128 byte chunks of the buffer
26 * have been logged.
27 * For 6.2 and beyond, this is XFS_LI_BUF. We use this to log everything.
28 */
29typedef struct xfs_buf_log_format {
30 unsigned short blf_type; /* buf log item type indicator */
31 unsigned short blf_size; /* size of this item */
32 ushort blf_flags; /* misc state */
33 ushort blf_len; /* number of blocks in this buf */
34 __int64_t blf_blkno; /* starting blkno of this buf */
35 unsigned int blf_map_size; /* size of data bitmap in words */
36 unsigned int blf_data_map[1];/* variable size bitmap of */
37 /* regions of buffer in this item */
38} xfs_buf_log_format_t;
39
40/*
41 * This flag indicates that the buffer contains on disk inodes 24 * This flag indicates that the buffer contains on disk inodes
42 * and requires special recovery handling. 25 * and requires special recovery handling.
43 */ 26 */
@@ -61,6 +44,23 @@ typedef struct xfs_buf_log_format {
61#define NBWORD (NBBY * sizeof(unsigned int)) 44#define NBWORD (NBBY * sizeof(unsigned int))
62 45
63/* 46/*
47 * This is the structure used to lay out a buf log item in the
48 * log. The data map describes which 128 byte chunks of the buffer
49 * have been logged.
50 */
51#define XFS_BLF_DATAMAP_SIZE ((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) / NBWORD)
52
53typedef struct xfs_buf_log_format {
54 unsigned short blf_type; /* buf log item type indicator */
55 unsigned short blf_size; /* size of this item */
56 ushort blf_flags; /* misc state */
57 ushort blf_len; /* number of blocks in this buf */
58 __int64_t blf_blkno; /* starting blkno of this buf */
59 unsigned int blf_map_size; /* used size of data bitmap in words */
60 unsigned int blf_data_map[XFS_BLF_DATAMAP_SIZE]; /* dirty bitmap */
61} xfs_buf_log_format_t;
62
63/*
64 * buf log item flags 64 * buf log item flags
65 */ 65 */
66#define XFS_BLI_HOLD 0x01 66#define XFS_BLI_HOLD 0x01
@@ -102,7 +102,9 @@ typedef struct xfs_buf_log_item {
102 char *bli_orig; /* original buffer copy */ 102 char *bli_orig; /* original buffer copy */
103 char *bli_logged; /* bytes logged (bitmap) */ 103 char *bli_logged; /* bytes logged (bitmap) */
104#endif 104#endif
105 xfs_buf_log_format_t bli_format; /* in-log header */ 105 int bli_format_count; /* count of headers */
106 struct xfs_buf_log_format *bli_formats; /* array of in-log header ptrs */
107 struct xfs_buf_log_format bli_format; /* embedded in-log header */
106} xfs_buf_log_item_t; 108} xfs_buf_log_item_t;
107 109
108void xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *); 110void xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
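The header change above replaces the old trailing one-element array with a fixed, worst-case data map, which is what lets a whole xfs_buf_log_format be embedded in the log item and an array of them be allocated per map segment (bli_format_count/bli_formats). A hedged back-of-the-envelope check of that worst-case size, assuming the conventional 64k XFS_MAX_BLOCKSIZE, 128-byte chunks and 32-bit words:

	#include <stdio.h>

	int main(void)
	{
		unsigned max_blocksize = 64 * 1024;   /* assumed XFS_MAX_BLOCKSIZE */
		unsigned chunk  = 128;                /* XFS_BLF_CHUNK */
		unsigned nbword = 32;                 /* bits per bitmap word */

		/* (XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) / NBWORD */
		unsigned datamap_size = (max_blocksize / chunk) / nbword;

		printf("XFS_BLF_DATAMAP_SIZE = %u words (%u bytes)\n",
		       datamap_size, datamap_size * 4);
		return 0;
	}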
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 015b946c5808..7bfb7dd334fc 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -83,9 +83,9 @@ STATIC void xfs_da_node_unbalance(xfs_da_state_t *state,
83/* 83/*
84 * Utility routines. 84 * Utility routines.
85 */ 85 */
86STATIC uint xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count); 86STATIC uint xfs_da_node_lasthash(struct xfs_buf *bp, int *count);
87STATIC int xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp); 87STATIC int xfs_da_node_order(struct xfs_buf *node1_bp,
88STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps); 88 struct xfs_buf *node2_bp);
89STATIC int xfs_da_blk_unlink(xfs_da_state_t *state, 89STATIC int xfs_da_blk_unlink(xfs_da_state_t *state,
90 xfs_da_state_blk_t *drop_blk, 90 xfs_da_state_blk_t *drop_blk,
91 xfs_da_state_blk_t *save_blk); 91 xfs_da_state_blk_t *save_blk);
@@ -100,10 +100,10 @@ STATIC void xfs_da_state_kill_altpath(xfs_da_state_t *state);
100 */ 100 */
101int 101int
102xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level, 102xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
103 xfs_dabuf_t **bpp, int whichfork) 103 struct xfs_buf **bpp, int whichfork)
104{ 104{
105 xfs_da_intnode_t *node; 105 xfs_da_intnode_t *node;
106 xfs_dabuf_t *bp; 106 struct xfs_buf *bp;
107 int error; 107 int error;
108 xfs_trans_t *tp; 108 xfs_trans_t *tp;
109 109
@@ -114,7 +114,7 @@ xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
114 if (error) 114 if (error)
115 return(error); 115 return(error);
116 ASSERT(bp != NULL); 116 ASSERT(bp != NULL);
117 node = bp->data; 117 node = bp->b_addr;
118 node->hdr.info.forw = 0; 118 node->hdr.info.forw = 0;
119 node->hdr.info.back = 0; 119 node->hdr.info.back = 0;
120 node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC); 120 node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC);
@@ -122,7 +122,7 @@ xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
122 node->hdr.count = 0; 122 node->hdr.count = 0;
123 node->hdr.level = cpu_to_be16(level); 123 node->hdr.level = cpu_to_be16(level);
124 124
125 xfs_da_log_buf(tp, bp, 125 xfs_trans_log_buf(tp, bp,
126 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); 126 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
127 127
128 *bpp = bp; 128 *bpp = bp;
@@ -138,7 +138,7 @@ xfs_da_split(xfs_da_state_t *state)
138{ 138{
139 xfs_da_state_blk_t *oldblk, *newblk, *addblk; 139 xfs_da_state_blk_t *oldblk, *newblk, *addblk;
140 xfs_da_intnode_t *node; 140 xfs_da_intnode_t *node;
141 xfs_dabuf_t *bp; 141 struct xfs_buf *bp;
142 int max, action, error, i; 142 int max, action, error, i;
143 143
144 trace_xfs_da_split(state->args); 144 trace_xfs_da_split(state->args);
@@ -203,7 +203,6 @@ xfs_da_split(xfs_da_state_t *state)
203 case XFS_DA_NODE_MAGIC: 203 case XFS_DA_NODE_MAGIC:
204 error = xfs_da_node_split(state, oldblk, newblk, addblk, 204 error = xfs_da_node_split(state, oldblk, newblk, addblk,
205 max - i, &action); 205 max - i, &action);
206 xfs_da_buf_done(addblk->bp);
207 addblk->bp = NULL; 206 addblk->bp = NULL;
208 if (error) 207 if (error)
209 return(error); /* GROT: dir is inconsistent */ 208 return(error); /* GROT: dir is inconsistent */
@@ -221,13 +220,6 @@ xfs_da_split(xfs_da_state_t *state)
221 * Update the btree to show the new hashval for this child. 220 * Update the btree to show the new hashval for this child.
222 */ 221 */
223 xfs_da_fixhashpath(state, &state->path); 222 xfs_da_fixhashpath(state, &state->path);
224 /*
225 * If we won't need this block again, it's getting dropped
226 * from the active path by the loop control, so we need
227 * to mark it done now.
228 */
229 if (i > 0 || !addblk)
230 xfs_da_buf_done(oldblk->bp);
231 } 223 }
232 if (!addblk) 224 if (!addblk)
233 return(0); 225 return(0);
@@ -239,8 +231,6 @@ xfs_da_split(xfs_da_state_t *state)
239 oldblk = &state->path.blk[0]; 231 oldblk = &state->path.blk[0];
240 error = xfs_da_root_split(state, oldblk, addblk); 232 error = xfs_da_root_split(state, oldblk, addblk);
241 if (error) { 233 if (error) {
242 xfs_da_buf_done(oldblk->bp);
243 xfs_da_buf_done(addblk->bp);
244 addblk->bp = NULL; 234 addblk->bp = NULL;
245 return(error); /* GROT: dir is inconsistent */ 235 return(error); /* GROT: dir is inconsistent */
246 } 236 }
@@ -252,7 +242,7 @@ xfs_da_split(xfs_da_state_t *state)
252 * and the original block 0 could be at any position in the list. 242 * and the original block 0 could be at any position in the list.
253 */ 243 */
254 244
255 node = oldblk->bp->data; 245 node = oldblk->bp->b_addr;
256 if (node->hdr.info.forw) { 246 if (node->hdr.info.forw) {
257 if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) { 247 if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
258 bp = addblk->bp; 248 bp = addblk->bp;
@@ -260,13 +250,13 @@ xfs_da_split(xfs_da_state_t *state)
260 ASSERT(state->extravalid); 250 ASSERT(state->extravalid);
261 bp = state->extrablk.bp; 251 bp = state->extrablk.bp;
262 } 252 }
263 node = bp->data; 253 node = bp->b_addr;
264 node->hdr.info.back = cpu_to_be32(oldblk->blkno); 254 node->hdr.info.back = cpu_to_be32(oldblk->blkno);
265 xfs_da_log_buf(state->args->trans, bp, 255 xfs_trans_log_buf(state->args->trans, bp,
266 XFS_DA_LOGRANGE(node, &node->hdr.info, 256 XFS_DA_LOGRANGE(node, &node->hdr.info,
267 sizeof(node->hdr.info))); 257 sizeof(node->hdr.info)));
268 } 258 }
269 node = oldblk->bp->data; 259 node = oldblk->bp->b_addr;
270 if (node->hdr.info.back) { 260 if (node->hdr.info.back) {
271 if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) { 261 if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
272 bp = addblk->bp; 262 bp = addblk->bp;
@@ -274,14 +264,12 @@ xfs_da_split(xfs_da_state_t *state)
274 ASSERT(state->extravalid); 264 ASSERT(state->extravalid);
275 bp = state->extrablk.bp; 265 bp = state->extrablk.bp;
276 } 266 }
277 node = bp->data; 267 node = bp->b_addr;
278 node->hdr.info.forw = cpu_to_be32(oldblk->blkno); 268 node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
279 xfs_da_log_buf(state->args->trans, bp, 269 xfs_trans_log_buf(state->args->trans, bp,
280 XFS_DA_LOGRANGE(node, &node->hdr.info, 270 XFS_DA_LOGRANGE(node, &node->hdr.info,
281 sizeof(node->hdr.info))); 271 sizeof(node->hdr.info)));
282 } 272 }
283 xfs_da_buf_done(oldblk->bp);
284 xfs_da_buf_done(addblk->bp);
285 addblk->bp = NULL; 273 addblk->bp = NULL;
286 return(0); 274 return(0);
287} 275}
@@ -298,7 +286,7 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
298 xfs_da_intnode_t *node, *oldroot; 286 xfs_da_intnode_t *node, *oldroot;
299 xfs_da_args_t *args; 287 xfs_da_args_t *args;
300 xfs_dablk_t blkno; 288 xfs_dablk_t blkno;
301 xfs_dabuf_t *bp; 289 struct xfs_buf *bp;
302 int error, size; 290 int error, size;
303 xfs_inode_t *dp; 291 xfs_inode_t *dp;
304 xfs_trans_t *tp; 292 xfs_trans_t *tp;
@@ -323,8 +311,8 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
323 if (error) 311 if (error)
324 return(error); 312 return(error);
325 ASSERT(bp != NULL); 313 ASSERT(bp != NULL);
326 node = bp->data; 314 node = bp->b_addr;
327 oldroot = blk1->bp->data; 315 oldroot = blk1->bp->b_addr;
328 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) { 316 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
329 size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] - 317 size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
330 (char *)oldroot); 318 (char *)oldroot);
@@ -335,8 +323,7 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
335 (char *)leaf); 323 (char *)leaf);
336 } 324 }
337 memcpy(node, oldroot, size); 325 memcpy(node, oldroot, size);
338 xfs_da_log_buf(tp, bp, 0, size - 1); 326 xfs_trans_log_buf(tp, bp, 0, size - 1);
339 xfs_da_buf_done(blk1->bp);
340 blk1->bp = bp; 327 blk1->bp = bp;
341 blk1->blkno = blkno; 328 blk1->blkno = blkno;
342 329
@@ -348,7 +335,7 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
348 be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork); 335 be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork);
349 if (error) 336 if (error)
350 return(error); 337 return(error);
351 node = bp->data; 338 node = bp->b_addr;
352 node->btree[0].hashval = cpu_to_be32(blk1->hashval); 339 node->btree[0].hashval = cpu_to_be32(blk1->hashval);
353 node->btree[0].before = cpu_to_be32(blk1->blkno); 340 node->btree[0].before = cpu_to_be32(blk1->blkno);
354 node->btree[1].hashval = cpu_to_be32(blk2->hashval); 341 node->btree[1].hashval = cpu_to_be32(blk2->hashval);
@@ -365,10 +352,9 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
365#endif 352#endif
366 353
367 /* Header is already logged by xfs_da_node_create */ 354 /* Header is already logged by xfs_da_node_create */
368 xfs_da_log_buf(tp, bp, 355 xfs_trans_log_buf(tp, bp,
369 XFS_DA_LOGRANGE(node, node->btree, 356 XFS_DA_LOGRANGE(node, node->btree,
370 sizeof(xfs_da_node_entry_t) * 2)); 357 sizeof(xfs_da_node_entry_t) * 2));
371 xfs_da_buf_done(bp);
372 358
373 return(0); 359 return(0);
374} 360}
@@ -389,7 +375,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
389 375
390 trace_xfs_da_node_split(state->args); 376 trace_xfs_da_node_split(state->args);
391 377
392 node = oldblk->bp->data; 378 node = oldblk->bp->b_addr;
393 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 379 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
394 380
395 /* 381 /*
@@ -436,7 +422,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
436 * 422 *
437 * If we had double-split op below us, then add the extra block too. 423 * If we had double-split op below us, then add the extra block too.
438 */ 424 */
439 node = oldblk->bp->data; 425 node = oldblk->bp->b_addr;
440 if (oldblk->index <= be16_to_cpu(node->hdr.count)) { 426 if (oldblk->index <= be16_to_cpu(node->hdr.count)) {
441 oldblk->index++; 427 oldblk->index++;
442 xfs_da_node_add(state, oldblk, addblk); 428 xfs_da_node_add(state, oldblk, addblk);
@@ -477,8 +463,8 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
477 463
478 trace_xfs_da_node_rebalance(state->args); 464 trace_xfs_da_node_rebalance(state->args);
479 465
480 node1 = blk1->bp->data; 466 node1 = blk1->bp->b_addr;
481 node2 = blk2->bp->data; 467 node2 = blk2->bp->b_addr;
482 /* 468 /*
483 * Figure out how many entries need to move, and in which direction. 469 * Figure out how many entries need to move, and in which direction.
484 * Swap the nodes around if that makes it simpler. 470 * Swap the nodes around if that makes it simpler.
@@ -532,7 +518,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
532 btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)]; 518 btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
533 memcpy(btree_d, btree_s, tmp); 519 memcpy(btree_d, btree_s, tmp);
534 be16_add_cpu(&node1->hdr.count, count); 520 be16_add_cpu(&node1->hdr.count, count);
535 xfs_da_log_buf(tp, blk1->bp, 521 xfs_trans_log_buf(tp, blk1->bp,
536 XFS_DA_LOGRANGE(node1, btree_d, tmp)); 522 XFS_DA_LOGRANGE(node1, btree_d, tmp));
537 523
538 /* 524 /*
@@ -549,9 +535,9 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
549 /* 535 /*
550 * Log header of node 1 and all current bits of node 2. 536 * Log header of node 1 and all current bits of node 2.
551 */ 537 */
552 xfs_da_log_buf(tp, blk1->bp, 538 xfs_trans_log_buf(tp, blk1->bp,
553 XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr))); 539 XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));
554 xfs_da_log_buf(tp, blk2->bp, 540 xfs_trans_log_buf(tp, blk2->bp,
555 XFS_DA_LOGRANGE(node2, &node2->hdr, 541 XFS_DA_LOGRANGE(node2, &node2->hdr,
556 sizeof(node2->hdr) + 542 sizeof(node2->hdr) +
557 sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count))); 543 sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));
@@ -560,8 +546,8 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
560 * Record the last hashval from each block for upward propagation. 546 * Record the last hashval from each block for upward propagation.
561 * (note: don't use the swapped node pointers) 547 * (note: don't use the swapped node pointers)
562 */ 548 */
563 node1 = blk1->bp->data; 549 node1 = blk1->bp->b_addr;
564 node2 = blk2->bp->data; 550 node2 = blk2->bp->b_addr;
565 blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval); 551 blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);
566 blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval); 552 blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);
567 553
@@ -587,7 +573,7 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
587 573
588 trace_xfs_da_node_add(state->args); 574 trace_xfs_da_node_add(state->args);
589 575
590 node = oldblk->bp->data; 576 node = oldblk->bp->b_addr;
591 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 577 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
592 ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count))); 578 ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
593 ASSERT(newblk->blkno != 0); 579 ASSERT(newblk->blkno != 0);
@@ -606,10 +592,10 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
606 } 592 }
607 btree->hashval = cpu_to_be32(newblk->hashval); 593 btree->hashval = cpu_to_be32(newblk->hashval);
608 btree->before = cpu_to_be32(newblk->blkno); 594 btree->before = cpu_to_be32(newblk->blkno);
609 xfs_da_log_buf(state->args->trans, oldblk->bp, 595 xfs_trans_log_buf(state->args->trans, oldblk->bp,
610 XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree))); 596 XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
611 be16_add_cpu(&node->hdr.count, 1); 597 be16_add_cpu(&node->hdr.count, 1);
612 xfs_da_log_buf(state->args->trans, oldblk->bp, 598 xfs_trans_log_buf(state->args->trans, oldblk->bp,
613 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); 599 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
614 600
615 /* 601 /*
@@ -735,7 +721,7 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
735 xfs_da_intnode_t *oldroot; 721 xfs_da_intnode_t *oldroot;
736 xfs_da_args_t *args; 722 xfs_da_args_t *args;
737 xfs_dablk_t child; 723 xfs_dablk_t child;
738 xfs_dabuf_t *bp; 724 struct xfs_buf *bp;
739 int error; 725 int error;
740 726
741 trace_xfs_da_root_join(state->args); 727 trace_xfs_da_root_join(state->args);
@@ -743,7 +729,7 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
743 args = state->args; 729 args = state->args;
744 ASSERT(args != NULL); 730 ASSERT(args != NULL);
745 ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC); 731 ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
746 oldroot = root_blk->bp->data; 732 oldroot = root_blk->bp->b_addr;
747 ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 733 ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
748 ASSERT(!oldroot->hdr.info.forw); 734 ASSERT(!oldroot->hdr.info.forw);
749 ASSERT(!oldroot->hdr.info.back); 735 ASSERT(!oldroot->hdr.info.back);
@@ -765,11 +751,11 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
765 if (error) 751 if (error)
766 return(error); 752 return(error);
767 ASSERT(bp != NULL); 753 ASSERT(bp != NULL);
768 xfs_da_blkinfo_onlychild_validate(bp->data, 754 xfs_da_blkinfo_onlychild_validate(bp->b_addr,
769 be16_to_cpu(oldroot->hdr.level)); 755 be16_to_cpu(oldroot->hdr.level));
770 756
771 memcpy(root_blk->bp->data, bp->data, state->blocksize); 757 memcpy(root_blk->bp->b_addr, bp->b_addr, state->blocksize);
772 xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1); 758 xfs_trans_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
773 error = xfs_da_shrink_inode(args, child, bp); 759 error = xfs_da_shrink_inode(args, child, bp);
774 return(error); 760 return(error);
775} 761}
@@ -791,7 +777,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
791 xfs_da_blkinfo_t *info; 777 xfs_da_blkinfo_t *info;
792 int count, forward, error, retval, i; 778 int count, forward, error, retval, i;
793 xfs_dablk_t blkno; 779 xfs_dablk_t blkno;
794 xfs_dabuf_t *bp; 780 struct xfs_buf *bp;
795 781
796 /* 782 /*
797 * Check for the degenerate case of the block being over 50% full. 783 * Check for the degenerate case of the block being over 50% full.
@@ -799,7 +785,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
799 * to coalesce with a sibling. 785 * to coalesce with a sibling.
800 */ 786 */
801 blk = &state->path.blk[ state->path.active-1 ]; 787 blk = &state->path.blk[ state->path.active-1 ];
802 info = blk->bp->data; 788 info = blk->bp->b_addr;
803 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 789 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
804 node = (xfs_da_intnode_t *)info; 790 node = (xfs_da_intnode_t *)info;
805 count = be16_to_cpu(node->hdr.count); 791 count = be16_to_cpu(node->hdr.count);
@@ -859,10 +845,10 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
859 count = state->node_ents; 845 count = state->node_ents;
860 count -= state->node_ents >> 2; 846 count -= state->node_ents >> 2;
861 count -= be16_to_cpu(node->hdr.count); 847 count -= be16_to_cpu(node->hdr.count);
862 node = bp->data; 848 node = bp->b_addr;
863 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 849 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
864 count -= be16_to_cpu(node->hdr.count); 850 count -= be16_to_cpu(node->hdr.count);
865 xfs_da_brelse(state->args->trans, bp); 851 xfs_trans_brelse(state->args->trans, bp);
866 if (count >= 0) 852 if (count >= 0)
867 break; /* fits with at least 25% to spare */ 853 break; /* fits with at least 25% to spare */
868 } 854 }
@@ -934,14 +920,14 @@ xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
934 break; 920 break;
935 } 921 }
936 for (blk--, level--; level >= 0; blk--, level--) { 922 for (blk--, level--; level >= 0; blk--, level--) {
937 node = blk->bp->data; 923 node = blk->bp->b_addr;
938 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 924 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
939 btree = &node->btree[ blk->index ]; 925 btree = &node->btree[ blk->index ];
940 if (be32_to_cpu(btree->hashval) == lasthash) 926 if (be32_to_cpu(btree->hashval) == lasthash)
941 break; 927 break;
942 blk->hashval = lasthash; 928 blk->hashval = lasthash;
943 btree->hashval = cpu_to_be32(lasthash); 929 btree->hashval = cpu_to_be32(lasthash);
944 xfs_da_log_buf(state->args->trans, blk->bp, 930 xfs_trans_log_buf(state->args->trans, blk->bp,
945 XFS_DA_LOGRANGE(node, btree, sizeof(*btree))); 931 XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
946 932
947 lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval); 933 lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
@@ -960,7 +946,7 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
960 946
961 trace_xfs_da_node_remove(state->args); 947 trace_xfs_da_node_remove(state->args);
962 948
963 node = drop_blk->bp->data; 949 node = drop_blk->bp->b_addr;
964 ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count)); 950 ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count));
965 ASSERT(drop_blk->index >= 0); 951 ASSERT(drop_blk->index >= 0);
966 952
@@ -972,15 +958,15 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
972 tmp = be16_to_cpu(node->hdr.count) - drop_blk->index - 1; 958 tmp = be16_to_cpu(node->hdr.count) - drop_blk->index - 1;
973 tmp *= (uint)sizeof(xfs_da_node_entry_t); 959 tmp *= (uint)sizeof(xfs_da_node_entry_t);
974 memmove(btree, btree + 1, tmp); 960 memmove(btree, btree + 1, tmp);
975 xfs_da_log_buf(state->args->trans, drop_blk->bp, 961 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
976 XFS_DA_LOGRANGE(node, btree, tmp)); 962 XFS_DA_LOGRANGE(node, btree, tmp));
977 btree = &node->btree[be16_to_cpu(node->hdr.count)-1]; 963 btree = &node->btree[be16_to_cpu(node->hdr.count)-1];
978 } 964 }
979 memset((char *)btree, 0, sizeof(xfs_da_node_entry_t)); 965 memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
980 xfs_da_log_buf(state->args->trans, drop_blk->bp, 966 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
981 XFS_DA_LOGRANGE(node, btree, sizeof(*btree))); 967 XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
982 be16_add_cpu(&node->hdr.count, -1); 968 be16_add_cpu(&node->hdr.count, -1);
983 xfs_da_log_buf(state->args->trans, drop_blk->bp, 969 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
984 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); 970 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
985 971
986 /* 972 /*
@@ -1005,8 +991,8 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1005 991
1006 trace_xfs_da_node_unbalance(state->args); 992 trace_xfs_da_node_unbalance(state->args);
1007 993
1008 drop_node = drop_blk->bp->data; 994 drop_node = drop_blk->bp->b_addr;
1009 save_node = save_blk->bp->data; 995 save_node = save_blk->bp->b_addr;
1010 ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 996 ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1011 ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 997 ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1012 tp = state->args->trans; 998 tp = state->args->trans;
@@ -1023,13 +1009,13 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1023 tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t); 1009 tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
1024 memmove(btree, &save_node->btree[0], tmp); 1010 memmove(btree, &save_node->btree[0], tmp);
1025 btree = &save_node->btree[0]; 1011 btree = &save_node->btree[0];
1026 xfs_da_log_buf(tp, save_blk->bp, 1012 xfs_trans_log_buf(tp, save_blk->bp,
1027 XFS_DA_LOGRANGE(save_node, btree, 1013 XFS_DA_LOGRANGE(save_node, btree,
1028 (be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) * 1014 (be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) *
1029 sizeof(xfs_da_node_entry_t))); 1015 sizeof(xfs_da_node_entry_t)));
1030 } else { 1016 } else {
1031 btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)]; 1017 btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)];
1032 xfs_da_log_buf(tp, save_blk->bp, 1018 xfs_trans_log_buf(tp, save_blk->bp,
1033 XFS_DA_LOGRANGE(save_node, btree, 1019 XFS_DA_LOGRANGE(save_node, btree,
1034 be16_to_cpu(drop_node->hdr.count) * 1020 be16_to_cpu(drop_node->hdr.count) *
1035 sizeof(xfs_da_node_entry_t))); 1021 sizeof(xfs_da_node_entry_t)));
@@ -1042,7 +1028,7 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1042 memcpy(btree, &drop_node->btree[0], tmp); 1028 memcpy(btree, &drop_node->btree[0], tmp);
1043 be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count)); 1029 be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));
1044 1030
1045 xfs_da_log_buf(tp, save_blk->bp, 1031 xfs_trans_log_buf(tp, save_blk->bp,
1046 XFS_DA_LOGRANGE(save_node, &save_node->hdr, 1032 XFS_DA_LOGRANGE(save_node, &save_node->hdr,
1047 sizeof(save_node->hdr))); 1033 sizeof(save_node->hdr)));
1048 1034
@@ -1100,7 +1086,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
1100 state->path.active--; 1086 state->path.active--;
1101 return(error); 1087 return(error);
1102 } 1088 }
1103 curr = blk->bp->data; 1089 curr = blk->bp->b_addr;
1104 blk->magic = be16_to_cpu(curr->magic); 1090 blk->magic = be16_to_cpu(curr->magic);
1105 ASSERT(blk->magic == XFS_DA_NODE_MAGIC || 1091 ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
1106 blk->magic == XFS_DIR2_LEAFN_MAGIC || 1092 blk->magic == XFS_DIR2_LEAFN_MAGIC ||
@@ -1110,7 +1096,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
1110 * Search an intermediate node for a match. 1096 * Search an intermediate node for a match.
1111 */ 1097 */
1112 if (blk->magic == XFS_DA_NODE_MAGIC) { 1098 if (blk->magic == XFS_DA_NODE_MAGIC) {
1113 node = blk->bp->data; 1099 node = blk->bp->b_addr;
1114 max = be16_to_cpu(node->hdr.count); 1100 max = be16_to_cpu(node->hdr.count);
1115 blk->hashval = be32_to_cpu(node->btree[max-1].hashval); 1101 blk->hashval = be32_to_cpu(node->btree[max-1].hashval);
1116 1102
@@ -1216,15 +1202,15 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
1216 xfs_da_blkinfo_t *old_info, *new_info, *tmp_info; 1202 xfs_da_blkinfo_t *old_info, *new_info, *tmp_info;
1217 xfs_da_args_t *args; 1203 xfs_da_args_t *args;
1218 int before=0, error; 1204 int before=0, error;
1219 xfs_dabuf_t *bp; 1205 struct xfs_buf *bp;
1220 1206
1221 /* 1207 /*
1222 * Set up environment. 1208 * Set up environment.
1223 */ 1209 */
1224 args = state->args; 1210 args = state->args;
1225 ASSERT(args != NULL); 1211 ASSERT(args != NULL);
1226 old_info = old_blk->bp->data; 1212 old_info = old_blk->bp->b_addr;
1227 new_info = new_blk->bp->data; 1213 new_info = new_blk->bp->b_addr;
1228 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC || 1214 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
1229 old_blk->magic == XFS_DIR2_LEAFN_MAGIC || 1215 old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1230 old_blk->magic == XFS_ATTR_LEAF_MAGIC); 1216 old_blk->magic == XFS_ATTR_LEAF_MAGIC);
@@ -1261,12 +1247,11 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
1261 if (error) 1247 if (error)
1262 return(error); 1248 return(error);
1263 ASSERT(bp != NULL); 1249 ASSERT(bp != NULL);
1264 tmp_info = bp->data; 1250 tmp_info = bp->b_addr;
1265 ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic)); 1251 ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic));
1266 ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno); 1252 ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
1267 tmp_info->forw = cpu_to_be32(new_blk->blkno); 1253 tmp_info->forw = cpu_to_be32(new_blk->blkno);
1268 xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); 1254 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1269 xfs_da_buf_done(bp);
1270 } 1255 }
1271 old_info->back = cpu_to_be32(new_blk->blkno); 1256 old_info->back = cpu_to_be32(new_blk->blkno);
1272 } else { 1257 } else {
@@ -1283,18 +1268,17 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
1283 if (error) 1268 if (error)
1284 return(error); 1269 return(error);
1285 ASSERT(bp != NULL); 1270 ASSERT(bp != NULL);
1286 tmp_info = bp->data; 1271 tmp_info = bp->b_addr;
1287 ASSERT(tmp_info->magic == old_info->magic); 1272 ASSERT(tmp_info->magic == old_info->magic);
1288 ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno); 1273 ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
1289 tmp_info->back = cpu_to_be32(new_blk->blkno); 1274 tmp_info->back = cpu_to_be32(new_blk->blkno);
1290 xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); 1275 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1291 xfs_da_buf_done(bp);
1292 } 1276 }
1293 old_info->forw = cpu_to_be32(new_blk->blkno); 1277 old_info->forw = cpu_to_be32(new_blk->blkno);
1294 } 1278 }
1295 1279
1296 xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1); 1280 xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
1297 xfs_da_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1); 1281 xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
1298 return(0); 1282 return(0);
1299} 1283}
1300 1284
@@ -1302,12 +1286,14 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
1302 * Compare two intermediate nodes for "order". 1286 * Compare two intermediate nodes for "order".
1303 */ 1287 */
1304STATIC int 1288STATIC int
1305xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp) 1289xfs_da_node_order(
1290 struct xfs_buf *node1_bp,
1291 struct xfs_buf *node2_bp)
1306{ 1292{
1307 xfs_da_intnode_t *node1, *node2; 1293 xfs_da_intnode_t *node1, *node2;
1308 1294
1309 node1 = node1_bp->data; 1295 node1 = node1_bp->b_addr;
1310 node2 = node2_bp->data; 1296 node2 = node2_bp->b_addr;
1311 ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) && 1297 ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) &&
1312 node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 1298 node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1313 if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) && 1299 if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
@@ -1324,11 +1310,13 @@ xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
1324 * Pick up the last hashvalue from an intermediate node. 1310 * Pick up the last hashvalue from an intermediate node.
1325 */ 1311 */
1326STATIC uint 1312STATIC uint
1327xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count) 1313xfs_da_node_lasthash(
1314 struct xfs_buf *bp,
1315 int *count)
1328{ 1316{
1329 xfs_da_intnode_t *node; 1317 xfs_da_intnode_t *node;
1330 1318
1331 node = bp->data; 1319 node = bp->b_addr;
1332 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 1320 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1333 if (count) 1321 if (count)
1334 *count = be16_to_cpu(node->hdr.count); 1322 *count = be16_to_cpu(node->hdr.count);
@@ -1346,7 +1334,7 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1346{ 1334{
1347 xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info; 1335 xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info;
1348 xfs_da_args_t *args; 1336 xfs_da_args_t *args;
1349 xfs_dabuf_t *bp; 1337 struct xfs_buf *bp;
1350 int error; 1338 int error;
1351 1339
1352 /* 1340 /*
@@ -1354,8 +1342,8 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1354 */ 1342 */
1355 args = state->args; 1343 args = state->args;
1356 ASSERT(args != NULL); 1344 ASSERT(args != NULL);
1357 save_info = save_blk->bp->data; 1345 save_info = save_blk->bp->b_addr;
1358 drop_info = drop_blk->bp->data; 1346 drop_info = drop_blk->bp->b_addr;
1359 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC || 1347 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
1360 save_blk->magic == XFS_DIR2_LEAFN_MAGIC || 1348 save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1361 save_blk->magic == XFS_ATTR_LEAF_MAGIC); 1349 save_blk->magic == XFS_ATTR_LEAF_MAGIC);
@@ -1380,13 +1368,12 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1380 if (error) 1368 if (error)
1381 return(error); 1369 return(error);
1382 ASSERT(bp != NULL); 1370 ASSERT(bp != NULL);
1383 tmp_info = bp->data; 1371 tmp_info = bp->b_addr;
1384 ASSERT(tmp_info->magic == save_info->magic); 1372 ASSERT(tmp_info->magic == save_info->magic);
1385 ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno); 1373 ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
1386 tmp_info->forw = cpu_to_be32(save_blk->blkno); 1374 tmp_info->forw = cpu_to_be32(save_blk->blkno);
1387 xfs_da_log_buf(args->trans, bp, 0, 1375 xfs_trans_log_buf(args->trans, bp, 0,
1388 sizeof(*tmp_info) - 1); 1376 sizeof(*tmp_info) - 1);
1389 xfs_da_buf_done(bp);
1390 } 1377 }
1391 } else { 1378 } else {
1392 trace_xfs_da_unlink_forward(args); 1379 trace_xfs_da_unlink_forward(args);
@@ -1398,17 +1385,16 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1398 if (error) 1385 if (error)
1399 return(error); 1386 return(error);
1400 ASSERT(bp != NULL); 1387 ASSERT(bp != NULL);
1401 tmp_info = bp->data; 1388 tmp_info = bp->b_addr;
1402 ASSERT(tmp_info->magic == save_info->magic); 1389 ASSERT(tmp_info->magic == save_info->magic);
1403 ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno); 1390 ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
1404 tmp_info->back = cpu_to_be32(save_blk->blkno); 1391 tmp_info->back = cpu_to_be32(save_blk->blkno);
1405 xfs_da_log_buf(args->trans, bp, 0, 1392 xfs_trans_log_buf(args->trans, bp, 0,
1406 sizeof(*tmp_info) - 1); 1393 sizeof(*tmp_info) - 1);
1407 xfs_da_buf_done(bp);
1408 } 1394 }
1409 } 1395 }
1410 1396
1411 xfs_da_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1); 1397 xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
1412 return(0); 1398 return(0);
1413} 1399}
1414 1400
@@ -1443,7 +1429,7 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
1443 level = (path->active-1) - 1; /* skip bottom layer in path */ 1429 level = (path->active-1) - 1; /* skip bottom layer in path */
1444 for (blk = &path->blk[level]; level >= 0; blk--, level--) { 1430 for (blk = &path->blk[level]; level >= 0; blk--, level--) {
1445 ASSERT(blk->bp != NULL); 1431 ASSERT(blk->bp != NULL);
1446 node = blk->bp->data; 1432 node = blk->bp->b_addr;
1447 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); 1433 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1448 if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) { 1434 if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
1449 blk->index++; 1435 blk->index++;
@@ -1471,7 +1457,7 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
1471 * (if it's dirty, trans won't actually let go) 1457 * (if it's dirty, trans won't actually let go)
1472 */ 1458 */
1473 if (release) 1459 if (release)
1474 xfs_da_brelse(args->trans, blk->bp); 1460 xfs_trans_brelse(args->trans, blk->bp);
1475 1461
1476 /* 1462 /*
1477 * Read the next child block. 1463 * Read the next child block.
@@ -1482,7 +1468,7 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
1482 if (error) 1468 if (error)
1483 return(error); 1469 return(error);
1484 ASSERT(blk->bp != NULL); 1470 ASSERT(blk->bp != NULL);
1485 info = blk->bp->data; 1471 info = blk->bp->b_addr;
1486 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) || 1472 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1487 info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) || 1473 info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1488 info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); 1474 info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
@@ -1702,11 +1688,13 @@ xfs_da_grow_inode(
1702 * a bmap btree split to do that. 1688 * a bmap btree split to do that.
1703 */ 1689 */
1704STATIC int 1690STATIC int
1705xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, 1691xfs_da_swap_lastblock(
1706 xfs_dabuf_t **dead_bufp) 1692 xfs_da_args_t *args,
1693 xfs_dablk_t *dead_blknop,
1694 struct xfs_buf **dead_bufp)
1707{ 1695{
1708 xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno; 1696 xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno;
1709 xfs_dabuf_t *dead_buf, *last_buf, *sib_buf, *par_buf; 1697 struct xfs_buf *dead_buf, *last_buf, *sib_buf, *par_buf;
1710 xfs_fileoff_t lastoff; 1698 xfs_fileoff_t lastoff;
1711 xfs_inode_t *ip; 1699 xfs_inode_t *ip;
1712 xfs_trans_t *tp; 1700 xfs_trans_t *tp;
@@ -1744,9 +1732,9 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1744 /* 1732 /*
1745 * Copy the last block into the dead buffer and log it. 1733 * Copy the last block into the dead buffer and log it.
1746 */ 1734 */
1747 memcpy(dead_buf->data, last_buf->data, mp->m_dirblksize); 1735 memcpy(dead_buf->b_addr, last_buf->b_addr, mp->m_dirblksize);
1748 xfs_da_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1); 1736 xfs_trans_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
1749 dead_info = dead_buf->data; 1737 dead_info = dead_buf->b_addr;
1750 /* 1738 /*
1751 * Get values from the moved block. 1739 * Get values from the moved block.
1752 */ 1740 */
@@ -1767,7 +1755,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1767 if ((sib_blkno = be32_to_cpu(dead_info->back))) { 1755 if ((sib_blkno = be32_to_cpu(dead_info->back))) {
1768 if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w))) 1756 if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
1769 goto done; 1757 goto done;
1770 sib_info = sib_buf->data; 1758 sib_info = sib_buf->b_addr;
1771 if (unlikely( 1759 if (unlikely(
1772 be32_to_cpu(sib_info->forw) != last_blkno || 1760 be32_to_cpu(sib_info->forw) != last_blkno ||
1773 sib_info->magic != dead_info->magic)) { 1761 sib_info->magic != dead_info->magic)) {
@@ -1777,10 +1765,9 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1777 goto done; 1765 goto done;
1778 } 1766 }
1779 sib_info->forw = cpu_to_be32(dead_blkno); 1767 sib_info->forw = cpu_to_be32(dead_blkno);
1780 xfs_da_log_buf(tp, sib_buf, 1768 xfs_trans_log_buf(tp, sib_buf,
1781 XFS_DA_LOGRANGE(sib_info, &sib_info->forw, 1769 XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
1782 sizeof(sib_info->forw))); 1770 sizeof(sib_info->forw)));
1783 xfs_da_buf_done(sib_buf);
1784 sib_buf = NULL; 1771 sib_buf = NULL;
1785 } 1772 }
1786 /* 1773 /*
@@ -1789,7 +1776,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1789 if ((sib_blkno = be32_to_cpu(dead_info->forw))) { 1776 if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
1790 if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w))) 1777 if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
1791 goto done; 1778 goto done;
1792 sib_info = sib_buf->data; 1779 sib_info = sib_buf->b_addr;
1793 if (unlikely( 1780 if (unlikely(
1794 be32_to_cpu(sib_info->back) != last_blkno || 1781 be32_to_cpu(sib_info->back) != last_blkno ||
1795 sib_info->magic != dead_info->magic)) { 1782 sib_info->magic != dead_info->magic)) {
@@ -1799,10 +1786,9 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1799 goto done; 1786 goto done;
1800 } 1787 }
1801 sib_info->back = cpu_to_be32(dead_blkno); 1788 sib_info->back = cpu_to_be32(dead_blkno);
1802 xfs_da_log_buf(tp, sib_buf, 1789 xfs_trans_log_buf(tp, sib_buf,
1803 XFS_DA_LOGRANGE(sib_info, &sib_info->back, 1790 XFS_DA_LOGRANGE(sib_info, &sib_info->back,
1804 sizeof(sib_info->back))); 1791 sizeof(sib_info->back)));
1805 xfs_da_buf_done(sib_buf);
1806 sib_buf = NULL; 1792 sib_buf = NULL;
1807 } 1793 }
1808 par_blkno = mp->m_dirleafblk; 1794 par_blkno = mp->m_dirleafblk;
@@ -1813,7 +1799,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1813 for (;;) { 1799 for (;;) {
1814 if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w))) 1800 if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
1815 goto done; 1801 goto done;
1816 par_node = par_buf->data; 1802 par_node = par_buf->b_addr;
1817 if (unlikely(par_node->hdr.info.magic != 1803 if (unlikely(par_node->hdr.info.magic !=
1818 cpu_to_be16(XFS_DA_NODE_MAGIC) || 1804 cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1819 (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) { 1805 (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
@@ -1837,7 +1823,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1837 par_blkno = be32_to_cpu(par_node->btree[entno].before); 1823 par_blkno = be32_to_cpu(par_node->btree[entno].before);
1838 if (level == dead_level + 1) 1824 if (level == dead_level + 1)
1839 break; 1825 break;
1840 xfs_da_brelse(tp, par_buf); 1826 xfs_trans_brelse(tp, par_buf);
1841 par_buf = NULL; 1827 par_buf = NULL;
1842 } 1828 }
1843 /* 1829 /*
@@ -1853,7 +1839,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1853 if (entno < be16_to_cpu(par_node->hdr.count)) 1839 if (entno < be16_to_cpu(par_node->hdr.count))
1854 break; 1840 break;
1855 par_blkno = be32_to_cpu(par_node->hdr.info.forw); 1841 par_blkno = be32_to_cpu(par_node->hdr.info.forw);
1856 xfs_da_brelse(tp, par_buf); 1842 xfs_trans_brelse(tp, par_buf);
1857 par_buf = NULL; 1843 par_buf = NULL;
1858 if (unlikely(par_blkno == 0)) { 1844 if (unlikely(par_blkno == 0)) {
1859 XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)", 1845 XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
@@ -1863,7 +1849,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1863 } 1849 }
1864 if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w))) 1850 if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
1865 goto done; 1851 goto done;
1866 par_node = par_buf->data; 1852 par_node = par_buf->b_addr;
1867 if (unlikely( 1853 if (unlikely(
1868 be16_to_cpu(par_node->hdr.level) != level || 1854 be16_to_cpu(par_node->hdr.level) != level ||
1869 par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) { 1855 par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) {
@@ -1878,20 +1864,18 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1878 * Update the parent entry pointing to the moved block. 1864 * Update the parent entry pointing to the moved block.
1879 */ 1865 */
1880 par_node->btree[entno].before = cpu_to_be32(dead_blkno); 1866 par_node->btree[entno].before = cpu_to_be32(dead_blkno);
1881 xfs_da_log_buf(tp, par_buf, 1867 xfs_trans_log_buf(tp, par_buf,
1882 XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before, 1868 XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
1883 sizeof(par_node->btree[entno].before))); 1869 sizeof(par_node->btree[entno].before)));
1884 xfs_da_buf_done(par_buf);
1885 xfs_da_buf_done(dead_buf);
1886 *dead_blknop = last_blkno; 1870 *dead_blknop = last_blkno;
1887 *dead_bufp = last_buf; 1871 *dead_bufp = last_buf;
1888 return 0; 1872 return 0;
1889done: 1873done:
1890 if (par_buf) 1874 if (par_buf)
1891 xfs_da_brelse(tp, par_buf); 1875 xfs_trans_brelse(tp, par_buf);
1892 if (sib_buf) 1876 if (sib_buf)
1893 xfs_da_brelse(tp, sib_buf); 1877 xfs_trans_brelse(tp, sib_buf);
1894 xfs_da_brelse(tp, last_buf); 1878 xfs_trans_brelse(tp, last_buf);
1895 return error; 1879 return error;
1896} 1880}
1897 1881
@@ -1899,8 +1883,10 @@ done:
1899 * Remove a btree block from a directory or attribute. 1883 * Remove a btree block from a directory or attribute.
1900 */ 1884 */
1901int 1885int
1902xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno, 1886xfs_da_shrink_inode(
1903 xfs_dabuf_t *dead_buf) 1887 xfs_da_args_t *args,
1888 xfs_dablk_t dead_blkno,
1889 struct xfs_buf *dead_buf)
1904{ 1890{
1905 xfs_inode_t *dp; 1891 xfs_inode_t *dp;
1906 int done, error, w, count; 1892 int done, error, w, count;
@@ -1935,7 +1921,7 @@ xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
1935 break; 1921 break;
1936 } 1922 }
1937 } 1923 }
1938 xfs_da_binval(tp, dead_buf); 1924 xfs_trans_binval(tp, dead_buf);
1939 return error; 1925 return error;
1940} 1926}
1941 1927
@@ -1967,35 +1953,75 @@ xfs_da_map_covers_blocks(
1967} 1953}
1968 1954
1969/* 1955/*
1970 * Make a dabuf. 1956 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
1971 * Used for get_buf, read_buf, read_bufr, and reada_buf. 1957 *
1958 * For the single map case, it is assumed that the caller has provided a pointer
1959 * to a valid xfs_buf_map. For the multiple map case, this function will
1960 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
1961 * map pointer with the allocated map.
1972 */ 1962 */
1973STATIC int 1963static int
1974xfs_da_do_buf( 1964xfs_buf_map_from_irec(
1975 xfs_trans_t *trans, 1965 struct xfs_mount *mp,
1976 xfs_inode_t *dp, 1966 struct xfs_buf_map **mapp,
1977 xfs_dablk_t bno, 1967 unsigned int *nmaps,
1978 xfs_daddr_t *mappedbnop, 1968 struct xfs_bmbt_irec *irecs,
1979 xfs_dabuf_t **bpp, 1969 unsigned int nirecs)
1980 int whichfork,
1981 int caller)
1982{ 1970{
1983 xfs_buf_t *bp = NULL; 1971 struct xfs_buf_map *map;
1984 xfs_buf_t **bplist; 1972 int i;
1985 int error=0; 1973
1986 int i; 1974 ASSERT(*nmaps == 1);
1987 xfs_bmbt_irec_t map; 1975 ASSERT(nirecs >= 1);
1988 xfs_bmbt_irec_t *mapp; 1976
1989 xfs_daddr_t mappedbno; 1977 if (nirecs > 1) {
1990 xfs_mount_t *mp; 1978 map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_SLEEP);
1991 int nbplist=0; 1979 if (!map)
1992 int nfsb; 1980 return ENOMEM;
1993 int nmap; 1981 *mapp = map;
1994 xfs_dabuf_t *rbp; 1982 }
1983
1984 *nmaps = nirecs;
1985 map = *mapp;
1986 for (i = 0; i < *nmaps; i++) {
1987 ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
1988 irecs[i].br_startblock != HOLESTARTBLOCK);
1989 map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
1990 map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
1991 }
1992 return 0;
1993}
1994
1995/*
1996 * Map the block we are given ready for reading. There are three possible return
1997 * values:
1998 * -1 - will be returned if we land in a hole and mappedbno == -2 so the
1999 * caller knows not to execute a subsequent read.
2000 * 0 - if we mapped the block successfully
2001 * >0 - positive error number if there was an error.
2002 */
2003static int
2004xfs_dabuf_map(
2005 struct xfs_trans *trans,
2006 struct xfs_inode *dp,
2007 xfs_dablk_t bno,
2008 xfs_daddr_t mappedbno,
2009 int whichfork,
2010 struct xfs_buf_map **map,
2011 int *nmaps)
2012{
2013 struct xfs_mount *mp = dp->i_mount;
2014 int nfsb;
2015 int error = 0;
2016 struct xfs_bmbt_irec irec;
2017 struct xfs_bmbt_irec *irecs = &irec;
2018 int nirecs;
2019
2020 ASSERT(map && *map);
2021 ASSERT(*nmaps == 1);
1995 2022
1996 mp = dp->i_mount;
1997 nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1; 2023 nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
1998 mappedbno = *mappedbnop; 2024
1999 /* 2025 /*
2000 * Caller doesn't have a mapping. -2 means don't complain 2026 * Caller doesn't have a mapping. -2 means don't complain
2001 * if we land in a hole. 2027 * if we land in a hole.
@@ -2004,112 +2030,150 @@ xfs_da_do_buf(
2004 /* 2030 /*
2005 * Optimize the one-block case. 2031 * Optimize the one-block case.
2006 */ 2032 */
2007 if (nfsb == 1) 2033 if (nfsb != 1)
2008 mapp = &map; 2034 irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_SLEEP);
2009 else
2010 mapp = kmem_alloc(sizeof(*mapp) * nfsb, KM_SLEEP);
2011 2035
2012 nmap = nfsb; 2036 nirecs = nfsb;
2013 error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, mapp, 2037 error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
2014 &nmap, xfs_bmapi_aflag(whichfork)); 2038 &nirecs, xfs_bmapi_aflag(whichfork));
2015 if (error) 2039 if (error)
2016 goto exit0; 2040 goto out;
2017 } else { 2041 } else {
2018 map.br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno); 2042 irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
2019 map.br_startoff = (xfs_fileoff_t)bno; 2043 irecs->br_startoff = (xfs_fileoff_t)bno;
2020 map.br_blockcount = nfsb; 2044 irecs->br_blockcount = nfsb;
2021 mapp = &map; 2045 irecs->br_state = 0;
2022 nmap = 1; 2046 nirecs = 1;
2023 } 2047 }
2024 if (!xfs_da_map_covers_blocks(nmap, mapp, bno, nfsb)) { 2048
2025 error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED); 2049 if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
2050 error = mappedbno == -2 ? -1 : XFS_ERROR(EFSCORRUPTED);
2026 if (unlikely(error == EFSCORRUPTED)) { 2051 if (unlikely(error == EFSCORRUPTED)) {
2027 if (xfs_error_level >= XFS_ERRLEVEL_LOW) { 2052 if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
2053 int i;
2028 xfs_alert(mp, "%s: bno %lld dir: inode %lld", 2054 xfs_alert(mp, "%s: bno %lld dir: inode %lld",
2029 __func__, (long long)bno, 2055 __func__, (long long)bno,
2030 (long long)dp->i_ino); 2056 (long long)dp->i_ino);
2031 for (i = 0; i < nmap; i++) { 2057 for (i = 0; i < *nmaps; i++) {
2032 xfs_alert(mp, 2058 xfs_alert(mp,
2033"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d", 2059"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
2034 i, 2060 i,
2035 (long long)mapp[i].br_startoff, 2061 (long long)irecs[i].br_startoff,
2036 (long long)mapp[i].br_startblock, 2062 (long long)irecs[i].br_startblock,
2037 (long long)mapp[i].br_blockcount, 2063 (long long)irecs[i].br_blockcount,
2038 mapp[i].br_state); 2064 irecs[i].br_state);
2039 } 2065 }
2040 } 2066 }
2041 XFS_ERROR_REPORT("xfs_da_do_buf(1)", 2067 XFS_ERROR_REPORT("xfs_da_do_buf(1)",
2042 XFS_ERRLEVEL_LOW, mp); 2068 XFS_ERRLEVEL_LOW, mp);
2043 } 2069 }
2044 goto exit0; 2070 goto out;
2045 } 2071 }
2046 if (caller != 3 && nmap > 1) { 2072 error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
2047 bplist = kmem_alloc(sizeof(*bplist) * nmap, KM_SLEEP); 2073out:
2048 nbplist = 0; 2074 if (irecs != &irec)
2049 } else 2075 kmem_free(irecs);
2050 bplist = NULL; 2076 return error;
2051 /* 2077}
2052 * Turn the mapping(s) into buffer(s). 2078
2053 */ 2079/*
2054 for (i = 0; i < nmap; i++) { 2080 * Get a buffer for the dir/attr block.
2055 int nmapped; 2081 */
2056 2082int
2057 mappedbno = XFS_FSB_TO_DADDR(mp, mapp[i].br_startblock); 2083xfs_da_get_buf(
2058 if (i == 0) 2084 struct xfs_trans *trans,
2059 *mappedbnop = mappedbno; 2085 struct xfs_inode *dp,
2060 nmapped = (int)XFS_FSB_TO_BB(mp, mapp[i].br_blockcount); 2086 xfs_dablk_t bno,
2061 switch (caller) { 2087 xfs_daddr_t mappedbno,
2062 case 0: 2088 struct xfs_buf **bpp,
2063 bp = xfs_trans_get_buf(trans, mp->m_ddev_targp, 2089 int whichfork)
2064 mappedbno, nmapped, 0); 2090{
2065 error = bp ? bp->b_error : XFS_ERROR(EIO); 2091 struct xfs_buf *bp;
2066 break; 2092 struct xfs_buf_map map;
2067 case 1: 2093 struct xfs_buf_map *mapp;
2068 case 2: 2094 int nmap;
2069 bp = NULL; 2095 int error;
2070 error = xfs_trans_read_buf(mp, trans, mp->m_ddev_targp, 2096
2071 mappedbno, nmapped, 0, &bp); 2097 *bpp = NULL;
2072 break; 2098 mapp = &map;
2073 case 3: 2099 nmap = 1;
2074 xfs_buf_readahead(mp->m_ddev_targp, mappedbno, nmapped); 2100 error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
2101 &mapp, &nmap);
2102 if (error) {
2103 /* mapping a hole is not an error, but we don't continue */
2104 if (error == -1)
2075 error = 0; 2105 error = 0;
2076 bp = NULL; 2106 goto out_free;
2077 break;
2078 }
2079 if (error) {
2080 if (bp)
2081 xfs_trans_brelse(trans, bp);
2082 goto exit1;
2083 }
2084 if (!bp)
2085 continue;
2086 if (caller == 1) {
2087 if (whichfork == XFS_ATTR_FORK)
2088 xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
2089 else
2090 xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
2091 }
2092 if (bplist) {
2093 bplist[nbplist++] = bp;
2094 }
2095 } 2107 }
2096 /* 2108
2097 * Build a dabuf structure. 2109 bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
2098 */ 2110 mapp, nmap, 0);
2099 if (bplist) { 2111 error = bp ? bp->b_error : XFS_ERROR(EIO);
2100 rbp = xfs_da_buf_make(nbplist, bplist); 2112 if (error) {
2101 } else if (bp) 2113 xfs_trans_brelse(trans, bp);
2102 rbp = xfs_da_buf_make(1, &bp); 2114 goto out_free;
2115 }
2116
2117 *bpp = bp;
2118
2119out_free:
2120 if (mapp != &map)
2121 kmem_free(mapp);
2122
2123 return error;
2124}
2125
2126/*
2127 * Get a buffer for the dir/attr block, fill in the contents.
2128 */
2129int
2130xfs_da_read_buf(
2131 struct xfs_trans *trans,
2132 struct xfs_inode *dp,
2133 xfs_dablk_t bno,
2134 xfs_daddr_t mappedbno,
2135 struct xfs_buf **bpp,
2136 int whichfork)
2137{
2138 struct xfs_buf *bp;
2139 struct xfs_buf_map map;
2140 struct xfs_buf_map *mapp;
2141 int nmap;
2142 int error;
2143
2144 *bpp = NULL;
2145 mapp = &map;
2146 nmap = 1;
2147 error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
2148 &mapp, &nmap);
2149 if (error) {
2150 /* mapping a hole is not an error, but we don't continue */
2151 if (error == -1)
2152 error = 0;
2153 goto out_free;
2154 }
2155
2156 error = xfs_trans_read_buf_map(dp->i_mount, trans,
2157 dp->i_mount->m_ddev_targp,
2158 mapp, nmap, 0, &bp);
2159 if (error)
2160 goto out_free;
2161
2162 if (whichfork == XFS_ATTR_FORK)
2163 xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
2103 else 2164 else
2104 rbp = NULL; 2165 xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
2166
2105 /* 2167 /*
2106 * For read_buf, check the magic number. 2168 * This verification code will be moved to a CRC verification callback
2169 * function so just leave it here unchanged until then.
2107 */ 2170 */
2108 if (caller == 1) { 2171 {
2109 xfs_dir2_data_hdr_t *hdr = rbp->data; 2172 xfs_dir2_data_hdr_t *hdr = bp->b_addr;
2110 xfs_dir2_free_t *free = rbp->data; 2173 xfs_dir2_free_t *free = bp->b_addr;
2111 xfs_da_blkinfo_t *info = rbp->data; 2174 xfs_da_blkinfo_t *info = bp->b_addr;
2112 uint magic, magic1; 2175 uint magic, magic1;
2176 struct xfs_mount *mp = dp->i_mount;
2113 2177
2114 magic = be16_to_cpu(info->magic); 2178 magic = be16_to_cpu(info->magic);
2115 magic1 = be32_to_cpu(hdr->magic); 2179 magic1 = be32_to_cpu(hdr->magic);
@@ -2123,66 +2187,20 @@ xfs_da_do_buf(
2123 (free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)), 2187 (free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)),
2124 mp, XFS_ERRTAG_DA_READ_BUF, 2188 mp, XFS_ERRTAG_DA_READ_BUF,
2125 XFS_RANDOM_DA_READ_BUF))) { 2189 XFS_RANDOM_DA_READ_BUF))) {
2126 trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_); 2190 trace_xfs_da_btree_corrupt(bp, _RET_IP_);
2127 XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)", 2191 XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
2128 XFS_ERRLEVEL_LOW, mp, info); 2192 XFS_ERRLEVEL_LOW, mp, info);
2129 error = XFS_ERROR(EFSCORRUPTED); 2193 error = XFS_ERROR(EFSCORRUPTED);
2130 xfs_da_brelse(trans, rbp); 2194 xfs_trans_brelse(trans, bp);
2131 nbplist = 0; 2195 goto out_free;
2132 goto exit1;
2133 } 2196 }
2134 } 2197 }
2135 if (bplist) { 2198 *bpp = bp;
2136 kmem_free(bplist); 2199out_free:
2137 }
2138 if (mapp != &map) {
2139 kmem_free(mapp);
2140 }
2141 if (bpp)
2142 *bpp = rbp;
2143 return 0;
2144exit1:
2145 if (bplist) {
2146 for (i = 0; i < nbplist; i++)
2147 xfs_trans_brelse(trans, bplist[i]);
2148 kmem_free(bplist);
2149 }
2150exit0:
2151 if (mapp != &map) 2200 if (mapp != &map)
2152 kmem_free(mapp); 2201 kmem_free(mapp);
2153 if (bpp)
2154 *bpp = NULL;
2155 return error;
2156}
2157
2158/*
2159 * Get a buffer for the dir/attr block.
2160 */
2161int
2162xfs_da_get_buf(
2163 xfs_trans_t *trans,
2164 xfs_inode_t *dp,
2165 xfs_dablk_t bno,
2166 xfs_daddr_t mappedbno,
2167 xfs_dabuf_t **bpp,
2168 int whichfork)
2169{
2170 return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0);
2171}
2172 2202
2173/* 2203 return error;
2174 * Get a buffer for the dir/attr block, fill in the contents.
2175 */
2176int
2177xfs_da_read_buf(
2178 xfs_trans_t *trans,
2179 xfs_inode_t *dp,
2180 xfs_dablk_t bno,
2181 xfs_daddr_t mappedbno,
2182 xfs_dabuf_t **bpp,
2183 int whichfork)
2184{
2185 return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1);
2186} 2204}
2187 2205
2188/* 2206/*
@@ -2190,22 +2208,41 @@ xfs_da_read_buf(
2190 */ 2208 */
2191xfs_daddr_t 2209xfs_daddr_t
2192xfs_da_reada_buf( 2210xfs_da_reada_buf(
2193 xfs_trans_t *trans, 2211 struct xfs_trans *trans,
2194 xfs_inode_t *dp, 2212 struct xfs_inode *dp,
2195 xfs_dablk_t bno, 2213 xfs_dablk_t bno,
2196 int whichfork) 2214 int whichfork)
2197{ 2215{
2198 xfs_daddr_t rval; 2216 xfs_daddr_t mappedbno = -1;
2217 struct xfs_buf_map map;
2218 struct xfs_buf_map *mapp;
2219 int nmap;
2220 int error;
2221
2222 mapp = &map;
2223 nmap = 1;
2224 error = xfs_dabuf_map(trans, dp, bno, -1, whichfork,
2225 &mapp, &nmap);
2226 if (error) {
2227 /* mapping a hole is not an error, but we don't continue */
2228 if (error == -1)
2229 error = 0;
2230 goto out_free;
2231 }
2199 2232
2200 rval = -1; 2233 mappedbno = mapp[0].bm_bn;
2201 if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3)) 2234 xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap);
2235
2236out_free:
2237 if (mapp != &map)
2238 kmem_free(mapp);
2239
2240 if (error)
2202 return -1; 2241 return -1;
2203 else 2242 return mappedbno;
2204 return rval;
2205} 2243}
2206 2244
2207kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */ 2245kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */
2208kmem_zone_t *xfs_dabuf_zone; /* dabuf zone */
2209 2246
2210/* 2247/*
2211 * Allocate a dir-state structure. 2248 * Allocate a dir-state structure.
@@ -2225,13 +2262,8 @@ xfs_da_state_kill_altpath(xfs_da_state_t *state)
2225{ 2262{
2226 int i; 2263 int i;
2227 2264
2228 for (i = 0; i < state->altpath.active; i++) { 2265 for (i = 0; i < state->altpath.active; i++)
2229 if (state->altpath.blk[i].bp) { 2266 state->altpath.blk[i].bp = NULL;
2230 if (state->altpath.blk[i].bp != state->path.blk[i].bp)
2231 xfs_da_buf_done(state->altpath.blk[i].bp);
2232 state->altpath.blk[i].bp = NULL;
2233 }
2234 }
2235 state->altpath.active = 0; 2267 state->altpath.active = 0;
2236} 2268}
2237 2269
@@ -2241,204 +2273,9 @@ xfs_da_state_kill_altpath(xfs_da_state_t *state)
2241void 2273void
2242xfs_da_state_free(xfs_da_state_t *state) 2274xfs_da_state_free(xfs_da_state_t *state)
2243{ 2275{
2244 int i;
2245
2246 xfs_da_state_kill_altpath(state); 2276 xfs_da_state_kill_altpath(state);
2247 for (i = 0; i < state->path.active; i++) {
2248 if (state->path.blk[i].bp)
2249 xfs_da_buf_done(state->path.blk[i].bp);
2250 }
2251 if (state->extravalid && state->extrablk.bp)
2252 xfs_da_buf_done(state->extrablk.bp);
2253#ifdef DEBUG 2277#ifdef DEBUG
2254 memset((char *)state, 0, sizeof(*state)); 2278 memset((char *)state, 0, sizeof(*state));
2255#endif /* DEBUG */ 2279#endif /* DEBUG */
2256 kmem_zone_free(xfs_da_state_zone, state); 2280 kmem_zone_free(xfs_da_state_zone, state);
2257} 2281}
2258
2259/*
2260 * Create a dabuf.
2261 */
2262/* ARGSUSED */
2263STATIC xfs_dabuf_t *
2264xfs_da_buf_make(int nbuf, xfs_buf_t **bps)
2265{
2266 xfs_buf_t *bp;
2267 xfs_dabuf_t *dabuf;
2268 int i;
2269 int off;
2270
2271 if (nbuf == 1)
2272 dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_NOFS);
2273 else
2274 dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_NOFS);
2275 dabuf->dirty = 0;
2276 if (nbuf == 1) {
2277 dabuf->nbuf = 1;
2278 bp = bps[0];
2279 dabuf->bbcount = bp->b_length;
2280 dabuf->data = bp->b_addr;
2281 dabuf->bps[0] = bp;
2282 } else {
2283 dabuf->nbuf = nbuf;
2284 for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) {
2285 dabuf->bps[i] = bp = bps[i];
2286 dabuf->bbcount += bp->b_length;
2287 }
2288 dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
2289 for (i = off = 0; i < nbuf; i++, off += BBTOB(bp->b_length)) {
2290 bp = bps[i];
2291 memcpy((char *)dabuf->data + off, bp->b_addr,
2292 BBTOB(bp->b_length));
2293 }
2294 }
2295 return dabuf;
2296}
2297
2298/*
2299 * Un-dirty a dabuf.
2300 */
2301STATIC void
2302xfs_da_buf_clean(xfs_dabuf_t *dabuf)
2303{
2304 xfs_buf_t *bp;
2305 int i;
2306 int off;
2307
2308 if (dabuf->dirty) {
2309 ASSERT(dabuf->nbuf > 1);
2310 dabuf->dirty = 0;
2311 for (i = off = 0; i < dabuf->nbuf;
2312 i++, off += BBTOB(bp->b_length)) {
2313 bp = dabuf->bps[i];
2314 memcpy(bp->b_addr, dabuf->data + off,
2315 BBTOB(bp->b_length));
2316 }
2317 }
2318}
2319
2320/*
2321 * Release a dabuf.
2322 */
2323void
2324xfs_da_buf_done(xfs_dabuf_t *dabuf)
2325{
2326 ASSERT(dabuf);
2327 ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2328 if (dabuf->dirty)
2329 xfs_da_buf_clean(dabuf);
2330 if (dabuf->nbuf > 1) {
2331 kmem_free(dabuf->data);
2332 kmem_free(dabuf);
2333 } else {
2334 kmem_zone_free(xfs_dabuf_zone, dabuf);
2335 }
2336}
2337
2338/*
2339 * Log transaction from a dabuf.
2340 */
2341void
2342xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
2343{
2344 xfs_buf_t *bp;
2345 uint f;
2346 int i;
2347 uint l;
2348 int off;
2349
2350 ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2351 if (dabuf->nbuf == 1) {
2352 ASSERT(dabuf->data == dabuf->bps[0]->b_addr);
2353 xfs_trans_log_buf(tp, dabuf->bps[0], first, last);
2354 return;
2355 }
2356 dabuf->dirty = 1;
2357 ASSERT(first <= last);
2358 for (i = off = 0; i < dabuf->nbuf; i++, off += BBTOB(bp->b_length)) {
2359 bp = dabuf->bps[i];
2360 f = off;
2361 l = f + BBTOB(bp->b_length) - 1;
2362 if (f < first)
2363 f = first;
2364 if (l > last)
2365 l = last;
2366 if (f <= l)
2367 xfs_trans_log_buf(tp, bp, f - off, l - off);
2368 /*
2369	 * B_DONE is set by xfs_trans_log_buf.
2370 * If we don't set it on a new buffer (get not read)
2371 * then if we don't put anything in the buffer it won't
2372	 * be set, and at commit it is released into the cache,
2373 * and then a read will fail.
2374 */
2375 else if (!(XFS_BUF_ISDONE(bp)))
2376 XFS_BUF_DONE(bp);
2377 }
2378 ASSERT(last < off);
2379}
2380
2381/*
2382 * Release dabuf from a transaction.
2383 * Have to free up the dabuf before the buffers are released,
2384 * since the synchronization on the dabuf is really the lock on the buffer.
2385 */
2386void
2387xfs_da_brelse(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
2388{
2389 xfs_buf_t *bp;
2390 xfs_buf_t **bplist;
2391 int i;
2392 int nbuf;
2393
2394 ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2395 if ((nbuf = dabuf->nbuf) == 1) {
2396 bplist = &bp;
2397 bp = dabuf->bps[0];
2398 } else {
2399 bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
2400 memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
2401 }
2402 xfs_da_buf_done(dabuf);
2403 for (i = 0; i < nbuf; i++)
2404 xfs_trans_brelse(tp, bplist[i]);
2405 if (bplist != &bp)
2406 kmem_free(bplist);
2407}
2408
2409/*
2410 * Invalidate dabuf from a transaction.
2411 */
2412void
2413xfs_da_binval(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
2414{
2415 xfs_buf_t *bp;
2416 xfs_buf_t **bplist;
2417 int i;
2418 int nbuf;
2419
2420 ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2421 if ((nbuf = dabuf->nbuf) == 1) {
2422 bplist = &bp;
2423 bp = dabuf->bps[0];
2424 } else {
2425 bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
2426 memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
2427 }
2428 xfs_da_buf_done(dabuf);
2429 for (i = 0; i < nbuf; i++)
2430 xfs_trans_binval(tp, bplist[i]);
2431 if (bplist != &bp)
2432 kmem_free(bplist);
2433}
2434
2435/*
2436 * Get the first daddr from a dabuf.
2437 */
2438xfs_daddr_t
2439xfs_da_blkno(xfs_dabuf_t *dabuf)
2440{
2441 ASSERT(dabuf->nbuf);
2442 ASSERT(dabuf->data);
2443 return XFS_BUF_ADDR(dabuf->bps[0]);
2444}
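
Annotation (not part of the diff): the xfs_da_btree.c hunks above retire xfs_da_do_buf() and the whole xfs_dabuf wrapper. The new flow is two-step: xfs_dabuf_map() (static to this file) turns the bmap extents behind a dablk into an array of struct xfs_buf_map (bm_bn/bm_len pairs), and the generic buffer layer consumes that vector directly via xfs_trans_get_buf_map(), xfs_trans_read_buf_map() or xfs_buf_readahead_map(). Below is a condensed sketch of the read path, boiled down from the new xfs_da_read_buf() shown above; the magic-number check and most error handling are trimmed, so treat it as illustrative rather than a verbatim copy of the source.

	struct xfs_buf_map	map;		/* single-extent fast path lives on the stack */
	struct xfs_buf_map	*mapp = &map;
	int			nmap = 1;
	struct xfs_buf		*bp;
	int			error;

	/* build bm_bn/bm_len pairs from the extents backing 'bno' */
	error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
			      &mapp, &nmap);
	if (error) {
		if (error == -1)	/* hole, and mappedbno == -2: not an error */
			error = 0;
		goto out_free;
	}

	/* the buffer cache consumes the map vector directly */
	error = xfs_trans_read_buf_map(dp->i_mount, trans,
				       dp->i_mount->m_ddev_targp,
				       mapp, nmap, 0, &bp);
	if (!error)
		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);	/* callers then use bp->b_addr directly */

out_free:
	if (mapp != &map)		/* the multi-extent case allocated a vector */
		kmem_free(mapp);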
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h
index dbf7c074ae73..132adafb041e 100644
--- a/fs/xfs/xfs_da_btree.h
+++ b/fs/xfs/xfs_da_btree.h
@@ -32,7 +32,7 @@ struct zone;
32/* 32/*
33 * This structure is common to both leaf nodes and non-leaf nodes in the Btree. 33 * This structure is common to both leaf nodes and non-leaf nodes in the Btree.
34 * 34 *
35 * Is is used to manage a doubly linked list of all blocks at the same 35 * It is used to manage a doubly linked list of all blocks at the same
36 * level in the Btree, and to identify which type of block this is. 36 * level in the Btree, and to identify which type of block this is.
37 */ 37 */
38#define XFS_DA_NODE_MAGIC 0xfebe /* magic number: non-leaf blocks */ 38#define XFS_DA_NODE_MAGIC 0xfebe /* magic number: non-leaf blocks */
@@ -133,24 +133,6 @@ typedef struct xfs_da_args {
133 { XFS_DA_OP_CILOOKUP, "CILOOKUP" } 133 { XFS_DA_OP_CILOOKUP, "CILOOKUP" }
134 134
135/* 135/*
136 * Structure to describe buffer(s) for a block.
137 * This is needed in the directory version 2 format case, when
138 * multiple non-contiguous fsblocks might be needed to cover one
139 * logical directory block.
140 * If the buffer count is 1 then the data pointer points to the
141 * same place as the b_addr field for the buffer, else to kmem_alloced memory.
142 */
143typedef struct xfs_dabuf {
144 int nbuf; /* number of buffer pointers present */
145 short dirty; /* data needs to be copied back */
146 short bbcount; /* how large is data in bbs */
147 void *data; /* pointer for buffers' data */
148 struct xfs_buf *bps[1]; /* actually nbuf of these */
149} xfs_dabuf_t;
150#define XFS_DA_BUF_SIZE(n) \
151 (sizeof(xfs_dabuf_t) + sizeof(struct xfs_buf *) * ((n) - 1))
152
153/*
154 * Storage for holding state during Btree searches and split/join ops. 136 * Storage for holding state during Btree searches and split/join ops.
155 * 137 *
156 * Only need space for 5 intermediate nodes. With a minimum of 62-way 138 * Only need space for 5 intermediate nodes. With a minimum of 62-way
@@ -158,7 +140,7 @@ typedef struct xfs_dabuf {
158 * which is slightly more than enough. 140 * which is slightly more than enough.
159 */ 141 */
160typedef struct xfs_da_state_blk { 142typedef struct xfs_da_state_blk {
161 xfs_dabuf_t *bp; /* buffer containing block */ 143 struct xfs_buf *bp; /* buffer containing block */
162 xfs_dablk_t blkno; /* filesystem blkno of buffer */ 144 xfs_dablk_t blkno; /* filesystem blkno of buffer */
163 xfs_daddr_t disk_blkno; /* on-disk blkno (in BBs) of buffer */ 145 xfs_daddr_t disk_blkno; /* on-disk blkno (in BBs) of buffer */
164 int index; /* relevant index into block */ 146 int index; /* relevant index into block */
@@ -211,7 +193,7 @@ struct xfs_nameops {
211 * Routines used for growing the Btree. 193 * Routines used for growing the Btree.
212 */ 194 */
213int xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level, 195int xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
214 xfs_dabuf_t **bpp, int whichfork); 196 struct xfs_buf **bpp, int whichfork);
215int xfs_da_split(xfs_da_state_t *state); 197int xfs_da_split(xfs_da_state_t *state);
216 198
217/* 199/*
@@ -241,14 +223,14 @@ int xfs_da_grow_inode_int(struct xfs_da_args *args, xfs_fileoff_t *bno,
241 int count); 223 int count);
242int xfs_da_get_buf(struct xfs_trans *trans, struct xfs_inode *dp, 224int xfs_da_get_buf(struct xfs_trans *trans, struct xfs_inode *dp,
243 xfs_dablk_t bno, xfs_daddr_t mappedbno, 225 xfs_dablk_t bno, xfs_daddr_t mappedbno,
244 xfs_dabuf_t **bp, int whichfork); 226 struct xfs_buf **bp, int whichfork);
245int xfs_da_read_buf(struct xfs_trans *trans, struct xfs_inode *dp, 227int xfs_da_read_buf(struct xfs_trans *trans, struct xfs_inode *dp,
246 xfs_dablk_t bno, xfs_daddr_t mappedbno, 228 xfs_dablk_t bno, xfs_daddr_t mappedbno,
247 xfs_dabuf_t **bpp, int whichfork); 229 struct xfs_buf **bpp, int whichfork);
248xfs_daddr_t xfs_da_reada_buf(struct xfs_trans *trans, struct xfs_inode *dp, 230xfs_daddr_t xfs_da_reada_buf(struct xfs_trans *trans, struct xfs_inode *dp,
249 xfs_dablk_t bno, int whichfork); 231 xfs_dablk_t bno, int whichfork);
250int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno, 232int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
251 xfs_dabuf_t *dead_buf); 233 struct xfs_buf *dead_buf);
252 234
253uint xfs_da_hashname(const __uint8_t *name_string, int name_length); 235uint xfs_da_hashname(const __uint8_t *name_string, int name_length);
254enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args, 236enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args,
@@ -258,15 +240,7 @@ enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args,
258xfs_da_state_t *xfs_da_state_alloc(void); 240xfs_da_state_t *xfs_da_state_alloc(void);
259void xfs_da_state_free(xfs_da_state_t *state); 241void xfs_da_state_free(xfs_da_state_t *state);
260 242
261void xfs_da_buf_done(xfs_dabuf_t *dabuf);
262void xfs_da_log_buf(struct xfs_trans *tp, xfs_dabuf_t *dabuf, uint first,
263 uint last);
264void xfs_da_brelse(struct xfs_trans *tp, xfs_dabuf_t *dabuf);
265void xfs_da_binval(struct xfs_trans *tp, xfs_dabuf_t *dabuf);
266xfs_daddr_t xfs_da_blkno(xfs_dabuf_t *dabuf);
267
268extern struct kmem_zone *xfs_da_state_zone; 243extern struct kmem_zone *xfs_da_state_zone;
269extern struct kmem_zone *xfs_dabuf_zone;
270extern const struct xfs_nameops xfs_default_nameops; 244extern const struct xfs_nameops xfs_default_nameops;
271 245
272#endif /* __XFS_DA_BTREE_H__ */ 246#endif /* __XFS_DA_BTREE_H__ */
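
Annotation (not part of the diff): the header changes delete the xfs_dabuf aggregation (nbuf underlying buffers plus a kmem_alloc'd copy of their data) and route plain struct xfs_buf through every dir/attr prototype. A discontiguous directory block is now described up front as a vector of struct xfs_buf_map entries, and the compound buffer presents those extents through a single b_addr mapping, which is what lets the copy-in/copy-out in the old dabuf code go away. A minimal illustration of the replacement idiom, reusing the bm_bn/bm_len fields and the xfs_trans_get_buf_map() call from the xfs_da_btree.c hunk; daddr0/daddr1 and the lengths are placeholders, not values from this patch.

	/* two discontiguous extents backing one logical directory block */
	struct xfs_buf_map map[2] = {
		{ .bm_bn = daddr0, .bm_len = len0 },	/* placeholder daddrs/lengths */
		{ .bm_bn = daddr1, .bm_len = len1 },
	};
	struct xfs_buf	*bp;

	bp = xfs_trans_get_buf_map(tp, mp->m_ddev_targp, map, 2, 0);
	if (bp)
		memset(bp->b_addr, 0, BBTOB(len0 + len1));	/* one contiguous view of both extents */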
diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h
index a3721633abc8..1d9643b3dce6 100644
--- a/fs/xfs/xfs_dinode.h
+++ b/fs/xfs/xfs_dinode.h
@@ -33,7 +33,7 @@ typedef struct xfs_timestamp {
33 * variable size the leftover area split into a data and an attribute fork. 33 * variable size the leftover area split into a data and an attribute fork.
34 * The format of the data and attribute fork depends on the format of the 34 * The format of the data and attribute fork depends on the format of the
35 * inode as indicated by di_format and di_aformat. To access the data and 35 * inode as indicated by di_format and di_aformat. To access the data and
36 * attribute use the XFS_DFORK_PTR, XFS_DFORK_DPTR, and XFS_DFORK_PTR macros 36 * attribute use the XFS_DFORK_DPTR, XFS_DFORK_APTR, and XFS_DFORK_PTR macros
37 * below. 37 * below.
38 * 38 *
39 * There is a very similar struct icdinode in xfs_inode which matches the 39 * There is a very similar struct icdinode in xfs_inode which matches the
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index 67a250c36d41..b26a50f9921d 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -592,7 +592,7 @@ int
592xfs_dir2_shrink_inode( 592xfs_dir2_shrink_inode(
593 xfs_da_args_t *args, 593 xfs_da_args_t *args,
594 xfs_dir2_db_t db, 594 xfs_dir2_db_t db,
595 xfs_dabuf_t *bp) 595 struct xfs_buf *bp)
596{ 596{
597 xfs_fileoff_t bno; /* directory file offset */ 597 xfs_fileoff_t bno; /* directory file offset */
598 xfs_dablk_t da; /* directory file offset */ 598 xfs_dablk_t da; /* directory file offset */
@@ -634,7 +634,7 @@ xfs_dir2_shrink_inode(
634 /* 634 /*
635 * Invalidate the buffer from the transaction. 635 * Invalidate the buffer from the transaction.
636 */ 636 */
637 xfs_da_binval(tp, bp); 637 xfs_trans_binval(tp, bp);
638 /* 638 /*
639 * If it's not a data block, we're done. 639 * If it's not a data block, we're done.
640 */ 640 */
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index 586732f2d80d..e93ca8f054f4 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -37,10 +37,10 @@
37/* 37/*
38 * Local function prototypes. 38 * Local function prototypes.
39 */ 39 */
40static void xfs_dir2_block_log_leaf(xfs_trans_t *tp, xfs_dabuf_t *bp, int first, 40static void xfs_dir2_block_log_leaf(xfs_trans_t *tp, struct xfs_buf *bp,
41 int last); 41 int first, int last);
42static void xfs_dir2_block_log_tail(xfs_trans_t *tp, xfs_dabuf_t *bp); 42static void xfs_dir2_block_log_tail(xfs_trans_t *tp, struct xfs_buf *bp);
43static int xfs_dir2_block_lookup_int(xfs_da_args_t *args, xfs_dabuf_t **bpp, 43static int xfs_dir2_block_lookup_int(xfs_da_args_t *args, struct xfs_buf **bpp,
44 int *entno); 44 int *entno);
45static int xfs_dir2_block_sort(const void *a, const void *b); 45static int xfs_dir2_block_sort(const void *a, const void *b);
46 46
@@ -66,7 +66,7 @@ xfs_dir2_block_addname(
66 xfs_dir2_data_free_t *bf; /* bestfree table in block */ 66 xfs_dir2_data_free_t *bf; /* bestfree table in block */
67 xfs_dir2_data_hdr_t *hdr; /* block header */ 67 xfs_dir2_data_hdr_t *hdr; /* block header */
68 xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ 68 xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
69 xfs_dabuf_t *bp; /* buffer for block */ 69 struct xfs_buf *bp; /* buffer for block */
70 xfs_dir2_block_tail_t *btp; /* block tail */ 70 xfs_dir2_block_tail_t *btp; /* block tail */
71 int compact; /* need to compact leaf ents */ 71 int compact; /* need to compact leaf ents */
72 xfs_dir2_data_entry_t *dep; /* block data entry */ 72 xfs_dir2_data_entry_t *dep; /* block data entry */
@@ -102,14 +102,14 @@ xfs_dir2_block_addname(
102 return error; 102 return error;
103 } 103 }
104 ASSERT(bp != NULL); 104 ASSERT(bp != NULL);
105 hdr = bp->data; 105 hdr = bp->b_addr;
106 /* 106 /*
107 * Check the magic number, corrupted if wrong. 107 * Check the magic number, corrupted if wrong.
108 */ 108 */
109 if (unlikely(hdr->magic != cpu_to_be32(XFS_DIR2_BLOCK_MAGIC))) { 109 if (unlikely(hdr->magic != cpu_to_be32(XFS_DIR2_BLOCK_MAGIC))) {
110 XFS_CORRUPTION_ERROR("xfs_dir2_block_addname", 110 XFS_CORRUPTION_ERROR("xfs_dir2_block_addname",
111 XFS_ERRLEVEL_LOW, mp, hdr); 111 XFS_ERRLEVEL_LOW, mp, hdr);
112 xfs_da_brelse(tp, bp); 112 xfs_trans_brelse(tp, bp);
113 return XFS_ERROR(EFSCORRUPTED); 113 return XFS_ERROR(EFSCORRUPTED);
114 } 114 }
115 len = xfs_dir2_data_entsize(args->namelen); 115 len = xfs_dir2_data_entsize(args->namelen);
@@ -212,7 +212,7 @@ xfs_dir2_block_addname(
212 * If this isn't a real add, we're done with the buffer. 212 * If this isn't a real add, we're done with the buffer.
213 */ 213 */
214 if (args->op_flags & XFS_DA_OP_JUSTCHECK) 214 if (args->op_flags & XFS_DA_OP_JUSTCHECK)
215 xfs_da_brelse(tp, bp); 215 xfs_trans_brelse(tp, bp);
216 /* 216 /*
217 * If we don't have space for the new entry & leaf ... 217 * If we don't have space for the new entry & leaf ...
218 */ 218 */
@@ -228,7 +228,6 @@ xfs_dir2_block_addname(
228 * Then add the new entry in that format. 228 * Then add the new entry in that format.
229 */ 229 */
230 error = xfs_dir2_block_to_leaf(args, bp); 230 error = xfs_dir2_block_to_leaf(args, bp);
231 xfs_da_buf_done(bp);
232 if (error) 231 if (error)
233 return error; 232 return error;
234 return xfs_dir2_leaf_addname(args); 233 return xfs_dir2_leaf_addname(args);
@@ -422,7 +421,6 @@ xfs_dir2_block_addname(
422 xfs_dir2_block_log_tail(tp, bp); 421 xfs_dir2_block_log_tail(tp, bp);
423 xfs_dir2_data_log_entry(tp, bp, dep); 422 xfs_dir2_data_log_entry(tp, bp, dep);
424 xfs_dir2_data_check(dp, bp); 423 xfs_dir2_data_check(dp, bp);
425 xfs_da_buf_done(bp);
426 return 0; 424 return 0;
427} 425}
428 426
@@ -437,7 +435,7 @@ xfs_dir2_block_getdents(
437 filldir_t filldir) 435 filldir_t filldir)
438{ 436{
439 xfs_dir2_data_hdr_t *hdr; /* block header */ 437 xfs_dir2_data_hdr_t *hdr; /* block header */
440 xfs_dabuf_t *bp; /* buffer for block */ 438 struct xfs_buf *bp; /* buffer for block */
441 xfs_dir2_block_tail_t *btp; /* block tail */ 439 xfs_dir2_block_tail_t *btp; /* block tail */
442 xfs_dir2_data_entry_t *dep; /* block data entry */ 440 xfs_dir2_data_entry_t *dep; /* block data entry */
443 xfs_dir2_data_unused_t *dup; /* block unused entry */ 441 xfs_dir2_data_unused_t *dup; /* block unused entry */
@@ -469,7 +467,7 @@ xfs_dir2_block_getdents(
469 * We'll skip entries before this. 467 * We'll skip entries before this.
470 */ 468 */
471 wantoff = xfs_dir2_dataptr_to_off(mp, *offset); 469 wantoff = xfs_dir2_dataptr_to_off(mp, *offset);
472 hdr = bp->data; 470 hdr = bp->b_addr;
473 xfs_dir2_data_check(dp, bp); 471 xfs_dir2_data_check(dp, bp);
474 /* 472 /*
475 * Set up values for the loop. 473 * Set up values for the loop.
@@ -514,7 +512,7 @@ xfs_dir2_block_getdents(
514 cook & 0x7fffffff, be64_to_cpu(dep->inumber), 512 cook & 0x7fffffff, be64_to_cpu(dep->inumber),
515 DT_UNKNOWN)) { 513 DT_UNKNOWN)) {
516 *offset = cook & 0x7fffffff; 514 *offset = cook & 0x7fffffff;
517 xfs_da_brelse(NULL, bp); 515 xfs_trans_brelse(NULL, bp);
518 return 0; 516 return 0;
519 } 517 }
520 } 518 }
@@ -525,7 +523,7 @@ xfs_dir2_block_getdents(
525 */ 523 */
526 *offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) & 524 *offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
527 0x7fffffff; 525 0x7fffffff;
528 xfs_da_brelse(NULL, bp); 526 xfs_trans_brelse(NULL, bp);
529 return 0; 527 return 0;
530} 528}
531 529
@@ -535,17 +533,17 @@ xfs_dir2_block_getdents(
535static void 533static void
536xfs_dir2_block_log_leaf( 534xfs_dir2_block_log_leaf(
537 xfs_trans_t *tp, /* transaction structure */ 535 xfs_trans_t *tp, /* transaction structure */
538 xfs_dabuf_t *bp, /* block buffer */ 536 struct xfs_buf *bp, /* block buffer */
539 int first, /* index of first logged leaf */ 537 int first, /* index of first logged leaf */
540 int last) /* index of last logged leaf */ 538 int last) /* index of last logged leaf */
541{ 539{
542 xfs_dir2_data_hdr_t *hdr = bp->data; 540 xfs_dir2_data_hdr_t *hdr = bp->b_addr;
543 xfs_dir2_leaf_entry_t *blp; 541 xfs_dir2_leaf_entry_t *blp;
544 xfs_dir2_block_tail_t *btp; 542 xfs_dir2_block_tail_t *btp;
545 543
546 btp = xfs_dir2_block_tail_p(tp->t_mountp, hdr); 544 btp = xfs_dir2_block_tail_p(tp->t_mountp, hdr);
547 blp = xfs_dir2_block_leaf_p(btp); 545 blp = xfs_dir2_block_leaf_p(btp);
548 xfs_da_log_buf(tp, bp, (uint)((char *)&blp[first] - (char *)hdr), 546 xfs_trans_log_buf(tp, bp, (uint)((char *)&blp[first] - (char *)hdr),
549 (uint)((char *)&blp[last + 1] - (char *)hdr - 1)); 547 (uint)((char *)&blp[last + 1] - (char *)hdr - 1));
550} 548}
551 549
@@ -555,13 +553,13 @@ xfs_dir2_block_log_leaf(
555static void 553static void
556xfs_dir2_block_log_tail( 554xfs_dir2_block_log_tail(
557 xfs_trans_t *tp, /* transaction structure */ 555 xfs_trans_t *tp, /* transaction structure */
558 xfs_dabuf_t *bp) /* block buffer */ 556 struct xfs_buf *bp) /* block buffer */
559{ 557{
560 xfs_dir2_data_hdr_t *hdr = bp->data; 558 xfs_dir2_data_hdr_t *hdr = bp->b_addr;
561 xfs_dir2_block_tail_t *btp; 559 xfs_dir2_block_tail_t *btp;
562 560
563 btp = xfs_dir2_block_tail_p(tp->t_mountp, hdr); 561 btp = xfs_dir2_block_tail_p(tp->t_mountp, hdr);
564 xfs_da_log_buf(tp, bp, (uint)((char *)btp - (char *)hdr), 562 xfs_trans_log_buf(tp, bp, (uint)((char *)btp - (char *)hdr),
565 (uint)((char *)(btp + 1) - (char *)hdr - 1)); 563 (uint)((char *)(btp + 1) - (char *)hdr - 1));
566} 564}
567 565
@@ -575,7 +573,7 @@ xfs_dir2_block_lookup(
575{ 573{
576 xfs_dir2_data_hdr_t *hdr; /* block header */ 574 xfs_dir2_data_hdr_t *hdr; /* block header */
577 xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ 575 xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
578 xfs_dabuf_t *bp; /* block buffer */ 576 struct xfs_buf *bp; /* block buffer */
579 xfs_dir2_block_tail_t *btp; /* block tail */ 577 xfs_dir2_block_tail_t *btp; /* block tail */
580 xfs_dir2_data_entry_t *dep; /* block data entry */ 578 xfs_dir2_data_entry_t *dep; /* block data entry */
581 xfs_inode_t *dp; /* incore inode */ 579 xfs_inode_t *dp; /* incore inode */
@@ -593,7 +591,7 @@ xfs_dir2_block_lookup(
593 return error; 591 return error;
594 dp = args->dp; 592 dp = args->dp;
595 mp = dp->i_mount; 593 mp = dp->i_mount;
596 hdr = bp->data; 594 hdr = bp->b_addr;
597 xfs_dir2_data_check(dp, bp); 595 xfs_dir2_data_check(dp, bp);
598 btp = xfs_dir2_block_tail_p(mp, hdr); 596 btp = xfs_dir2_block_tail_p(mp, hdr);
599 blp = xfs_dir2_block_leaf_p(btp); 597 blp = xfs_dir2_block_leaf_p(btp);
@@ -607,7 +605,7 @@ xfs_dir2_block_lookup(
607 */ 605 */
608 args->inumber = be64_to_cpu(dep->inumber); 606 args->inumber = be64_to_cpu(dep->inumber);
609 error = xfs_dir_cilookup_result(args, dep->name, dep->namelen); 607 error = xfs_dir_cilookup_result(args, dep->name, dep->namelen);
610 xfs_da_brelse(args->trans, bp); 608 xfs_trans_brelse(args->trans, bp);
611 return XFS_ERROR(error); 609 return XFS_ERROR(error);
612} 610}
613 611
@@ -617,13 +615,13 @@ xfs_dir2_block_lookup(
617static int /* error */ 615static int /* error */
618xfs_dir2_block_lookup_int( 616xfs_dir2_block_lookup_int(
619 xfs_da_args_t *args, /* dir lookup arguments */ 617 xfs_da_args_t *args, /* dir lookup arguments */
620 xfs_dabuf_t **bpp, /* returned block buffer */ 618 struct xfs_buf **bpp, /* returned block buffer */
621 int *entno) /* returned entry number */ 619 int *entno) /* returned entry number */
622{ 620{
623 xfs_dir2_dataptr_t addr; /* data entry address */ 621 xfs_dir2_dataptr_t addr; /* data entry address */
624 xfs_dir2_data_hdr_t *hdr; /* block header */ 622 xfs_dir2_data_hdr_t *hdr; /* block header */
625 xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ 623 xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
626 xfs_dabuf_t *bp; /* block buffer */ 624 struct xfs_buf *bp; /* block buffer */
627 xfs_dir2_block_tail_t *btp; /* block tail */ 625 xfs_dir2_block_tail_t *btp; /* block tail */
628 xfs_dir2_data_entry_t *dep; /* block data entry */ 626 xfs_dir2_data_entry_t *dep; /* block data entry */
629 xfs_inode_t *dp; /* incore inode */ 627 xfs_inode_t *dp; /* incore inode */
@@ -647,7 +645,7 @@ xfs_dir2_block_lookup_int(
647 return error; 645 return error;
648 } 646 }
649 ASSERT(bp != NULL); 647 ASSERT(bp != NULL);
650 hdr = bp->data; 648 hdr = bp->b_addr;
651 xfs_dir2_data_check(dp, bp); 649 xfs_dir2_data_check(dp, bp);
652 btp = xfs_dir2_block_tail_p(mp, hdr); 650 btp = xfs_dir2_block_tail_p(mp, hdr);
653 blp = xfs_dir2_block_leaf_p(btp); 651 blp = xfs_dir2_block_leaf_p(btp);
@@ -666,7 +664,7 @@ xfs_dir2_block_lookup_int(
666 high = mid - 1; 664 high = mid - 1;
667 if (low > high) { 665 if (low > high) {
668 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); 666 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
669 xfs_da_brelse(tp, bp); 667 xfs_trans_brelse(tp, bp);
670 return XFS_ERROR(ENOENT); 668 return XFS_ERROR(ENOENT);
671 } 669 }
672 } 670 }
@@ -714,7 +712,7 @@ xfs_dir2_block_lookup_int(
714 /* 712 /*
715 * No match, release the buffer and return ENOENT. 713 * No match, release the buffer and return ENOENT.
716 */ 714 */
717 xfs_da_brelse(tp, bp); 715 xfs_trans_brelse(tp, bp);
718 return XFS_ERROR(ENOENT); 716 return XFS_ERROR(ENOENT);
719} 717}
720 718
@@ -728,7 +726,7 @@ xfs_dir2_block_removename(
728{ 726{
729 xfs_dir2_data_hdr_t *hdr; /* block header */ 727 xfs_dir2_data_hdr_t *hdr; /* block header */
730 xfs_dir2_leaf_entry_t *blp; /* block leaf pointer */ 728 xfs_dir2_leaf_entry_t *blp; /* block leaf pointer */
731 xfs_dabuf_t *bp; /* block buffer */ 729 struct xfs_buf *bp; /* block buffer */
732 xfs_dir2_block_tail_t *btp; /* block tail */ 730 xfs_dir2_block_tail_t *btp; /* block tail */
733 xfs_dir2_data_entry_t *dep; /* block data entry */ 731 xfs_dir2_data_entry_t *dep; /* block data entry */
734 xfs_inode_t *dp; /* incore inode */ 732 xfs_inode_t *dp; /* incore inode */
@@ -753,7 +751,7 @@ xfs_dir2_block_removename(
753 dp = args->dp; 751 dp = args->dp;
754 tp = args->trans; 752 tp = args->trans;
755 mp = dp->i_mount; 753 mp = dp->i_mount;
756 hdr = bp->data; 754 hdr = bp->b_addr;
757 btp = xfs_dir2_block_tail_p(mp, hdr); 755 btp = xfs_dir2_block_tail_p(mp, hdr);
758 blp = xfs_dir2_block_leaf_p(btp); 756 blp = xfs_dir2_block_leaf_p(btp);
759 /* 757 /*
@@ -790,10 +788,9 @@ xfs_dir2_block_removename(
790 * See if the size as a shortform is good enough. 788 * See if the size as a shortform is good enough.
791 */ 789 */
792 size = xfs_dir2_block_sfsize(dp, hdr, &sfh); 790 size = xfs_dir2_block_sfsize(dp, hdr, &sfh);
793 if (size > XFS_IFORK_DSIZE(dp)) { 791 if (size > XFS_IFORK_DSIZE(dp))
794 xfs_da_buf_done(bp);
795 return 0; 792 return 0;
796 } 793
797 /* 794 /*
798 * If it works, do the conversion. 795 * If it works, do the conversion.
799 */ 796 */
@@ -810,7 +807,7 @@ xfs_dir2_block_replace(
810{ 807{
811 xfs_dir2_data_hdr_t *hdr; /* block header */ 808 xfs_dir2_data_hdr_t *hdr; /* block header */
812 xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ 809 xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
813 xfs_dabuf_t *bp; /* block buffer */ 810 struct xfs_buf *bp; /* block buffer */
814 xfs_dir2_block_tail_t *btp; /* block tail */ 811 xfs_dir2_block_tail_t *btp; /* block tail */
815 xfs_dir2_data_entry_t *dep; /* block data entry */ 812 xfs_dir2_data_entry_t *dep; /* block data entry */
816 xfs_inode_t *dp; /* incore inode */ 813 xfs_inode_t *dp; /* incore inode */
@@ -829,7 +826,7 @@ xfs_dir2_block_replace(
829 } 826 }
830 dp = args->dp; 827 dp = args->dp;
831 mp = dp->i_mount; 828 mp = dp->i_mount;
832 hdr = bp->data; 829 hdr = bp->b_addr;
833 btp = xfs_dir2_block_tail_p(mp, hdr); 830 btp = xfs_dir2_block_tail_p(mp, hdr);
834 blp = xfs_dir2_block_leaf_p(btp); 831 blp = xfs_dir2_block_leaf_p(btp);
835 /* 832 /*
@@ -844,7 +841,6 @@ xfs_dir2_block_replace(
844 dep->inumber = cpu_to_be64(args->inumber); 841 dep->inumber = cpu_to_be64(args->inumber);
845 xfs_dir2_data_log_entry(args->trans, bp, dep); 842 xfs_dir2_data_log_entry(args->trans, bp, dep);
846 xfs_dir2_data_check(dp, bp); 843 xfs_dir2_data_check(dp, bp);
847 xfs_da_buf_done(bp);
848 return 0; 844 return 0;
849} 845}
850 846
@@ -871,8 +867,8 @@ xfs_dir2_block_sort(
871int /* error */ 867int /* error */
872xfs_dir2_leaf_to_block( 868xfs_dir2_leaf_to_block(
873 xfs_da_args_t *args, /* operation arguments */ 869 xfs_da_args_t *args, /* operation arguments */
874 xfs_dabuf_t *lbp, /* leaf buffer */ 870 struct xfs_buf *lbp, /* leaf buffer */
875 xfs_dabuf_t *dbp) /* data buffer */ 871 struct xfs_buf *dbp) /* data buffer */
876{ 872{
877 __be16 *bestsp; /* leaf bests table */ 873 __be16 *bestsp; /* leaf bests table */
878 xfs_dir2_data_hdr_t *hdr; /* block header */ 874 xfs_dir2_data_hdr_t *hdr; /* block header */
@@ -898,7 +894,7 @@ xfs_dir2_leaf_to_block(
898 dp = args->dp; 894 dp = args->dp;
899 tp = args->trans; 895 tp = args->trans;
900 mp = dp->i_mount; 896 mp = dp->i_mount;
901 leaf = lbp->data; 897 leaf = lbp->b_addr;
902 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC)); 898 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC));
903 ltp = xfs_dir2_leaf_tail_p(mp, leaf); 899 ltp = xfs_dir2_leaf_tail_p(mp, leaf);
904 /* 900 /*
@@ -914,11 +910,9 @@ xfs_dir2_leaf_to_block(
914 if ((error = 910 if ((error =
915 xfs_dir2_leaf_trim_data(args, lbp, 911 xfs_dir2_leaf_trim_data(args, lbp,
916 (xfs_dir2_db_t)(be32_to_cpu(ltp->bestcount) - 1)))) 912 (xfs_dir2_db_t)(be32_to_cpu(ltp->bestcount) - 1))))
917 goto out; 913 return error;
918 } else { 914 } else
919 error = 0; 915 return 0;
920 goto out;
921 }
922 } 916 }
923 /* 917 /*
924 * Read the data block if we don't already have it, give up if it fails. 918 * Read the data block if we don't already have it, give up if it fails.
@@ -926,9 +920,9 @@ xfs_dir2_leaf_to_block(
926 if (dbp == NULL && 920 if (dbp == NULL &&
927 (error = xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &dbp, 921 (error = xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &dbp,
928 XFS_DATA_FORK))) { 922 XFS_DATA_FORK))) {
929 goto out; 923 return error;
930 } 924 }
931 hdr = dbp->data; 925 hdr = dbp->b_addr;
932 ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC)); 926 ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC));
933 /* 927 /*
934 * Size of the "leaf" area in the block. 928 * Size of the "leaf" area in the block.
@@ -944,10 +938,9 @@ xfs_dir2_leaf_to_block(
944 * If it's not free or is too short we can't do it. 938 * If it's not free or is too short we can't do it.
945 */ 939 */
946 if (be16_to_cpu(dup->freetag) != XFS_DIR2_DATA_FREE_TAG || 940 if (be16_to_cpu(dup->freetag) != XFS_DIR2_DATA_FREE_TAG ||
947 be16_to_cpu(dup->length) < size) { 941 be16_to_cpu(dup->length) < size)
948 error = 0; 942 return 0;
949 goto out; 943
950 }
951 /* 944 /*
952 * Start converting it to block form. 945 * Start converting it to block form.
953 */ 946 */
@@ -989,25 +982,17 @@ xfs_dir2_leaf_to_block(
989 * Pitch the old leaf block. 982 * Pitch the old leaf block.
990 */ 983 */
991 error = xfs_da_shrink_inode(args, mp->m_dirleafblk, lbp); 984 error = xfs_da_shrink_inode(args, mp->m_dirleafblk, lbp);
992 lbp = NULL; 985 if (error)
993 if (error) { 986 return error;
994 goto out; 987
995 }
996 /* 988 /*
997 * Now see if the resulting block can be shrunken to shortform. 989 * Now see if the resulting block can be shrunken to shortform.
998 */ 990 */
999 size = xfs_dir2_block_sfsize(dp, hdr, &sfh); 991 size = xfs_dir2_block_sfsize(dp, hdr, &sfh);
1000 if (size > XFS_IFORK_DSIZE(dp)) { 992 if (size > XFS_IFORK_DSIZE(dp))
1001 error = 0; 993 return 0;
1002 goto out; 994
1003 }
1004 return xfs_dir2_block_to_sf(args, dbp, size, &sfh); 995 return xfs_dir2_block_to_sf(args, dbp, size, &sfh);
1005out:
1006 if (lbp)
1007 xfs_da_buf_done(lbp);
1008 if (dbp)
1009 xfs_da_buf_done(dbp);
1010 return error;
1011} 996}
1012 997
1013/* 998/*
@@ -1020,7 +1005,7 @@ xfs_dir2_sf_to_block(
1020 xfs_dir2_db_t blkno; /* dir-relative block # (0) */ 1005 xfs_dir2_db_t blkno; /* dir-relative block # (0) */
1021 xfs_dir2_data_hdr_t *hdr; /* block header */ 1006 xfs_dir2_data_hdr_t *hdr; /* block header */
1022 xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ 1007 xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
1023 xfs_dabuf_t *bp; /* block buffer */ 1008 struct xfs_buf *bp; /* block buffer */
1024 xfs_dir2_block_tail_t *btp; /* block tail pointer */ 1009 xfs_dir2_block_tail_t *btp; /* block tail pointer */
1025 xfs_dir2_data_entry_t *dep; /* data entry pointer */ 1010 xfs_dir2_data_entry_t *dep; /* data entry pointer */
1026 xfs_inode_t *dp; /* incore directory inode */ 1011 xfs_inode_t *dp; /* incore directory inode */
@@ -1088,7 +1073,7 @@ xfs_dir2_sf_to_block(
1088 kmem_free(sfp); 1073 kmem_free(sfp);
1089 return error; 1074 return error;
1090 } 1075 }
1091 hdr = bp->data; 1076 hdr = bp->b_addr;
1092 hdr->magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC); 1077 hdr->magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
1093 /* 1078 /*
1094 * Compute size of block "tail" area. 1079 * Compute size of block "tail" area.
@@ -1217,6 +1202,5 @@ xfs_dir2_sf_to_block(
1217 xfs_dir2_block_log_leaf(tp, bp, 0, be32_to_cpu(btp->count) - 1); 1202 xfs_dir2_block_log_leaf(tp, bp, 0, be32_to_cpu(btp->count) - 1);
1218 xfs_dir2_block_log_tail(tp, bp); 1203 xfs_dir2_block_log_tail(tp, bp);
1219 xfs_dir2_data_check(dp, bp); 1204 xfs_dir2_data_check(dp, bp);
1220 xfs_da_buf_done(bp);
1221 return 0; 1205 return 0;
1222} 1206}
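
Annotation (not part of the diff): a pattern repeated throughout xfs_dir2_block.c above, and in the directory files that follow: callers read the block header through bp->b_addr instead of the old dabuf data pointer, release buffers with xfs_trans_brelse(), and the trailing xfs_da_buf_done() calls simply disappear because the transaction now owns the plain xfs_buf and there is no wrapper left to tear down. Condensed from the xfs_dir2_block_replace() success path shown above (illustrative, not verbatim):

	/* before: the dabuf wrapper needed explicit teardown on the success path */
	hdr = bp->data;
	xfs_dir2_data_log_entry(tp, bp, dep);
	xfs_da_buf_done(bp);
	return 0;

	/* after: the transaction owns the xfs_buf, nothing extra to free */
	hdr = bp->b_addr;
	xfs_dir2_data_log_entry(tp, bp, dep);
	return 0;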
diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c
index 2046988e9eb2..44ffd4d6bc91 100644
--- a/fs/xfs/xfs_dir2_data.c
+++ b/fs/xfs/xfs_dir2_data.c
@@ -42,8 +42,8 @@ xfs_dir2_data_freefind(xfs_dir2_data_hdr_t *hdr, xfs_dir2_data_unused_t *dup);
42 */ 42 */
43void 43void
44xfs_dir2_data_check( 44xfs_dir2_data_check(
45 xfs_inode_t *dp, /* incore inode pointer */ 45 struct xfs_inode *dp, /* incore inode pointer */
46 xfs_dabuf_t *bp) /* data block's buffer */ 46 struct xfs_buf *bp) /* data block's buffer */
47{ 47{
48 xfs_dir2_dataptr_t addr; /* addr for leaf lookup */ 48 xfs_dir2_dataptr_t addr; /* addr for leaf lookup */
49 xfs_dir2_data_free_t *bf; /* bestfree table */ 49 xfs_dir2_data_free_t *bf; /* bestfree table */
@@ -65,7 +65,7 @@ xfs_dir2_data_check(
65 struct xfs_name name; 65 struct xfs_name name;
66 66
67 mp = dp->i_mount; 67 mp = dp->i_mount;
68 hdr = bp->data; 68 hdr = bp->b_addr;
69 bf = hdr->bestfree; 69 bf = hdr->bestfree;
70 p = (char *)(hdr + 1); 70 p = (char *)(hdr + 1);
71 71
@@ -389,9 +389,9 @@ int /* error */
389xfs_dir2_data_init( 389xfs_dir2_data_init(
390 xfs_da_args_t *args, /* directory operation args */ 390 xfs_da_args_t *args, /* directory operation args */
391 xfs_dir2_db_t blkno, /* logical dir block number */ 391 xfs_dir2_db_t blkno, /* logical dir block number */
392 xfs_dabuf_t **bpp) /* output block buffer */ 392 struct xfs_buf **bpp) /* output block buffer */
393{ 393{
394 xfs_dabuf_t *bp; /* block buffer */ 394 struct xfs_buf *bp; /* block buffer */
395 xfs_dir2_data_hdr_t *hdr; /* data block header */ 395 xfs_dir2_data_hdr_t *hdr; /* data block header */
396 xfs_inode_t *dp; /* incore directory inode */ 396 xfs_inode_t *dp; /* incore directory inode */
397 xfs_dir2_data_unused_t *dup; /* unused entry pointer */ 397 xfs_dir2_data_unused_t *dup; /* unused entry pointer */
@@ -417,7 +417,7 @@ xfs_dir2_data_init(
417 /* 417 /*
418 * Initialize the header. 418 * Initialize the header.
419 */ 419 */
420 hdr = bp->data; 420 hdr = bp->b_addr;
421 hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC); 421 hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);
422 hdr->bestfree[0].offset = cpu_to_be16(sizeof(*hdr)); 422 hdr->bestfree[0].offset = cpu_to_be16(sizeof(*hdr));
423 for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) { 423 for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) {
@@ -449,16 +449,16 @@ xfs_dir2_data_init(
449 */ 449 */
450void 450void
451xfs_dir2_data_log_entry( 451xfs_dir2_data_log_entry(
452 xfs_trans_t *tp, /* transaction pointer */ 452 struct xfs_trans *tp,
453 xfs_dabuf_t *bp, /* block buffer */ 453 struct xfs_buf *bp,
454 xfs_dir2_data_entry_t *dep) /* data entry pointer */ 454 xfs_dir2_data_entry_t *dep) /* data entry pointer */
455{ 455{
456 xfs_dir2_data_hdr_t *hdr = bp->data; 456 xfs_dir2_data_hdr_t *hdr = bp->b_addr;
457 457
458 ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || 458 ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
459 hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); 459 hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
460 460
461 xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)hdr), 461 xfs_trans_log_buf(tp, bp, (uint)((char *)dep - (char *)hdr),
462 (uint)((char *)(xfs_dir2_data_entry_tag_p(dep) + 1) - 462 (uint)((char *)(xfs_dir2_data_entry_tag_p(dep) + 1) -
463 (char *)hdr - 1)); 463 (char *)hdr - 1));
464} 464}
@@ -468,15 +468,15 @@ xfs_dir2_data_log_entry(
468 */ 468 */
469void 469void
470xfs_dir2_data_log_header( 470xfs_dir2_data_log_header(
471 xfs_trans_t *tp, /* transaction pointer */ 471 struct xfs_trans *tp,
472 xfs_dabuf_t *bp) /* block buffer */ 472 struct xfs_buf *bp)
473{ 473{
474 xfs_dir2_data_hdr_t *hdr = bp->data; 474 xfs_dir2_data_hdr_t *hdr = bp->b_addr;
475 475
476 ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || 476 ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
477 hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); 477 hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
478 478
479 xfs_da_log_buf(tp, bp, 0, sizeof(*hdr) - 1); 479 xfs_trans_log_buf(tp, bp, 0, sizeof(*hdr) - 1);
480} 480}
481 481
482/* 482/*
@@ -484,11 +484,11 @@ xfs_dir2_data_log_header(
484 */ 484 */
485void 485void
486xfs_dir2_data_log_unused( 486xfs_dir2_data_log_unused(
487 xfs_trans_t *tp, /* transaction pointer */ 487 struct xfs_trans *tp,
488 xfs_dabuf_t *bp, /* block buffer */ 488 struct xfs_buf *bp,
489 xfs_dir2_data_unused_t *dup) /* data unused pointer */ 489 xfs_dir2_data_unused_t *dup) /* data unused pointer */
490{ 490{
491 xfs_dir2_data_hdr_t *hdr = bp->data; 491 xfs_dir2_data_hdr_t *hdr = bp->b_addr;
492 492
493 ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || 493 ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
494 hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); 494 hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
@@ -496,13 +496,13 @@ xfs_dir2_data_log_unused(
496 /* 496 /*
497 * Log the first part of the unused entry. 497 * Log the first part of the unused entry.
498 */ 498 */
499 xfs_da_log_buf(tp, bp, (uint)((char *)dup - (char *)hdr), 499 xfs_trans_log_buf(tp, bp, (uint)((char *)dup - (char *)hdr),
500 (uint)((char *)&dup->length + sizeof(dup->length) - 500 (uint)((char *)&dup->length + sizeof(dup->length) -
501 1 - (char *)hdr)); 501 1 - (char *)hdr));
502 /* 502 /*
503 * Log the end (tag) of the unused entry. 503 * Log the end (tag) of the unused entry.
504 */ 504 */
505 xfs_da_log_buf(tp, bp, 505 xfs_trans_log_buf(tp, bp,
506 (uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)hdr), 506 (uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)hdr),
507 (uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)hdr + 507 (uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)hdr +
508 sizeof(xfs_dir2_data_off_t) - 1)); 508 sizeof(xfs_dir2_data_off_t) - 1));
@@ -514,8 +514,8 @@ xfs_dir2_data_log_unused(
514 */ 514 */
515void 515void
516xfs_dir2_data_make_free( 516xfs_dir2_data_make_free(
517 xfs_trans_t *tp, /* transaction pointer */ 517 struct xfs_trans *tp,
518 xfs_dabuf_t *bp, /* block buffer */ 518 struct xfs_buf *bp,
519 xfs_dir2_data_aoff_t offset, /* starting byte offset */ 519 xfs_dir2_data_aoff_t offset, /* starting byte offset */
520 xfs_dir2_data_aoff_t len, /* length in bytes */ 520 xfs_dir2_data_aoff_t len, /* length in bytes */
521 int *needlogp, /* out: log header */ 521 int *needlogp, /* out: log header */
@@ -531,7 +531,7 @@ xfs_dir2_data_make_free(
531 xfs_dir2_data_unused_t *prevdup; /* unused entry before us */ 531 xfs_dir2_data_unused_t *prevdup; /* unused entry before us */
532 532
533 mp = tp->t_mountp; 533 mp = tp->t_mountp;
534 hdr = bp->data; 534 hdr = bp->b_addr;
535 535
536 /* 536 /*
537 * Figure out where the end of the data area is. 537 * Figure out where the end of the data area is.
@@ -696,8 +696,8 @@ xfs_dir2_data_make_free(
696 */ 696 */
697void 697void
698xfs_dir2_data_use_free( 698xfs_dir2_data_use_free(
699 xfs_trans_t *tp, /* transaction pointer */ 699 struct xfs_trans *tp,
700 xfs_dabuf_t *bp, /* data block buffer */ 700 struct xfs_buf *bp,
701 xfs_dir2_data_unused_t *dup, /* unused entry */ 701 xfs_dir2_data_unused_t *dup, /* unused entry */
702 xfs_dir2_data_aoff_t offset, /* starting offset to use */ 702 xfs_dir2_data_aoff_t offset, /* starting offset to use */
703 xfs_dir2_data_aoff_t len, /* length to use */ 703 xfs_dir2_data_aoff_t len, /* length to use */
@@ -713,7 +713,7 @@ xfs_dir2_data_use_free(
713 xfs_dir2_data_unused_t *newdup2; /* another new unused entry */ 713 xfs_dir2_data_unused_t *newdup2; /* another new unused entry */
714 int oldlen; /* old unused entry's length */ 714 int oldlen; /* old unused entry's length */
715 715
716 hdr = bp->data; 716 hdr = bp->b_addr;
717 ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || 717 ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
718 hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); 718 hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
719 ASSERT(be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG); 719 ASSERT(be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG);
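
Annotation (not part of the diff): the xfs_dir2_data.c logging helpers above now call xfs_trans_log_buf() directly, passing byte offsets computed against bp->b_addr. The old xfs_da_log_buf() had to clip that range against each of dabuf->bps[] and mark the dabuf dirty so its bounce copy was written back later; with a single mapped buffer the whole range is logged in one call. The idiom, as used by the converted xfs_dir2_data_log_entry():

	xfs_dir2_data_hdr_t	*hdr = bp->b_addr;

	/* log only the bytes covering this entry, as offsets from the block start */
	xfs_trans_log_buf(tp, bp,
		(uint)((char *)dep - (char *)hdr),
		(uint)((char *)(xfs_dir2_data_entry_tag_p(dep) + 1) - (char *)hdr - 1));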
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index 397ffbcbab1d..0b296253bd01 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -38,15 +38,15 @@
38 * Local function declarations. 38 * Local function declarations.
39 */ 39 */
40#ifdef DEBUG 40#ifdef DEBUG
41static void xfs_dir2_leaf_check(xfs_inode_t *dp, xfs_dabuf_t *bp); 41static void xfs_dir2_leaf_check(struct xfs_inode *dp, struct xfs_buf *bp);
42#else 42#else
43#define xfs_dir2_leaf_check(dp, bp) 43#define xfs_dir2_leaf_check(dp, bp)
44#endif 44#endif
45static int xfs_dir2_leaf_lookup_int(xfs_da_args_t *args, xfs_dabuf_t **lbpp, 45static int xfs_dir2_leaf_lookup_int(xfs_da_args_t *args, struct xfs_buf **lbpp,
46 int *indexp, xfs_dabuf_t **dbpp); 46 int *indexp, struct xfs_buf **dbpp);
47static void xfs_dir2_leaf_log_bests(struct xfs_trans *tp, struct xfs_dabuf *bp, 47static void xfs_dir2_leaf_log_bests(struct xfs_trans *tp, struct xfs_buf *bp,
48 int first, int last); 48 int first, int last);
49static void xfs_dir2_leaf_log_tail(struct xfs_trans *tp, struct xfs_dabuf *bp); 49static void xfs_dir2_leaf_log_tail(struct xfs_trans *tp, struct xfs_buf *bp);
50 50
51 51
52/* 52/*
@@ -55,7 +55,7 @@ static void xfs_dir2_leaf_log_tail(struct xfs_trans *tp, struct xfs_dabuf *bp);
55int /* error */ 55int /* error */
56xfs_dir2_block_to_leaf( 56xfs_dir2_block_to_leaf(
57 xfs_da_args_t *args, /* operation arguments */ 57 xfs_da_args_t *args, /* operation arguments */
58 xfs_dabuf_t *dbp) /* input block's buffer */ 58 struct xfs_buf *dbp) /* input block's buffer */
59{ 59{
60 __be16 *bestsp; /* leaf's bestsp entries */ 60 __be16 *bestsp; /* leaf's bestsp entries */
61 xfs_dablk_t blkno; /* leaf block's bno */ 61 xfs_dablk_t blkno; /* leaf block's bno */
@@ -64,7 +64,7 @@ xfs_dir2_block_to_leaf(
64 xfs_dir2_block_tail_t *btp; /* block's tail */ 64 xfs_dir2_block_tail_t *btp; /* block's tail */
65 xfs_inode_t *dp; /* incore directory inode */ 65 xfs_inode_t *dp; /* incore directory inode */
66 int error; /* error return code */ 66 int error; /* error return code */
67 xfs_dabuf_t *lbp; /* leaf block's buffer */ 67 struct xfs_buf *lbp; /* leaf block's buffer */
68 xfs_dir2_db_t ldb; /* leaf block's bno */ 68 xfs_dir2_db_t ldb; /* leaf block's bno */
69 xfs_dir2_leaf_t *leaf; /* leaf structure */ 69 xfs_dir2_leaf_t *leaf; /* leaf structure */
70 xfs_dir2_leaf_tail_t *ltp; /* leaf's tail */ 70 xfs_dir2_leaf_tail_t *ltp; /* leaf's tail */
@@ -95,8 +95,8 @@ xfs_dir2_block_to_leaf(
95 return error; 95 return error;
96 } 96 }
97 ASSERT(lbp != NULL); 97 ASSERT(lbp != NULL);
98 leaf = lbp->data; 98 leaf = lbp->b_addr;
99 hdr = dbp->data; 99 hdr = dbp->b_addr;
100 xfs_dir2_data_check(dp, dbp); 100 xfs_dir2_data_check(dp, dbp);
101 btp = xfs_dir2_block_tail_p(mp, hdr); 101 btp = xfs_dir2_block_tail_p(mp, hdr);
102 blp = xfs_dir2_block_leaf_p(btp); 102 blp = xfs_dir2_block_leaf_p(btp);
@@ -143,7 +143,6 @@ xfs_dir2_block_to_leaf(
143 xfs_dir2_leaf_check(dp, lbp); 143 xfs_dir2_leaf_check(dp, lbp);
144 xfs_dir2_data_check(dp, dbp); 144 xfs_dir2_data_check(dp, dbp);
145 xfs_dir2_leaf_log_bests(tp, lbp, 0, 0); 145 xfs_dir2_leaf_log_bests(tp, lbp, 0, 0);
146 xfs_da_buf_done(lbp);
147 return 0; 146 return 0;
148} 147}
149 148
@@ -282,7 +281,7 @@ xfs_dir2_leaf_addname(
282 __be16 *bestsp; /* freespace table in leaf */ 281 __be16 *bestsp; /* freespace table in leaf */
283 int compact; /* need to compact leaves */ 282 int compact; /* need to compact leaves */
284 xfs_dir2_data_hdr_t *hdr; /* data block header */ 283 xfs_dir2_data_hdr_t *hdr; /* data block header */
285 xfs_dabuf_t *dbp; /* data block buffer */ 284 struct xfs_buf *dbp; /* data block buffer */
286 xfs_dir2_data_entry_t *dep; /* data block entry */ 285 xfs_dir2_data_entry_t *dep; /* data block entry */
287 xfs_inode_t *dp; /* incore directory inode */ 286 xfs_inode_t *dp; /* incore directory inode */
288 xfs_dir2_data_unused_t *dup; /* data unused entry */ 287 xfs_dir2_data_unused_t *dup; /* data unused entry */
@@ -291,7 +290,7 @@ xfs_dir2_leaf_addname(
291 int highstale; /* index of next stale leaf */ 290 int highstale; /* index of next stale leaf */
292 int i; /* temporary, index */ 291 int i; /* temporary, index */
293 int index; /* leaf table position */ 292 int index; /* leaf table position */
294 xfs_dabuf_t *lbp; /* leaf's buffer */ 293 struct xfs_buf *lbp; /* leaf's buffer */
295 xfs_dir2_leaf_t *leaf; /* leaf structure */ 294 xfs_dir2_leaf_t *leaf; /* leaf structure */
296 int length; /* length of new entry */ 295 int length; /* length of new entry */
297 xfs_dir2_leaf_entry_t *lep; /* leaf entry table pointer */ 296 xfs_dir2_leaf_entry_t *lep; /* leaf entry table pointer */
@@ -328,7 +327,7 @@ xfs_dir2_leaf_addname(
328 * But if there are dup hash values the index is of the first of those. 327 * But if there are dup hash values the index is of the first of those.
329 */ 328 */
330 index = xfs_dir2_leaf_search_hash(args, lbp); 329 index = xfs_dir2_leaf_search_hash(args, lbp);
331 leaf = lbp->data; 330 leaf = lbp->b_addr;
332 ltp = xfs_dir2_leaf_tail_p(mp, leaf); 331 ltp = xfs_dir2_leaf_tail_p(mp, leaf);
333 bestsp = xfs_dir2_leaf_bests_p(ltp); 332 bestsp = xfs_dir2_leaf_bests_p(ltp);
334 length = xfs_dir2_data_entsize(args->namelen); 333 length = xfs_dir2_data_entsize(args->namelen);
@@ -402,14 +401,13 @@ xfs_dir2_leaf_addname(
402 */ 401 */
403 if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || 402 if ((args->op_flags & XFS_DA_OP_JUSTCHECK) ||
404 args->total == 0) { 403 args->total == 0) {
405 xfs_da_brelse(tp, lbp); 404 xfs_trans_brelse(tp, lbp);
406 return XFS_ERROR(ENOSPC); 405 return XFS_ERROR(ENOSPC);
407 } 406 }
408 /* 407 /*
409 * Convert to node form. 408 * Convert to node form.
410 */ 409 */
411 error = xfs_dir2_leaf_to_node(args, lbp); 410 error = xfs_dir2_leaf_to_node(args, lbp);
412 xfs_da_buf_done(lbp);
413 if (error) 411 if (error)
414 return error; 412 return error;
415 /* 413 /*
@@ -427,7 +425,7 @@ xfs_dir2_leaf_addname(
427 * a new data block. 425 * a new data block.
428 */ 426 */
429 if (args->op_flags & XFS_DA_OP_JUSTCHECK) { 427 if (args->op_flags & XFS_DA_OP_JUSTCHECK) {
430 xfs_da_brelse(tp, lbp); 428 xfs_trans_brelse(tp, lbp);
431 return use_block == -1 ? XFS_ERROR(ENOSPC) : 0; 429 return use_block == -1 ? XFS_ERROR(ENOSPC) : 0;
432 } 430 }
433 /* 431 /*
@@ -435,7 +433,7 @@ xfs_dir2_leaf_addname(
435 * changed anything. 433 * changed anything.
436 */ 434 */
437 if (args->total == 0 && use_block == -1) { 435 if (args->total == 0 && use_block == -1) {
438 xfs_da_brelse(tp, lbp); 436 xfs_trans_brelse(tp, lbp);
439 return XFS_ERROR(ENOSPC); 437 return XFS_ERROR(ENOSPC);
440 } 438 }
441 /* 439 /*
@@ -466,14 +464,14 @@ xfs_dir2_leaf_addname(
466 */ 464 */
467 if ((error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE, 465 if ((error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE,
468 &use_block))) { 466 &use_block))) {
469 xfs_da_brelse(tp, lbp); 467 xfs_trans_brelse(tp, lbp);
470 return error; 468 return error;
471 } 469 }
472 /* 470 /*
473 * Initialize the block. 471 * Initialize the block.
474 */ 472 */
475 if ((error = xfs_dir2_data_init(args, use_block, &dbp))) { 473 if ((error = xfs_dir2_data_init(args, use_block, &dbp))) {
476 xfs_da_brelse(tp, lbp); 474 xfs_trans_brelse(tp, lbp);
477 return error; 475 return error;
478 } 476 }
479 /* 477 /*
@@ -493,7 +491,7 @@ xfs_dir2_leaf_addname(
493 */ 491 */
494 else 492 else
495 xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block); 493 xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block);
496 hdr = dbp->data; 494 hdr = dbp->b_addr;
497 bestsp[use_block] = hdr->bestfree[0].length; 495 bestsp[use_block] = hdr->bestfree[0].length;
498 grown = 1; 496 grown = 1;
499 } 497 }
@@ -505,10 +503,10 @@ xfs_dir2_leaf_addname(
505 if ((error = 503 if ((error =
506 xfs_da_read_buf(tp, dp, xfs_dir2_db_to_da(mp, use_block), 504 xfs_da_read_buf(tp, dp, xfs_dir2_db_to_da(mp, use_block),
507 -1, &dbp, XFS_DATA_FORK))) { 505 -1, &dbp, XFS_DATA_FORK))) {
508 xfs_da_brelse(tp, lbp); 506 xfs_trans_brelse(tp, lbp);
509 return error; 507 return error;
510 } 508 }
511 hdr = dbp->data; 509 hdr = dbp->b_addr;
512 grown = 0; 510 grown = 0;
513 } 511 }
514 xfs_dir2_data_check(dp, dbp); 512 xfs_dir2_data_check(dp, dbp);
@@ -570,9 +568,7 @@ xfs_dir2_leaf_addname(
570 xfs_dir2_leaf_log_header(tp, lbp); 568 xfs_dir2_leaf_log_header(tp, lbp);
571 xfs_dir2_leaf_log_ents(tp, lbp, lfloglow, lfloghigh); 569 xfs_dir2_leaf_log_ents(tp, lbp, lfloglow, lfloghigh);
572 xfs_dir2_leaf_check(dp, lbp); 570 xfs_dir2_leaf_check(dp, lbp);
573 xfs_da_buf_done(lbp);
574 xfs_dir2_data_check(dp, dbp); 571 xfs_dir2_data_check(dp, dbp);
575 xfs_da_buf_done(dbp);
576 return 0; 572 return 0;
577} 573}
578 574
@@ -583,8 +579,8 @@ xfs_dir2_leaf_addname(
583 */ 579 */
584STATIC void 580STATIC void
585xfs_dir2_leaf_check( 581xfs_dir2_leaf_check(
586 xfs_inode_t *dp, /* incore directory inode */ 582 struct xfs_inode *dp, /* incore directory inode */
587 xfs_dabuf_t *bp) /* leaf's buffer */ 583 struct xfs_buf *bp) /* leaf's buffer */
588{ 584{
589 int i; /* leaf index */ 585 int i; /* leaf index */
590 xfs_dir2_leaf_t *leaf; /* leaf structure */ 586 xfs_dir2_leaf_t *leaf; /* leaf structure */
@@ -592,7 +588,7 @@ xfs_dir2_leaf_check(
592 xfs_mount_t *mp; /* filesystem mount point */ 588 xfs_mount_t *mp; /* filesystem mount point */
593 int stale; /* count of stale leaves */ 589 int stale; /* count of stale leaves */
594 590
595 leaf = bp->data; 591 leaf = bp->b_addr;
596 mp = dp->i_mount; 592 mp = dp->i_mount;
597 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC)); 593 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC));
598 /* 594 /*
@@ -628,14 +624,14 @@ xfs_dir2_leaf_check(
628void 624void
629xfs_dir2_leaf_compact( 625xfs_dir2_leaf_compact(
630 xfs_da_args_t *args, /* operation arguments */ 626 xfs_da_args_t *args, /* operation arguments */
631 xfs_dabuf_t *bp) /* leaf buffer */ 627 struct xfs_buf *bp) /* leaf buffer */
632{ 628{
633 int from; /* source leaf index */ 629 int from; /* source leaf index */
634 xfs_dir2_leaf_t *leaf; /* leaf structure */ 630 xfs_dir2_leaf_t *leaf; /* leaf structure */
635 int loglow; /* first leaf entry to log */ 631 int loglow; /* first leaf entry to log */
636 int to; /* target leaf index */ 632 int to; /* target leaf index */
637 633
638 leaf = bp->data; 634 leaf = bp->b_addr;
639 if (!leaf->hdr.stale) { 635 if (!leaf->hdr.stale) {
640 return; 636 return;
641 } 637 }
@@ -677,7 +673,7 @@ xfs_dir2_leaf_compact(
677 */ 673 */
678void 674void
679xfs_dir2_leaf_compact_x1( 675xfs_dir2_leaf_compact_x1(
680 xfs_dabuf_t *bp, /* leaf buffer */ 676 struct xfs_buf *bp, /* leaf buffer */
681 int *indexp, /* insertion index */ 677 int *indexp, /* insertion index */
682 int *lowstalep, /* out: stale entry before us */ 678 int *lowstalep, /* out: stale entry before us */
683 int *highstalep, /* out: stale entry after us */ 679 int *highstalep, /* out: stale entry after us */
@@ -693,7 +689,7 @@ xfs_dir2_leaf_compact_x1(
693 int newindex=0; /* new insertion index */ 689 int newindex=0; /* new insertion index */
694 int to; /* destination copy index */ 690 int to; /* destination copy index */
695 691
696 leaf = bp->data; 692 leaf = bp->b_addr;
697 ASSERT(be16_to_cpu(leaf->hdr.stale) > 1); 693 ASSERT(be16_to_cpu(leaf->hdr.stale) > 1);
698 index = *indexp; 694 index = *indexp;
699 695
@@ -763,6 +759,218 @@ xfs_dir2_leaf_compact_x1(
763 *highstalep = highstale; 759 *highstalep = highstale;
764} 760}
765 761
762struct xfs_dir2_leaf_map_info {
763 xfs_extlen_t map_blocks; /* number of fsbs in map */
764 xfs_dablk_t map_off; /* last mapped file offset */
765 int map_size; /* total entries in *map */
766 int map_valid; /* valid entries in *map */
767 int nmap; /* mappings to ask xfs_bmapi */
768 xfs_dir2_db_t curdb; /* db for current block */
769 int ra_current; /* number of read-ahead blks */
770 int ra_index; /* *map index for read-ahead */
771 int ra_offset; /* map entry offset for ra */
772 int ra_want; /* readahead count wanted */
773 struct xfs_bmbt_irec map[]; /* map vector for blocks */
774};
775
776STATIC int
777xfs_dir2_leaf_readbuf(
778 struct xfs_inode *dp,
779 size_t bufsize,
780 struct xfs_dir2_leaf_map_info *mip,
781 xfs_dir2_off_t *curoff,
782 struct xfs_buf **bpp)
783{
784 struct xfs_mount *mp = dp->i_mount;
785 struct xfs_buf *bp = *bpp;
786 struct xfs_bmbt_irec *map = mip->map;
787 int error = 0;
788 int length;
789 int i;
790 int j;
791
792 /*
793 * If we have a buffer, we need to release it and
794 * take it out of the mapping.
795 */
796
797 if (bp) {
798 xfs_trans_brelse(NULL, bp);
799 bp = NULL;
800 mip->map_blocks -= mp->m_dirblkfsbs;
801 /*
802 * Loop to get rid of the extents for the
803 * directory block.
804 */
805 for (i = mp->m_dirblkfsbs; i > 0; ) {
806 j = min_t(int, map->br_blockcount, i);
807 map->br_blockcount -= j;
808 map->br_startblock += j;
809 map->br_startoff += j;
810 /*
811 * If mapping is done, pitch it from
812 * the table.
813 */
814 if (!map->br_blockcount && --mip->map_valid)
815 memmove(&map[0], &map[1],
816 sizeof(map[0]) * mip->map_valid);
817 i -= j;
818 }
819 }
820
821 /*
822 * Recalculate the readahead blocks wanted.
823 */
824 mip->ra_want = howmany(bufsize + mp->m_dirblksize,
825 mp->m_sb.sb_blocksize) - 1;
826 ASSERT(mip->ra_want >= 0);
827
828 /*
829 * If we don't have as many as we want, and we haven't
830 * run out of data blocks, get some more mappings.
831 */
832 if (1 + mip->ra_want > mip->map_blocks &&
833 mip->map_off < xfs_dir2_byte_to_da(mp, XFS_DIR2_LEAF_OFFSET)) {
834 /*
835 * Get more bmaps, fill in after the ones
836 * we already have in the table.
837 */
838 mip->nmap = mip->map_size - mip->map_valid;
839 error = xfs_bmapi_read(dp, mip->map_off,
840 xfs_dir2_byte_to_da(mp, XFS_DIR2_LEAF_OFFSET) -
841 mip->map_off,
842 &map[mip->map_valid], &mip->nmap, 0);
843
844 /*
845 * Don't know if we should ignore this or try to return an
846 * error. The trouble with returning errors is that readdir
847 * will just stop without actually passing the error through.
848 */
849 if (error)
850 goto out; /* XXX */
851
852 /*
853 * If we got all the mappings we asked for, set the final map
854 * offset based on the last bmap value received. Otherwise,
855 * we've reached the end.
856 */
857 if (mip->nmap == mip->map_size - mip->map_valid) {
858 i = mip->map_valid + mip->nmap - 1;
859 mip->map_off = map[i].br_startoff + map[i].br_blockcount;
860 } else
861 mip->map_off = xfs_dir2_byte_to_da(mp,
862 XFS_DIR2_LEAF_OFFSET);
863
864 /*
865 * Look for holes in the mapping, and eliminate them. Count up
866 * the valid blocks.
867 */
868 for (i = mip->map_valid; i < mip->map_valid + mip->nmap; ) {
869 if (map[i].br_startblock == HOLESTARTBLOCK) {
870 mip->nmap--;
871 length = mip->map_valid + mip->nmap - i;
872 if (length)
873 memmove(&map[i], &map[i + 1],
874 sizeof(map[i]) * length);
875 } else {
876 mip->map_blocks += map[i].br_blockcount;
877 i++;
878 }
879 }
880 mip->map_valid += mip->nmap;
881 }
882
883 /*
884 * No valid mappings, so no more data blocks.
885 */
886 if (!mip->map_valid) {
887 *curoff = xfs_dir2_da_to_byte(mp, mip->map_off);
888 goto out;
889 }
890
891 /*
892 * Read the directory block starting at the first mapping.
893 */
894 mip->curdb = xfs_dir2_da_to_db(mp, map->br_startoff);
895 error = xfs_da_read_buf(NULL, dp, map->br_startoff,
896 map->br_blockcount >= mp->m_dirblkfsbs ?
897 XFS_FSB_TO_DADDR(mp, map->br_startblock) : -1,
898 &bp, XFS_DATA_FORK);
899
900 /*
901 * Should just skip over the data block instead of giving up.
902 */
903 if (error)
904 goto out; /* XXX */
905
906 /*
907 * Adjust the current amount of read-ahead: we just read a block that
908 * was previously read ahead.
909 */
910 if (mip->ra_current)
911 mip->ra_current -= mp->m_dirblkfsbs;
912
913 /*
914 * Do we need more readahead?
915 */
916 for (mip->ra_index = mip->ra_offset = i = 0;
917 mip->ra_want > mip->ra_current && i < mip->map_blocks;
918 i += mp->m_dirblkfsbs) {
919 ASSERT(mip->ra_index < mip->map_valid);
920 /*
921 * Read-ahead a contiguous directory block.
922 */
923 if (i > mip->ra_current &&
924 map[mip->ra_index].br_blockcount >= mp->m_dirblkfsbs) {
925 xfs_buf_readahead(mp->m_ddev_targp,
926 XFS_FSB_TO_DADDR(mp,
927 map[mip->ra_index].br_startblock +
928 mip->ra_offset),
929 (int)BTOBB(mp->m_dirblksize));
930 mip->ra_current = i;
931 }
932
933 /*
934 * Read-ahead a non-contiguous directory block. This doesn't
935 * use our mapping, but this is a very rare case.
936 */
937 else if (i > mip->ra_current) {
938 xfs_da_reada_buf(NULL, dp,
939 map[mip->ra_index].br_startoff +
940 mip->ra_offset,
941 XFS_DATA_FORK);
942 mip->ra_current = i;
943 }
944
945 /*
946 * Advance offset through the mapping table.
947 */
948 for (j = 0; j < mp->m_dirblkfsbs; j++) {
949 /*
950 * The rest of this extent but not more than a dir
951 * block.
952 */
953 length = min_t(int, mp->m_dirblkfsbs,
954 map[mip->ra_index].br_blockcount -
955 mip->ra_offset);
956 j += length;
957 mip->ra_offset += length;
958
959 /*
960 * Advance to the next mapping if this one is used up.
961 */
962 if (mip->ra_offset == map[mip->ra_index].br_blockcount) {
963 mip->ra_offset = 0;
964 mip->ra_index++;
965 }
966 }
967 }
968
969out:
970 *bpp = bp;
971 return error;
972}
973
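For orientation, here is a minimal sketch of how the reworked getdents loop (shown in the following hunks) is expected to drive this helper; the control flow is simplified and the local names are taken from the surrounding code, so it is illustrative rather than the literal kernel source:

	while (curoff < XFS_DIR2_LEAF_OFFSET) {
		/*
		 * No buffer yet, or we ran off the end of the current
		 * directory block: let the helper refill the bmap table,
		 * issue readahead and hand back the next data buffer.
		 */
		if (!bp || ptr >= (char *)bp->b_addr + mp->m_dirblksize) {
			error = xfs_dir2_leaf_readbuf(dp, bufsize, map_info,
						      &curoff, &bp);
			if (error || !map_info->map_valid)
				break;
			/* reposition ptr at the start of the new block */
		}
		/* ... walk the entries at bp->b_addr and call filldir() ... */
	}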
766/* 974/*
767 * Getdents (readdir) for leaf and node directories. 975 * Getdents (readdir) for leaf and node directories.
768 * This reads the data blocks only, so is the same for both forms. 976 * This reads the data blocks only, so is the same for both forms.
@@ -775,30 +983,18 @@ xfs_dir2_leaf_getdents(
775 xfs_off_t *offset, 983 xfs_off_t *offset,
776 filldir_t filldir) 984 filldir_t filldir)
777{ 985{
778 xfs_dabuf_t *bp; /* data block buffer */ 986 struct xfs_buf *bp = NULL; /* data block buffer */
779 int byteoff; /* offset in current block */
780 xfs_dir2_db_t curdb; /* db for current block */
781 xfs_dir2_off_t curoff; /* current overall offset */
782 xfs_dir2_data_hdr_t *hdr; /* data block header */ 987 xfs_dir2_data_hdr_t *hdr; /* data block header */
783 xfs_dir2_data_entry_t *dep; /* data entry */ 988 xfs_dir2_data_entry_t *dep; /* data entry */
784 xfs_dir2_data_unused_t *dup; /* unused entry */ 989 xfs_dir2_data_unused_t *dup; /* unused entry */
785 int error = 0; /* error return value */ 990 int error = 0; /* error return value */
786 int i; /* temporary loop index */
787 int j; /* temporary loop index */
788 int length; /* temporary length value */ 991 int length; /* temporary length value */
789 xfs_bmbt_irec_t *map; /* map vector for blocks */
790 xfs_extlen_t map_blocks; /* number of fsbs in map */
791 xfs_dablk_t map_off; /* last mapped file offset */
792 int map_size; /* total entries in *map */
793 int map_valid; /* valid entries in *map */
794 xfs_mount_t *mp; /* filesystem mount point */ 992 xfs_mount_t *mp; /* filesystem mount point */
993 int byteoff; /* offset in current block */
994 xfs_dir2_off_t curoff; /* current overall offset */
795 xfs_dir2_off_t newoff; /* new curoff after new blk */ 995 xfs_dir2_off_t newoff; /* new curoff after new blk */
796 int nmap; /* mappings to ask xfs_bmapi */
797 char *ptr = NULL; /* pointer to current data */ 996 char *ptr = NULL; /* pointer to current data */
798 int ra_current; /* number of read-ahead blks */ 997 struct xfs_dir2_leaf_map_info *map_info;
799 int ra_index; /* *map index for read-ahead */
800 int ra_offset; /* map entry offset for ra */
801 int ra_want; /* readahead count wanted */
802 998
803 /* 999 /*
804 * If the offset is at or past the largest allowed value, 1000 * If the offset is at or past the largest allowed value,
@@ -814,10 +1010,12 @@ xfs_dir2_leaf_getdents(
814 * buffer size, the directory block size, and the filesystem 1010 * buffer size, the directory block size, and the filesystem
815 * block size. 1011 * block size.
816 */ 1012 */
817 map_size = howmany(bufsize + mp->m_dirblksize, mp->m_sb.sb_blocksize); 1013 length = howmany(bufsize + mp->m_dirblksize,
818 map = kmem_alloc(map_size * sizeof(*map), KM_SLEEP); 1014 mp->m_sb.sb_blocksize);
819 map_valid = ra_index = ra_offset = ra_current = map_blocks = 0; 1015 map_info = kmem_zalloc(offsetof(struct xfs_dir2_leaf_map_info, map) +
820 bp = NULL; 1016 (length * sizeof(struct xfs_bmbt_irec)),
1017 KM_SLEEP);
1018 map_info->map_size = length;
821 1019
822 /* 1020 /*
823 * Inside the loop we keep the main offset value as a byte offset 1021 * Inside the loop we keep the main offset value as a byte offset
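The map_info allocation in the hunk above ends in a C99 flexible array member, so one kmem_zalloc() sized with offsetof() covers both the fixed header and the run-time number of bmap records. A rough illustration of the idiom, with a hypothetical structure name that is not part of the commit:

	struct example_map {				/* hypothetical */
		int			count;		/* fixed header */
		struct xfs_bmbt_irec	map[];		/* flexible array member */
	};

	int			n = 4;			/* example record count */
	struct example_map	*em;

	em = kmem_zalloc(offsetof(struct example_map, map) +
			 n * sizeof(struct xfs_bmbt_irec), KM_SLEEP);
	em->count = n;			/* how many map[] slots follow */
	/* ... fill and use em->map[0 .. n - 1] ... */
	kmem_free(em);			/* one allocation, one free */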
@@ -829,7 +1027,9 @@ xfs_dir2_leaf_getdents(
829 * Force this conversion through db so we truncate the offset 1027 * Force this conversion through db so we truncate the offset
830 * down to get the start of the data block. 1028 * down to get the start of the data block.
831 */ 1029 */
832 map_off = xfs_dir2_db_to_da(mp, xfs_dir2_byte_to_db(mp, curoff)); 1030 map_info->map_off = xfs_dir2_db_to_da(mp,
1031 xfs_dir2_byte_to_db(mp, curoff));
1032
833 /* 1033 /*
834 * Loop over directory entries until we reach the end offset. 1034 * Loop over directory entries until we reach the end offset.
835 * Get more blocks and readahead as necessary. 1035 * Get more blocks and readahead as necessary.
@@ -839,191 +1039,17 @@ xfs_dir2_leaf_getdents(
839 * If we have no buffer, or we're off the end of the 1039 * If we have no buffer, or we're off the end of the
840 * current buffer, need to get another one. 1040 * current buffer, need to get another one.
841 */ 1041 */
842 if (!bp || ptr >= (char *)bp->data + mp->m_dirblksize) { 1042 if (!bp || ptr >= (char *)bp->b_addr + mp->m_dirblksize) {
843 /*
844 * If we have a buffer, we need to release it and
845 * take it out of the mapping.
846 */
847 if (bp) {
848 xfs_da_brelse(NULL, bp);
849 bp = NULL;
850 map_blocks -= mp->m_dirblkfsbs;
851 /*
852 * Loop to get rid of the extents for the
853 * directory block.
854 */
855 for (i = mp->m_dirblkfsbs; i > 0; ) {
856 j = MIN((int)map->br_blockcount, i);
857 map->br_blockcount -= j;
858 map->br_startblock += j;
859 map->br_startoff += j;
860 /*
861 * If mapping is done, pitch it from
862 * the table.
863 */
864 if (!map->br_blockcount && --map_valid)
865 memmove(&map[0], &map[1],
866 sizeof(map[0]) *
867 map_valid);
868 i -= j;
869 }
870 }
871 /*
872 * Recalculate the readahead blocks wanted.
873 */
874 ra_want = howmany(bufsize + mp->m_dirblksize,
875 mp->m_sb.sb_blocksize) - 1;
876 ASSERT(ra_want >= 0);
877 1043
878 /* 1044 error = xfs_dir2_leaf_readbuf(dp, bufsize, map_info,
879 * If we don't have as many as we want, and we haven't 1045 &curoff, &bp);
880 * run out of data blocks, get some more mappings. 1046 if (error || !map_info->map_valid)
881 */
882 if (1 + ra_want > map_blocks &&
883 map_off <
884 xfs_dir2_byte_to_da(mp, XFS_DIR2_LEAF_OFFSET)) {
885 /*
886 * Get more bmaps, fill in after the ones
887 * we already have in the table.
888 */
889 nmap = map_size - map_valid;
890 error = xfs_bmapi_read(dp, map_off,
891 xfs_dir2_byte_to_da(mp,
892 XFS_DIR2_LEAF_OFFSET) - map_off,
893 &map[map_valid], &nmap, 0);
894 /*
895 * Don't know if we should ignore this or
896 * try to return an error.
897 * The trouble with returning errors
898 * is that readdir will just stop without
899 * actually passing the error through.
900 */
901 if (error)
902 break; /* XXX */
903 /*
904 * If we got all the mappings we asked for,
905 * set the final map offset based on the
906 * last bmap value received.
907 * Otherwise, we've reached the end.
908 */
909 if (nmap == map_size - map_valid)
910 map_off =
911 map[map_valid + nmap - 1].br_startoff +
912 map[map_valid + nmap - 1].br_blockcount;
913 else
914 map_off =
915 xfs_dir2_byte_to_da(mp,
916 XFS_DIR2_LEAF_OFFSET);
917 /*
918 * Look for holes in the mapping, and
919 * eliminate them. Count up the valid blocks.
920 */
921 for (i = map_valid; i < map_valid + nmap; ) {
922 if (map[i].br_startblock ==
923 HOLESTARTBLOCK) {
924 nmap--;
925 length = map_valid + nmap - i;
926 if (length)
927 memmove(&map[i],
928 &map[i + 1],
929 sizeof(map[i]) *
930 length);
931 } else {
932 map_blocks +=
933 map[i].br_blockcount;
934 i++;
935 }
936 }
937 map_valid += nmap;
938 }
939 /*
940 * No valid mappings, so no more data blocks.
941 */
942 if (!map_valid) {
943 curoff = xfs_dir2_da_to_byte(mp, map_off);
944 break; 1047 break;
945 } 1048
946 /*
947 * Read the directory block starting at the first
948 * mapping.
949 */
950 curdb = xfs_dir2_da_to_db(mp, map->br_startoff);
951 error = xfs_da_read_buf(NULL, dp, map->br_startoff,
952 map->br_blockcount >= mp->m_dirblkfsbs ?
953 XFS_FSB_TO_DADDR(mp, map->br_startblock) :
954 -1,
955 &bp, XFS_DATA_FORK);
956 /*
957 * Should just skip over the data block instead
958 * of giving up.
959 */
960 if (error)
961 break; /* XXX */
962 /*
963 * Adjust the current amount of read-ahead: we just
964 * read a block that was previously ra.
965 */
966 if (ra_current)
967 ra_current -= mp->m_dirblkfsbs;
968 /*
969 * Do we need more readahead?
970 */
971 for (ra_index = ra_offset = i = 0;
972 ra_want > ra_current && i < map_blocks;
973 i += mp->m_dirblkfsbs) {
974 ASSERT(ra_index < map_valid);
975 /*
976 * Read-ahead a contiguous directory block.
977 */
978 if (i > ra_current &&
979 map[ra_index].br_blockcount >=
980 mp->m_dirblkfsbs) {
981 xfs_buf_readahead(mp->m_ddev_targp,
982 XFS_FSB_TO_DADDR(mp,
983 map[ra_index].br_startblock +
984 ra_offset),
985 (int)BTOBB(mp->m_dirblksize));
986 ra_current = i;
987 }
988 /*
989 * Read-ahead a non-contiguous directory block.
990 * This doesn't use our mapping, but this
991 * is a very rare case.
992 */
993 else if (i > ra_current) {
994 (void)xfs_da_reada_buf(NULL, dp,
995 map[ra_index].br_startoff +
996 ra_offset, XFS_DATA_FORK);
997 ra_current = i;
998 }
999 /*
1000 * Advance offset through the mapping table.
1001 */
1002 for (j = 0; j < mp->m_dirblkfsbs; j++) {
1003 /*
1004 * The rest of this extent but not
1005 * more than a dir block.
1006 */
1007 length = MIN(mp->m_dirblkfsbs,
1008 (int)(map[ra_index].br_blockcount -
1009 ra_offset));
1010 j += length;
1011 ra_offset += length;
1012 /*
1013 * Advance to the next mapping if
1014 * this one is used up.
1015 */
1016 if (ra_offset ==
1017 map[ra_index].br_blockcount) {
1018 ra_offset = 0;
1019 ra_index++;
1020 }
1021 }
1022 }
1023 /* 1049 /*
1024 * Having done a read, we need to set a new offset. 1050 * Having done a read, we need to set a new offset.
1025 */ 1051 */
1026 newoff = xfs_dir2_db_off_to_byte(mp, curdb, 0); 1052 newoff = xfs_dir2_db_off_to_byte(mp, map_info->curdb, 0);
1027 /* 1053 /*
1028 * Start of the current block. 1054 * Start of the current block.
1029 */ 1055 */
@@ -1034,8 +1060,8 @@ xfs_dir2_leaf_getdents(
1034 */ 1060 */
1035 else if (curoff > newoff) 1061 else if (curoff > newoff)
1036 ASSERT(xfs_dir2_byte_to_db(mp, curoff) == 1062 ASSERT(xfs_dir2_byte_to_db(mp, curoff) ==
1037 curdb); 1063 map_info->curdb);
1038 hdr = bp->data; 1064 hdr = bp->b_addr;
1039 xfs_dir2_data_check(dp, bp); 1065 xfs_dir2_data_check(dp, bp);
1040 /* 1066 /*
1041 * Find our position in the block. 1067 * Find our position in the block.
@@ -1117,9 +1143,9 @@ xfs_dir2_leaf_getdents(
1117 *offset = XFS_DIR2_MAX_DATAPTR & 0x7fffffff; 1143 *offset = XFS_DIR2_MAX_DATAPTR & 0x7fffffff;
1118 else 1144 else
1119 *offset = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff; 1145 *offset = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff;
1120 kmem_free(map); 1146 kmem_free(map_info);
1121 if (bp) 1147 if (bp)
1122 xfs_da_brelse(NULL, bp); 1148 xfs_trans_brelse(NULL, bp);
1123 return error; 1149 return error;
1124} 1150}
1125 1151
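The teardown at the end of the function follows directly from the rework: the mapping table is a single flexible-array allocation, and any directory buffer still held was read without a transaction, so it is released with a NULL transaction pointer. A small sketch of that contract, using a hypothetical helper name that does not exist in the commit:

STATIC void
xfs_dir2_leaf_getdents_cleanup(		/* hypothetical example */
	struct xfs_dir2_leaf_map_info	*map_info,
	struct xfs_buf			*bp)
{
	kmem_free(map_info);			/* single kmem_zalloc() to free */
	if (bp)
		xfs_trans_brelse(NULL, bp);	/* tp == NULL: plain buffer release */
}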
@@ -1130,10 +1156,10 @@ int
1130xfs_dir2_leaf_init( 1156xfs_dir2_leaf_init(
1131 xfs_da_args_t *args, /* operation arguments */ 1157 xfs_da_args_t *args, /* operation arguments */
1132 xfs_dir2_db_t bno, /* directory block number */ 1158 xfs_dir2_db_t bno, /* directory block number */
1133 xfs_dabuf_t **bpp, /* out: leaf buffer */ 1159 struct xfs_buf **bpp, /* out: leaf buffer */
1134 int magic) /* magic number for block */ 1160 int magic) /* magic number for block */
1135{ 1161{
1136 xfs_dabuf_t *bp; /* leaf buffer */ 1162 struct xfs_buf *bp; /* leaf buffer */
1137 xfs_inode_t *dp; /* incore directory inode */ 1163 xfs_inode_t *dp; /* incore directory inode */
1138 int error; /* error return code */ 1164 int error; /* error return code */
1139 xfs_dir2_leaf_t *leaf; /* leaf structure */ 1165 xfs_dir2_leaf_t *leaf; /* leaf structure */
@@ -1156,7 +1182,7 @@ xfs_dir2_leaf_init(
1156 return error; 1182 return error;
1157 } 1183 }
1158 ASSERT(bp != NULL); 1184 ASSERT(bp != NULL);
1159 leaf = bp->data; 1185 leaf = bp->b_addr;
1160 /* 1186 /*
1161 * Initialize the header. 1187 * Initialize the header.
1162 */ 1188 */
@@ -1186,7 +1212,7 @@ xfs_dir2_leaf_init(
1186static void 1212static void
1187xfs_dir2_leaf_log_bests( 1213xfs_dir2_leaf_log_bests(
1188 xfs_trans_t *tp, /* transaction pointer */ 1214 xfs_trans_t *tp, /* transaction pointer */
1189 xfs_dabuf_t *bp, /* leaf buffer */ 1215 struct xfs_buf *bp, /* leaf buffer */
1190 int first, /* first entry to log */ 1216 int first, /* first entry to log */
1191 int last) /* last entry to log */ 1217 int last) /* last entry to log */
1192{ 1218{
@@ -1195,12 +1221,12 @@ xfs_dir2_leaf_log_bests(
1195 xfs_dir2_leaf_t *leaf; /* leaf structure */ 1221 xfs_dir2_leaf_t *leaf; /* leaf structure */
1196 xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ 1222 xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */
1197 1223
1198 leaf = bp->data; 1224 leaf = bp->b_addr;
1199 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC)); 1225 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC));
1200 ltp = xfs_dir2_leaf_tail_p(tp->t_mountp, leaf); 1226 ltp = xfs_dir2_leaf_tail_p(tp->t_mountp, leaf);
1201 firstb = xfs_dir2_leaf_bests_p(ltp) + first; 1227 firstb = xfs_dir2_leaf_bests_p(ltp) + first;
1202 lastb = xfs_dir2_leaf_bests_p(ltp) + last; 1228 lastb = xfs_dir2_leaf_bests_p(ltp) + last;
1203 xfs_da_log_buf(tp, bp, (uint)((char *)firstb - (char *)leaf), 1229 xfs_trans_log_buf(tp, bp, (uint)((char *)firstb - (char *)leaf),
1204 (uint)((char *)lastb - (char *)leaf + sizeof(*lastb) - 1)); 1230 (uint)((char *)lastb - (char *)leaf + sizeof(*lastb) - 1));
1205} 1231}
1206 1232
@@ -1210,7 +1236,7 @@ xfs_dir2_leaf_log_bests(
1210void 1236void
1211xfs_dir2_leaf_log_ents( 1237xfs_dir2_leaf_log_ents(
1212 xfs_trans_t *tp, /* transaction pointer */ 1238 xfs_trans_t *tp, /* transaction pointer */
1213 xfs_dabuf_t *bp, /* leaf buffer */ 1239 struct xfs_buf *bp, /* leaf buffer */
1214 int first, /* first entry to log */ 1240 int first, /* first entry to log */
1215 int last) /* last entry to log */ 1241 int last) /* last entry to log */
1216{ 1242{
@@ -1218,12 +1244,12 @@ xfs_dir2_leaf_log_ents(
1218 xfs_dir2_leaf_entry_t *lastlep; /* pointer to last entry */ 1244 xfs_dir2_leaf_entry_t *lastlep; /* pointer to last entry */
1219 xfs_dir2_leaf_t *leaf; /* leaf structure */ 1245 xfs_dir2_leaf_t *leaf; /* leaf structure */
1220 1246
1221 leaf = bp->data; 1247 leaf = bp->b_addr;
1222 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC) || 1248 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC) ||
1223 leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); 1249 leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
1224 firstlep = &leaf->ents[first]; 1250 firstlep = &leaf->ents[first];
1225 lastlep = &leaf->ents[last]; 1251 lastlep = &leaf->ents[last];
1226 xfs_da_log_buf(tp, bp, (uint)((char *)firstlep - (char *)leaf), 1252 xfs_trans_log_buf(tp, bp, (uint)((char *)firstlep - (char *)leaf),
1227 (uint)((char *)lastlep - (char *)leaf + sizeof(*lastlep) - 1)); 1253 (uint)((char *)lastlep - (char *)leaf + sizeof(*lastlep) - 1));
1228} 1254}
1229 1255
@@ -1232,15 +1258,15 @@ xfs_dir2_leaf_log_ents(
1232 */ 1258 */
1233void 1259void
1234xfs_dir2_leaf_log_header( 1260xfs_dir2_leaf_log_header(
1235 xfs_trans_t *tp, /* transaction pointer */ 1261 struct xfs_trans *tp,
1236 xfs_dabuf_t *bp) /* leaf buffer */ 1262 struct xfs_buf *bp)
1237{ 1263{
1238 xfs_dir2_leaf_t *leaf; /* leaf structure */ 1264 xfs_dir2_leaf_t *leaf; /* leaf structure */
1239 1265
1240 leaf = bp->data; 1266 leaf = bp->b_addr;
1241 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC) || 1267 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC) ||
1242 leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); 1268 leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
1243 xfs_da_log_buf(tp, bp, (uint)((char *)&leaf->hdr - (char *)leaf), 1269 xfs_trans_log_buf(tp, bp, (uint)((char *)&leaf->hdr - (char *)leaf),
1244 (uint)(sizeof(leaf->hdr) - 1)); 1270 (uint)(sizeof(leaf->hdr) - 1));
1245} 1271}
1246 1272
@@ -1249,18 +1275,18 @@ xfs_dir2_leaf_log_header(
1249 */ 1275 */
1250STATIC void 1276STATIC void
1251xfs_dir2_leaf_log_tail( 1277xfs_dir2_leaf_log_tail(
1252 xfs_trans_t *tp, /* transaction pointer */ 1278 struct xfs_trans *tp,
1253 xfs_dabuf_t *bp) /* leaf buffer */ 1279 struct xfs_buf *bp)
1254{ 1280{
1255 xfs_dir2_leaf_t *leaf; /* leaf structure */ 1281 xfs_dir2_leaf_t *leaf; /* leaf structure */
1256 xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ 1282 xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */
1257 xfs_mount_t *mp; /* filesystem mount point */ 1283 xfs_mount_t *mp; /* filesystem mount point */
1258 1284
1259 mp = tp->t_mountp; 1285 mp = tp->t_mountp;
1260 leaf = bp->data; 1286 leaf = bp->b_addr;
1261 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC)); 1287 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC));
1262 ltp = xfs_dir2_leaf_tail_p(mp, leaf); 1288 ltp = xfs_dir2_leaf_tail_p(mp, leaf);
1263 xfs_da_log_buf(tp, bp, (uint)((char *)ltp - (char *)leaf), 1289 xfs_trans_log_buf(tp, bp, (uint)((char *)ltp - (char *)leaf),
1264 (uint)(mp->m_dirblksize - 1)); 1290 (uint)(mp->m_dirblksize - 1));
1265} 1291}
1266 1292
@@ -1273,12 +1299,12 @@ int
1273xfs_dir2_leaf_lookup( 1299xfs_dir2_leaf_lookup(
1274 xfs_da_args_t *args) /* operation arguments */ 1300 xfs_da_args_t *args) /* operation arguments */
1275{ 1301{
1276 xfs_dabuf_t *dbp; /* data block buffer */ 1302 struct xfs_buf *dbp; /* data block buffer */
1277 xfs_dir2_data_entry_t *dep; /* data block entry */ 1303 xfs_dir2_data_entry_t *dep; /* data block entry */
1278 xfs_inode_t *dp; /* incore directory inode */ 1304 xfs_inode_t *dp; /* incore directory inode */
1279 int error; /* error return code */ 1305 int error; /* error return code */
1280 int index; /* found entry index */ 1306 int index; /* found entry index */
1281 xfs_dabuf_t *lbp; /* leaf buffer */ 1307 struct xfs_buf *lbp; /* leaf buffer */
1282 xfs_dir2_leaf_t *leaf; /* leaf structure */ 1308 xfs_dir2_leaf_t *leaf; /* leaf structure */
1283 xfs_dir2_leaf_entry_t *lep; /* leaf entry */ 1309 xfs_dir2_leaf_entry_t *lep; /* leaf entry */
1284 xfs_trans_t *tp; /* transaction pointer */ 1310 xfs_trans_t *tp; /* transaction pointer */
@@ -1294,7 +1320,7 @@ xfs_dir2_leaf_lookup(
1294 tp = args->trans; 1320 tp = args->trans;
1295 dp = args->dp; 1321 dp = args->dp;
1296 xfs_dir2_leaf_check(dp, lbp); 1322 xfs_dir2_leaf_check(dp, lbp);
1297 leaf = lbp->data; 1323 leaf = lbp->b_addr;
1298 /* 1324 /*
1299 * Get to the leaf entry and contained data entry address. 1325 * Get to the leaf entry and contained data entry address.
1300 */ 1326 */
@@ -1303,15 +1329,15 @@ xfs_dir2_leaf_lookup(
1303 * Point to the data entry. 1329 * Point to the data entry.
1304 */ 1330 */
1305 dep = (xfs_dir2_data_entry_t *) 1331 dep = (xfs_dir2_data_entry_t *)
1306 ((char *)dbp->data + 1332 ((char *)dbp->b_addr +
1307 xfs_dir2_dataptr_to_off(dp->i_mount, be32_to_cpu(lep->address))); 1333 xfs_dir2_dataptr_to_off(dp->i_mount, be32_to_cpu(lep->address)));
1308 /* 1334 /*
1309 * Return the found inode number & CI name if appropriate 1335 * Return the found inode number & CI name if appropriate
1310 */ 1336 */
1311 args->inumber = be64_to_cpu(dep->inumber); 1337 args->inumber = be64_to_cpu(dep->inumber);
1312 error = xfs_dir_cilookup_result(args, dep->name, dep->namelen); 1338 error = xfs_dir_cilookup_result(args, dep->name, dep->namelen);
1313 xfs_da_brelse(tp, dbp); 1339 xfs_trans_brelse(tp, dbp);
1314 xfs_da_brelse(tp, lbp); 1340 xfs_trans_brelse(tp, lbp);
1315 return XFS_ERROR(error); 1341 return XFS_ERROR(error);
1316} 1342}
1317 1343
@@ -1324,17 +1350,17 @@ xfs_dir2_leaf_lookup(
1324static int /* error */ 1350static int /* error */
1325xfs_dir2_leaf_lookup_int( 1351xfs_dir2_leaf_lookup_int(
1326 xfs_da_args_t *args, /* operation arguments */ 1352 xfs_da_args_t *args, /* operation arguments */
1327 xfs_dabuf_t **lbpp, /* out: leaf buffer */ 1353 struct xfs_buf **lbpp, /* out: leaf buffer */
1328 int *indexp, /* out: index in leaf block */ 1354 int *indexp, /* out: index in leaf block */
1329 xfs_dabuf_t **dbpp) /* out: data buffer */ 1355 struct xfs_buf **dbpp) /* out: data buffer */
1330{ 1356{
1331 xfs_dir2_db_t curdb = -1; /* current data block number */ 1357 xfs_dir2_db_t curdb = -1; /* current data block number */
1332 xfs_dabuf_t *dbp = NULL; /* data buffer */ 1358 struct xfs_buf *dbp = NULL; /* data buffer */
1333 xfs_dir2_data_entry_t *dep; /* data entry */ 1359 xfs_dir2_data_entry_t *dep; /* data entry */
1334 xfs_inode_t *dp; /* incore directory inode */ 1360 xfs_inode_t *dp; /* incore directory inode */
1335 int error; /* error return code */ 1361 int error; /* error return code */
1336 int index; /* index in leaf block */ 1362 int index; /* index in leaf block */
1337 xfs_dabuf_t *lbp; /* leaf buffer */ 1363 struct xfs_buf *lbp; /* leaf buffer */
1338 xfs_dir2_leaf_entry_t *lep; /* leaf entry */ 1364 xfs_dir2_leaf_entry_t *lep; /* leaf entry */
1339 xfs_dir2_leaf_t *leaf; /* leaf structure */ 1365 xfs_dir2_leaf_t *leaf; /* leaf structure */
1340 xfs_mount_t *mp; /* filesystem mount point */ 1366 xfs_mount_t *mp; /* filesystem mount point */
@@ -1354,7 +1380,7 @@ xfs_dir2_leaf_lookup_int(
1354 if (error) 1380 if (error)
1355 return error; 1381 return error;
1356 *lbpp = lbp; 1382 *lbpp = lbp;
1357 leaf = lbp->data; 1383 leaf = lbp->b_addr;
1358 xfs_dir2_leaf_check(dp, lbp); 1384 xfs_dir2_leaf_check(dp, lbp);
1359 /* 1385 /*
1360 * Look for the first leaf entry with our hash value. 1386 * Look for the first leaf entry with our hash value.
@@ -1382,12 +1408,12 @@ xfs_dir2_leaf_lookup_int(
1382 */ 1408 */
1383 if (newdb != curdb) { 1409 if (newdb != curdb) {
1384 if (dbp) 1410 if (dbp)
1385 xfs_da_brelse(tp, dbp); 1411 xfs_trans_brelse(tp, dbp);
1386 error = xfs_da_read_buf(tp, dp, 1412 error = xfs_da_read_buf(tp, dp,
1387 xfs_dir2_db_to_da(mp, newdb), 1413 xfs_dir2_db_to_da(mp, newdb),
1388 -1, &dbp, XFS_DATA_FORK); 1414 -1, &dbp, XFS_DATA_FORK);
1389 if (error) { 1415 if (error) {
1390 xfs_da_brelse(tp, lbp); 1416 xfs_trans_brelse(tp, lbp);
1391 return error; 1417 return error;
1392 } 1418 }
1393 xfs_dir2_data_check(dp, dbp); 1419 xfs_dir2_data_check(dp, dbp);
@@ -1396,7 +1422,7 @@ xfs_dir2_leaf_lookup_int(
1396 /* 1422 /*
1397 * Point to the data entry. 1423 * Point to the data entry.
1398 */ 1424 */
1399 dep = (xfs_dir2_data_entry_t *)((char *)dbp->data + 1425 dep = (xfs_dir2_data_entry_t *)((char *)dbp->b_addr +
1400 xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address))); 1426 xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address)));
1401 /* 1427 /*
1402 * Compare name and if it's an exact match, return the index 1428 * Compare name and if it's an exact match, return the index
@@ -1424,12 +1450,12 @@ xfs_dir2_leaf_lookup_int(
1424 if (args->cmpresult == XFS_CMP_CASE) { 1450 if (args->cmpresult == XFS_CMP_CASE) {
1425 ASSERT(cidb != -1); 1451 ASSERT(cidb != -1);
1426 if (cidb != curdb) { 1452 if (cidb != curdb) {
1427 xfs_da_brelse(tp, dbp); 1453 xfs_trans_brelse(tp, dbp);
1428 error = xfs_da_read_buf(tp, dp, 1454 error = xfs_da_read_buf(tp, dp,
1429 xfs_dir2_db_to_da(mp, cidb), 1455 xfs_dir2_db_to_da(mp, cidb),
1430 -1, &dbp, XFS_DATA_FORK); 1456 -1, &dbp, XFS_DATA_FORK);
1431 if (error) { 1457 if (error) {
1432 xfs_da_brelse(tp, lbp); 1458 xfs_trans_brelse(tp, lbp);
1433 return error; 1459 return error;
1434 } 1460 }
1435 } 1461 }
@@ -1441,8 +1467,8 @@ xfs_dir2_leaf_lookup_int(
1441 */ 1467 */
1442 ASSERT(cidb == -1); 1468 ASSERT(cidb == -1);
1443 if (dbp) 1469 if (dbp)
1444 xfs_da_brelse(tp, dbp); 1470 xfs_trans_brelse(tp, dbp);
1445 xfs_da_brelse(tp, lbp); 1471 xfs_trans_brelse(tp, lbp);
1446 return XFS_ERROR(ENOENT); 1472 return XFS_ERROR(ENOENT);
1447} 1473}
1448 1474
@@ -1456,13 +1482,13 @@ xfs_dir2_leaf_removename(
1456 __be16 *bestsp; /* leaf block best freespace */ 1482 __be16 *bestsp; /* leaf block best freespace */
1457 xfs_dir2_data_hdr_t *hdr; /* data block header */ 1483 xfs_dir2_data_hdr_t *hdr; /* data block header */
1458 xfs_dir2_db_t db; /* data block number */ 1484 xfs_dir2_db_t db; /* data block number */
1459 xfs_dabuf_t *dbp; /* data block buffer */ 1485 struct xfs_buf *dbp; /* data block buffer */
1460 xfs_dir2_data_entry_t *dep; /* data entry structure */ 1486 xfs_dir2_data_entry_t *dep; /* data entry structure */
1461 xfs_inode_t *dp; /* incore directory inode */ 1487 xfs_inode_t *dp; /* incore directory inode */
1462 int error; /* error return code */ 1488 int error; /* error return code */
1463 xfs_dir2_db_t i; /* temporary data block # */ 1489 xfs_dir2_db_t i; /* temporary data block # */
1464 int index; /* index into leaf entries */ 1490 int index; /* index into leaf entries */
1465 xfs_dabuf_t *lbp; /* leaf buffer */ 1491 struct xfs_buf *lbp; /* leaf buffer */
1466 xfs_dir2_leaf_t *leaf; /* leaf structure */ 1492 xfs_dir2_leaf_t *leaf; /* leaf structure */
1467 xfs_dir2_leaf_entry_t *lep; /* leaf entry */ 1493 xfs_dir2_leaf_entry_t *lep; /* leaf entry */
1468 xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ 1494 xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */
@@ -1483,8 +1509,8 @@ xfs_dir2_leaf_removename(
1483 dp = args->dp; 1509 dp = args->dp;
1484 tp = args->trans; 1510 tp = args->trans;
1485 mp = dp->i_mount; 1511 mp = dp->i_mount;
1486 leaf = lbp->data; 1512 leaf = lbp->b_addr;
1487 hdr = dbp->data; 1513 hdr = dbp->b_addr;
1488 xfs_dir2_data_check(dp, dbp); 1514 xfs_dir2_data_check(dp, dbp);
1489 /* 1515 /*
1490 * Point to the leaf entry, use that to point to the data entry. 1516 * Point to the leaf entry, use that to point to the data entry.
@@ -1541,12 +1567,9 @@ xfs_dir2_leaf_removename(
1541 * Just go on, returning success, leaving the 1567 * Just go on, returning success, leaving the
1542 * empty block in place. 1568 * empty block in place.
1543 */ 1569 */
1544 if (error == ENOSPC && args->total == 0) { 1570 if (error == ENOSPC && args->total == 0)
1545 xfs_da_buf_done(dbp);
1546 error = 0; 1571 error = 0;
1547 }
1548 xfs_dir2_leaf_check(dp, lbp); 1572 xfs_dir2_leaf_check(dp, lbp);
1549 xfs_da_buf_done(lbp);
1550 return error; 1573 return error;
1551 } 1574 }
1552 dbp = NULL; 1575 dbp = NULL;
@@ -1577,10 +1600,9 @@ xfs_dir2_leaf_removename(
1577 /* 1600 /*
1578 * If the data block was not the first one, drop it. 1601 * If the data block was not the first one, drop it.
1579 */ 1602 */
1580 else if (db != mp->m_dirdatablk && dbp != NULL) { 1603 else if (db != mp->m_dirdatablk)
1581 xfs_da_buf_done(dbp);
1582 dbp = NULL; 1604 dbp = NULL;
1583 } 1605
1584 xfs_dir2_leaf_check(dp, lbp); 1606 xfs_dir2_leaf_check(dp, lbp);
1585 /* 1607 /*
1586 * See if we can convert to block form. 1608 * See if we can convert to block form.
@@ -1595,12 +1617,12 @@ int /* error */
1595xfs_dir2_leaf_replace( 1617xfs_dir2_leaf_replace(
1596 xfs_da_args_t *args) /* operation arguments */ 1618 xfs_da_args_t *args) /* operation arguments */
1597{ 1619{
1598 xfs_dabuf_t *dbp; /* data block buffer */ 1620 struct xfs_buf *dbp; /* data block buffer */
1599 xfs_dir2_data_entry_t *dep; /* data block entry */ 1621 xfs_dir2_data_entry_t *dep; /* data block entry */
1600 xfs_inode_t *dp; /* incore directory inode */ 1622 xfs_inode_t *dp; /* incore directory inode */
1601 int error; /* error return code */ 1623 int error; /* error return code */
1602 int index; /* index of leaf entry */ 1624 int index; /* index of leaf entry */
1603 xfs_dabuf_t *lbp; /* leaf buffer */ 1625 struct xfs_buf *lbp; /* leaf buffer */
1604 xfs_dir2_leaf_t *leaf; /* leaf structure */ 1626 xfs_dir2_leaf_t *leaf; /* leaf structure */
1605 xfs_dir2_leaf_entry_t *lep; /* leaf entry */ 1627 xfs_dir2_leaf_entry_t *lep; /* leaf entry */
1606 xfs_trans_t *tp; /* transaction pointer */ 1628 xfs_trans_t *tp; /* transaction pointer */
@@ -1614,7 +1636,7 @@ xfs_dir2_leaf_replace(
1614 return error; 1636 return error;
1615 } 1637 }
1616 dp = args->dp; 1638 dp = args->dp;
1617 leaf = lbp->data; 1639 leaf = lbp->b_addr;
1618 /* 1640 /*
1619 * Point to the leaf entry, get data address from it. 1641 * Point to the leaf entry, get data address from it.
1620 */ 1642 */
@@ -1623,7 +1645,7 @@ xfs_dir2_leaf_replace(
1623 * Point to the data entry. 1645 * Point to the data entry.
1624 */ 1646 */
1625 dep = (xfs_dir2_data_entry_t *) 1647 dep = (xfs_dir2_data_entry_t *)
1626 ((char *)dbp->data + 1648 ((char *)dbp->b_addr +
1627 xfs_dir2_dataptr_to_off(dp->i_mount, be32_to_cpu(lep->address))); 1649 xfs_dir2_dataptr_to_off(dp->i_mount, be32_to_cpu(lep->address)));
1628 ASSERT(args->inumber != be64_to_cpu(dep->inumber)); 1650 ASSERT(args->inumber != be64_to_cpu(dep->inumber));
1629 /* 1651 /*
@@ -1632,9 +1654,8 @@ xfs_dir2_leaf_replace(
1632 dep->inumber = cpu_to_be64(args->inumber); 1654 dep->inumber = cpu_to_be64(args->inumber);
1633 tp = args->trans; 1655 tp = args->trans;
1634 xfs_dir2_data_log_entry(tp, dbp, dep); 1656 xfs_dir2_data_log_entry(tp, dbp, dep);
1635 xfs_da_buf_done(dbp);
1636 xfs_dir2_leaf_check(dp, lbp); 1657 xfs_dir2_leaf_check(dp, lbp);
1637 xfs_da_brelse(tp, lbp); 1658 xfs_trans_brelse(tp, lbp);
1638 return 0; 1659 return 0;
1639} 1660}
1640 1661
@@ -1646,7 +1667,7 @@ xfs_dir2_leaf_replace(
1646int /* index value */ 1667int /* index value */
1647xfs_dir2_leaf_search_hash( 1668xfs_dir2_leaf_search_hash(
1648 xfs_da_args_t *args, /* operation arguments */ 1669 xfs_da_args_t *args, /* operation arguments */
1649 xfs_dabuf_t *lbp) /* leaf buffer */ 1670 struct xfs_buf *lbp) /* leaf buffer */
1650{ 1671{
1651 xfs_dahash_t hash=0; /* hash from this entry */ 1672 xfs_dahash_t hash=0; /* hash from this entry */
1652 xfs_dahash_t hashwant; /* hash value looking for */ 1673 xfs_dahash_t hashwant; /* hash value looking for */
@@ -1656,7 +1677,7 @@ xfs_dir2_leaf_search_hash(
1656 xfs_dir2_leaf_entry_t *lep; /* leaf entry */ 1677 xfs_dir2_leaf_entry_t *lep; /* leaf entry */
1657 int mid=0; /* current leaf index */ 1678 int mid=0; /* current leaf index */
1658 1679
1659 leaf = lbp->data; 1680 leaf = lbp->b_addr;
1660#ifndef __KERNEL__ 1681#ifndef __KERNEL__
1661 if (!leaf->hdr.count) 1682 if (!leaf->hdr.count)
1662 return 0; 1683 return 0;
@@ -1699,11 +1720,11 @@ xfs_dir2_leaf_search_hash(
1699int /* error */ 1720int /* error */
1700xfs_dir2_leaf_trim_data( 1721xfs_dir2_leaf_trim_data(
1701 xfs_da_args_t *args, /* operation arguments */ 1722 xfs_da_args_t *args, /* operation arguments */
1702 xfs_dabuf_t *lbp, /* leaf buffer */ 1723 struct xfs_buf *lbp, /* leaf buffer */
1703 xfs_dir2_db_t db) /* data block number */ 1724 xfs_dir2_db_t db) /* data block number */
1704{ 1725{
1705 __be16 *bestsp; /* leaf bests table */ 1726 __be16 *bestsp; /* leaf bests table */
1706 xfs_dabuf_t *dbp; /* data block buffer */ 1727 struct xfs_buf *dbp; /* data block buffer */
1707 xfs_inode_t *dp; /* incore directory inode */ 1728 xfs_inode_t *dp; /* incore directory inode */
1708 int error; /* error return value */ 1729 int error; /* error return value */
1709 xfs_dir2_leaf_t *leaf; /* leaf structure */ 1730 xfs_dir2_leaf_t *leaf; /* leaf structure */
@@ -1722,12 +1743,12 @@ xfs_dir2_leaf_trim_data(
1722 return error; 1743 return error;
1723 } 1744 }
1724 1745
1725 leaf = lbp->data; 1746 leaf = lbp->b_addr;
1726 ltp = xfs_dir2_leaf_tail_p(mp, leaf); 1747 ltp = xfs_dir2_leaf_tail_p(mp, leaf);
1727 1748
1728#ifdef DEBUG 1749#ifdef DEBUG
1729{ 1750{
1730 struct xfs_dir2_data_hdr *hdr = dbp->data; 1751 struct xfs_dir2_data_hdr *hdr = dbp->b_addr;
1731 1752
1732 ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC)); 1753 ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC));
1733 ASSERT(be16_to_cpu(hdr->bestfree[0].length) == 1754 ASSERT(be16_to_cpu(hdr->bestfree[0].length) ==
@@ -1741,7 +1762,7 @@ xfs_dir2_leaf_trim_data(
1741 */ 1762 */
1742 if ((error = xfs_dir2_shrink_inode(args, db, dbp))) { 1763 if ((error = xfs_dir2_shrink_inode(args, db, dbp))) {
1743 ASSERT(error != ENOSPC); 1764 ASSERT(error != ENOSPC);
1744 xfs_da_brelse(tp, dbp); 1765 xfs_trans_brelse(tp, dbp);
1745 return error; 1766 return error;
1746 } 1767 }
1747 /* 1768 /*
@@ -1781,10 +1802,10 @@ xfs_dir2_node_to_leaf(
1781 xfs_da_args_t *args; /* operation arguments */ 1802 xfs_da_args_t *args; /* operation arguments */
1782 xfs_inode_t *dp; /* incore directory inode */ 1803 xfs_inode_t *dp; /* incore directory inode */
1783 int error; /* error return code */ 1804 int error; /* error return code */
1784 xfs_dabuf_t *fbp; /* buffer for freespace block */ 1805 struct xfs_buf *fbp; /* buffer for freespace block */
1785 xfs_fileoff_t fo; /* freespace file offset */ 1806 xfs_fileoff_t fo; /* freespace file offset */
1786 xfs_dir2_free_t *free; /* freespace structure */ 1807 xfs_dir2_free_t *free; /* freespace structure */
1787 xfs_dabuf_t *lbp; /* buffer for leaf block */ 1808 struct xfs_buf *lbp; /* buffer for leaf block */
1788 xfs_dir2_leaf_tail_t *ltp; /* tail of leaf structure */ 1809 xfs_dir2_leaf_tail_t *ltp; /* tail of leaf structure */
1789 xfs_dir2_leaf_t *leaf; /* leaf structure */ 1810 xfs_dir2_leaf_t *leaf; /* leaf structure */
1790 xfs_mount_t *mp; /* filesystem mount point */ 1811 xfs_mount_t *mp; /* filesystem mount point */
@@ -1838,7 +1859,7 @@ xfs_dir2_node_to_leaf(
1838 if (XFS_FSB_TO_B(mp, fo) > XFS_DIR2_LEAF_OFFSET + mp->m_dirblksize) 1859 if (XFS_FSB_TO_B(mp, fo) > XFS_DIR2_LEAF_OFFSET + mp->m_dirblksize)
1839 return 0; 1860 return 0;
1840 lbp = state->path.blk[0].bp; 1861 lbp = state->path.blk[0].bp;
1841 leaf = lbp->data; 1862 leaf = lbp->b_addr;
1842 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); 1863 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
1843 /* 1864 /*
1844 * Read the freespace block. 1865 * Read the freespace block.
@@ -1847,7 +1868,7 @@ xfs_dir2_node_to_leaf(
1847 XFS_DATA_FORK))) { 1868 XFS_DATA_FORK))) {
1848 return error; 1869 return error;
1849 } 1870 }
1850 free = fbp->data; 1871 free = fbp->b_addr;
1851 ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); 1872 ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
1852 ASSERT(!free->hdr.firstdb); 1873 ASSERT(!free->hdr.firstdb);
1853 1874
@@ -1857,7 +1878,7 @@ xfs_dir2_node_to_leaf(
1857 */ 1878 */
1858 if (xfs_dir2_leaf_size(&leaf->hdr, be32_to_cpu(free->hdr.nvalid)) > 1879 if (xfs_dir2_leaf_size(&leaf->hdr, be32_to_cpu(free->hdr.nvalid)) >
1859 mp->m_dirblksize) { 1880 mp->m_dirblksize) {
1860 xfs_da_brelse(tp, fbp); 1881 xfs_trans_brelse(tp, fbp);
1861 return 0; 1882 return 0;
1862 } 1883 }
1863 1884
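Taken together, the xfs_dir2_leaf.c hunks above apply one mechanical conversion: the xfs_dabuf wrapper is gone, so the directory code holds struct xfs_buf directly, reads the block image through b_addr instead of data, logs ranges with xfs_trans_log_buf() instead of xfs_da_log_buf(), releases with xfs_trans_brelse() instead of xfs_da_brelse(), and no longer needs the xfs_da_buf_done() pairing. A condensed sketch of the resulting access pattern, built around a hypothetical helper that is not in the commit:

STATIC int
xfs_dir2_leaf_peek_magic(		/* hypothetical example */
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		blkno,
	__be16			*magic)
{
	struct xfs_buf		*bp;
	xfs_dir2_leaf_t		*leaf;
	int			error;

	error = xfs_da_read_buf(tp, dp, blkno, -1, &bp, XFS_DATA_FORK);
	if (error)
		return error;
	leaf = bp->b_addr;		/* was bp->data on the old xfs_dabuf */
	*magic = leaf->hdr.info.magic;
	xfs_trans_brelse(tp, bp);	/* was xfs_da_brelse(); no xfs_da_buf_done() */
	return 0;
}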
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index b0f26780449d..6c7052406605 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -36,20 +36,20 @@
36/* 36/*
37 * Function declarations. 37 * Function declarations.
38 */ 38 */
39static void xfs_dir2_free_log_header(xfs_trans_t *tp, xfs_dabuf_t *bp); 39static int xfs_dir2_leafn_add(struct xfs_buf *bp, xfs_da_args_t *args,
40static int xfs_dir2_leafn_add(xfs_dabuf_t *bp, xfs_da_args_t *args, int index); 40 int index);
41#ifdef DEBUG 41#ifdef DEBUG
42static void xfs_dir2_leafn_check(xfs_inode_t *dp, xfs_dabuf_t *bp); 42static void xfs_dir2_leafn_check(struct xfs_inode *dp, struct xfs_buf *bp);
43#else 43#else
44#define xfs_dir2_leafn_check(dp, bp) 44#define xfs_dir2_leafn_check(dp, bp)
45#endif 45#endif
46static void xfs_dir2_leafn_moveents(xfs_da_args_t *args, xfs_dabuf_t *bp_s, 46static void xfs_dir2_leafn_moveents(xfs_da_args_t *args, struct xfs_buf *bp_s,
47 int start_s, xfs_dabuf_t *bp_d, int start_d, 47 int start_s, struct xfs_buf *bp_d,
48 int count); 48 int start_d, int count);
49static void xfs_dir2_leafn_rebalance(xfs_da_state_t *state, 49static void xfs_dir2_leafn_rebalance(xfs_da_state_t *state,
50 xfs_da_state_blk_t *blk1, 50 xfs_da_state_blk_t *blk1,
51 xfs_da_state_blk_t *blk2); 51 xfs_da_state_blk_t *blk2);
52static int xfs_dir2_leafn_remove(xfs_da_args_t *args, xfs_dabuf_t *bp, 52static int xfs_dir2_leafn_remove(xfs_da_args_t *args, struct xfs_buf *bp,
53 int index, xfs_da_state_blk_t *dblk, 53 int index, xfs_da_state_blk_t *dblk,
54 int *rval); 54 int *rval);
55static int xfs_dir2_node_addname_int(xfs_da_args_t *args, 55static int xfs_dir2_node_addname_int(xfs_da_args_t *args,
@@ -60,16 +60,16 @@ static int xfs_dir2_node_addname_int(xfs_da_args_t *args,
60 */ 60 */
61STATIC void 61STATIC void
62xfs_dir2_free_log_bests( 62xfs_dir2_free_log_bests(
63 xfs_trans_t *tp, /* transaction pointer */ 63 struct xfs_trans *tp,
64 xfs_dabuf_t *bp, /* freespace buffer */ 64 struct xfs_buf *bp,
65 int first, /* first entry to log */ 65 int first, /* first entry to log */
66 int last) /* last entry to log */ 66 int last) /* last entry to log */
67{ 67{
68 xfs_dir2_free_t *free; /* freespace structure */ 68 xfs_dir2_free_t *free; /* freespace structure */
69 69
70 free = bp->data; 70 free = bp->b_addr;
71 ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); 71 ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
72 xfs_da_log_buf(tp, bp, 72 xfs_trans_log_buf(tp, bp,
73 (uint)((char *)&free->bests[first] - (char *)free), 73 (uint)((char *)&free->bests[first] - (char *)free),
74 (uint)((char *)&free->bests[last] - (char *)free + 74 (uint)((char *)&free->bests[last] - (char *)free +
75 sizeof(free->bests[0]) - 1)); 75 sizeof(free->bests[0]) - 1));
@@ -80,14 +80,14 @@ xfs_dir2_free_log_bests(
80 */ 80 */
81static void 81static void
82xfs_dir2_free_log_header( 82xfs_dir2_free_log_header(
83 xfs_trans_t *tp, /* transaction pointer */ 83 struct xfs_trans *tp,
84 xfs_dabuf_t *bp) /* freespace buffer */ 84 struct xfs_buf *bp)
85{ 85{
86 xfs_dir2_free_t *free; /* freespace structure */ 86 xfs_dir2_free_t *free; /* freespace structure */
87 87
88 free = bp->data; 88 free = bp->b_addr;
89 ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); 89 ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
90 xfs_da_log_buf(tp, bp, (uint)((char *)&free->hdr - (char *)free), 90 xfs_trans_log_buf(tp, bp, (uint)((char *)&free->hdr - (char *)free),
91 (uint)(sizeof(xfs_dir2_free_hdr_t) - 1)); 91 (uint)(sizeof(xfs_dir2_free_hdr_t) - 1));
92} 92}
93 93
@@ -99,11 +99,11 @@ xfs_dir2_free_log_header(
99int /* error */ 99int /* error */
100xfs_dir2_leaf_to_node( 100xfs_dir2_leaf_to_node(
101 xfs_da_args_t *args, /* operation arguments */ 101 xfs_da_args_t *args, /* operation arguments */
102 xfs_dabuf_t *lbp) /* leaf buffer */ 102 struct xfs_buf *lbp) /* leaf buffer */
103{ 103{
104 xfs_inode_t *dp; /* incore directory inode */ 104 xfs_inode_t *dp; /* incore directory inode */
105 int error; /* error return value */ 105 int error; /* error return value */
106 xfs_dabuf_t *fbp; /* freespace buffer */ 106 struct xfs_buf *fbp; /* freespace buffer */
107 xfs_dir2_db_t fdb; /* freespace block number */ 107 xfs_dir2_db_t fdb; /* freespace block number */
108 xfs_dir2_free_t *free; /* freespace structure */ 108 xfs_dir2_free_t *free; /* freespace structure */
109 __be16 *from; /* pointer to freespace entry */ 109 __be16 *from; /* pointer to freespace entry */
@@ -136,8 +136,8 @@ xfs_dir2_leaf_to_node(
136 return error; 136 return error;
137 } 137 }
138 ASSERT(fbp != NULL); 138 ASSERT(fbp != NULL);
139 free = fbp->data; 139 free = fbp->b_addr;
140 leaf = lbp->data; 140 leaf = lbp->b_addr;
141 ltp = xfs_dir2_leaf_tail_p(mp, leaf); 141 ltp = xfs_dir2_leaf_tail_p(mp, leaf);
142 /* 142 /*
143 * Initialize the freespace block header. 143 * Initialize the freespace block header.
@@ -164,7 +164,6 @@ xfs_dir2_leaf_to_node(
164 xfs_dir2_leaf_log_header(tp, lbp); 164 xfs_dir2_leaf_log_header(tp, lbp);
165 xfs_dir2_free_log_header(tp, fbp); 165 xfs_dir2_free_log_header(tp, fbp);
166 xfs_dir2_free_log_bests(tp, fbp, 0, be32_to_cpu(free->hdr.nvalid) - 1); 166 xfs_dir2_free_log_bests(tp, fbp, 0, be32_to_cpu(free->hdr.nvalid) - 1);
167 xfs_da_buf_done(fbp);
168 xfs_dir2_leafn_check(dp, lbp); 167 xfs_dir2_leafn_check(dp, lbp);
169 return 0; 168 return 0;
170} 169}
@@ -175,7 +174,7 @@ xfs_dir2_leaf_to_node(
175 */ 174 */
176static int /* error */ 175static int /* error */
177xfs_dir2_leafn_add( 176xfs_dir2_leafn_add(
178 xfs_dabuf_t *bp, /* leaf buffer */ 177 struct xfs_buf *bp, /* leaf buffer */
179 xfs_da_args_t *args, /* operation arguments */ 178 xfs_da_args_t *args, /* operation arguments */
180 int index) /* insertion pt for new entry */ 179 int index) /* insertion pt for new entry */
181{ 180{
@@ -195,7 +194,7 @@ xfs_dir2_leafn_add(
195 dp = args->dp; 194 dp = args->dp;
196 mp = dp->i_mount; 195 mp = dp->i_mount;
197 tp = args->trans; 196 tp = args->trans;
198 leaf = bp->data; 197 leaf = bp->b_addr;
199 198
200 /* 199 /*
201 * Quick check just to make sure we are not going to index 200 * Quick check just to make sure we are not going to index
@@ -261,15 +260,15 @@ xfs_dir2_leafn_add(
261 */ 260 */
262void 261void
263xfs_dir2_leafn_check( 262xfs_dir2_leafn_check(
264 xfs_inode_t *dp, /* incore directory inode */ 263 struct xfs_inode *dp,
265 xfs_dabuf_t *bp) /* leaf buffer */ 264 struct xfs_buf *bp)
266{ 265{
267 int i; /* leaf index */ 266 int i; /* leaf index */
268 xfs_dir2_leaf_t *leaf; /* leaf structure */ 267 xfs_dir2_leaf_t *leaf; /* leaf structure */
269 xfs_mount_t *mp; /* filesystem mount point */ 268 xfs_mount_t *mp; /* filesystem mount point */
270 int stale; /* count of stale leaves */ 269 int stale; /* count of stale leaves */
271 270
272 leaf = bp->data; 271 leaf = bp->b_addr;
273 mp = dp->i_mount; 272 mp = dp->i_mount;
274 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); 273 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
275 ASSERT(be16_to_cpu(leaf->hdr.count) <= xfs_dir2_max_leaf_ents(mp)); 274 ASSERT(be16_to_cpu(leaf->hdr.count) <= xfs_dir2_max_leaf_ents(mp));
@@ -291,12 +290,12 @@ xfs_dir2_leafn_check(
291 */ 290 */
292xfs_dahash_t /* hash value */ 291xfs_dahash_t /* hash value */
293xfs_dir2_leafn_lasthash( 292xfs_dir2_leafn_lasthash(
294 xfs_dabuf_t *bp, /* leaf buffer */ 293 struct xfs_buf *bp, /* leaf buffer */
295 int *count) /* count of entries in leaf */ 294 int *count) /* count of entries in leaf */
296{ 295{
297 xfs_dir2_leaf_t *leaf; /* leaf structure */ 296 xfs_dir2_leaf_t *leaf; /* leaf structure */
298 297
299 leaf = bp->data; 298 leaf = bp->b_addr;
300 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); 299 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
301 if (count) 300 if (count)
302 *count = be16_to_cpu(leaf->hdr.count); 301 *count = be16_to_cpu(leaf->hdr.count);
@@ -311,12 +310,12 @@ xfs_dir2_leafn_lasthash(
311 */ 310 */
312STATIC int 311STATIC int
313xfs_dir2_leafn_lookup_for_addname( 312xfs_dir2_leafn_lookup_for_addname(
314 xfs_dabuf_t *bp, /* leaf buffer */ 313 struct xfs_buf *bp, /* leaf buffer */
315 xfs_da_args_t *args, /* operation arguments */ 314 xfs_da_args_t *args, /* operation arguments */
316 int *indexp, /* out: leaf entry index */ 315 int *indexp, /* out: leaf entry index */
317 xfs_da_state_t *state) /* state to fill in */ 316 xfs_da_state_t *state) /* state to fill in */
318{ 317{
319 xfs_dabuf_t *curbp = NULL; /* current data/free buffer */ 318 struct xfs_buf *curbp = NULL; /* current data/free buffer */
320 xfs_dir2_db_t curdb = -1; /* current data block number */ 319 xfs_dir2_db_t curdb = -1; /* current data block number */
321 xfs_dir2_db_t curfdb = -1; /* current free block number */ 320 xfs_dir2_db_t curfdb = -1; /* current free block number */
322 xfs_inode_t *dp; /* incore directory inode */ 321 xfs_inode_t *dp; /* incore directory inode */
@@ -335,7 +334,7 @@ xfs_dir2_leafn_lookup_for_addname(
335 dp = args->dp; 334 dp = args->dp;
336 tp = args->trans; 335 tp = args->trans;
337 mp = dp->i_mount; 336 mp = dp->i_mount;
338 leaf = bp->data; 337 leaf = bp->b_addr;
339 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); 338 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
340#ifdef __KERNEL__ 339#ifdef __KERNEL__
341 ASSERT(be16_to_cpu(leaf->hdr.count) > 0); 340 ASSERT(be16_to_cpu(leaf->hdr.count) > 0);
@@ -352,7 +351,7 @@ xfs_dir2_leafn_lookup_for_addname(
352 /* If so, it's a free block buffer, get the block number. */ 351 /* If so, it's a free block buffer, get the block number. */
353 curbp = state->extrablk.bp; 352 curbp = state->extrablk.bp;
354 curfdb = state->extrablk.blkno; 353 curfdb = state->extrablk.blkno;
355 free = curbp->data; 354 free = curbp->b_addr;
356 ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); 355 ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
357 } 356 }
358 length = xfs_dir2_data_entsize(args->namelen); 357 length = xfs_dir2_data_entsize(args->namelen);
@@ -394,7 +393,7 @@ xfs_dir2_leafn_lookup_for_addname(
394 * If we had one before, drop it. 393 * If we had one before, drop it.
395 */ 394 */
396 if (curbp) 395 if (curbp)
397 xfs_da_brelse(tp, curbp); 396 xfs_trans_brelse(tp, curbp);
398 /* 397 /*
399 * Read the free block. 398 * Read the free block.
400 */ 399 */
@@ -403,7 +402,7 @@ xfs_dir2_leafn_lookup_for_addname(
403 -1, &curbp, XFS_DATA_FORK); 402 -1, &curbp, XFS_DATA_FORK);
404 if (error) 403 if (error)
405 return error; 404 return error;
406 free = curbp->data; 405 free = curbp->b_addr;
407 ASSERT(be32_to_cpu(free->hdr.magic) == 406 ASSERT(be32_to_cpu(free->hdr.magic) ==
408 XFS_DIR2_FREE_MAGIC); 407 XFS_DIR2_FREE_MAGIC);
409 ASSERT((be32_to_cpu(free->hdr.firstdb) % 408 ASSERT((be32_to_cpu(free->hdr.firstdb) %
@@ -424,7 +423,7 @@ xfs_dir2_leafn_lookup_for_addname(
424 XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int", 423 XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int",
425 XFS_ERRLEVEL_LOW, mp); 424 XFS_ERRLEVEL_LOW, mp);
426 if (curfdb != newfdb) 425 if (curfdb != newfdb)
427 xfs_da_brelse(tp, curbp); 426 xfs_trans_brelse(tp, curbp);
428 return XFS_ERROR(EFSCORRUPTED); 427 return XFS_ERROR(EFSCORRUPTED);
429 } 428 }
430 curfdb = newfdb; 429 curfdb = newfdb;
@@ -459,12 +458,12 @@ out:
459 */ 458 */
460STATIC int 459STATIC int
461xfs_dir2_leafn_lookup_for_entry( 460xfs_dir2_leafn_lookup_for_entry(
462 xfs_dabuf_t *bp, /* leaf buffer */ 461 struct xfs_buf *bp, /* leaf buffer */
463 xfs_da_args_t *args, /* operation arguments */ 462 xfs_da_args_t *args, /* operation arguments */
464 int *indexp, /* out: leaf entry index */ 463 int *indexp, /* out: leaf entry index */
465 xfs_da_state_t *state) /* state to fill in */ 464 xfs_da_state_t *state) /* state to fill in */
466{ 465{
467 xfs_dabuf_t *curbp = NULL; /* current data/free buffer */ 466 struct xfs_buf *curbp = NULL; /* current data/free buffer */
468 xfs_dir2_db_t curdb = -1; /* current data block number */ 467 xfs_dir2_db_t curdb = -1; /* current data block number */
469 xfs_dir2_data_entry_t *dep; /* data block entry */ 468 xfs_dir2_data_entry_t *dep; /* data block entry */
470 xfs_inode_t *dp; /* incore directory inode */ 469 xfs_inode_t *dp; /* incore directory inode */
@@ -480,7 +479,7 @@ xfs_dir2_leafn_lookup_for_entry(
480 dp = args->dp; 479 dp = args->dp;
481 tp = args->trans; 480 tp = args->trans;
482 mp = dp->i_mount; 481 mp = dp->i_mount;
483 leaf = bp->data; 482 leaf = bp->b_addr;
484 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); 483 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
485#ifdef __KERNEL__ 484#ifdef __KERNEL__
486 ASSERT(be16_to_cpu(leaf->hdr.count) > 0); 485 ASSERT(be16_to_cpu(leaf->hdr.count) > 0);
@@ -525,7 +524,7 @@ xfs_dir2_leafn_lookup_for_entry(
525 */ 524 */
526 if (curbp && (args->cmpresult == XFS_CMP_DIFFERENT || 525 if (curbp && (args->cmpresult == XFS_CMP_DIFFERENT ||
527 curdb != state->extrablk.blkno)) 526 curdb != state->extrablk.blkno))
528 xfs_da_brelse(tp, curbp); 527 xfs_trans_brelse(tp, curbp);
529 /* 528 /*
530 * If needing the block that is saved with a CI match, 529 * If needing the block that is saved with a CI match,
531 * use it otherwise read in the new data block. 530 * use it otherwise read in the new data block.
@@ -547,7 +546,7 @@ xfs_dir2_leafn_lookup_for_entry(
547 /* 546 /*
548 * Point to the data entry. 547 * Point to the data entry.
549 */ 548 */
550 dep = (xfs_dir2_data_entry_t *)((char *)curbp->data + 549 dep = (xfs_dir2_data_entry_t *)((char *)curbp->b_addr +
551 xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address))); 550 xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address)));
552 /* 551 /*
553 * Compare the entry and if it's an exact match, return 552 * Compare the entry and if it's an exact match, return
@@ -559,7 +558,7 @@ xfs_dir2_leafn_lookup_for_entry(
559 /* If there is a CI match block, drop it */ 558 /* If there is a CI match block, drop it */
560 if (args->cmpresult != XFS_CMP_DIFFERENT && 559 if (args->cmpresult != XFS_CMP_DIFFERENT &&
561 curdb != state->extrablk.blkno) 560 curdb != state->extrablk.blkno)
562 xfs_da_brelse(tp, state->extrablk.bp); 561 xfs_trans_brelse(tp, state->extrablk.bp);
563 args->cmpresult = cmp; 562 args->cmpresult = cmp;
564 args->inumber = be64_to_cpu(dep->inumber); 563 args->inumber = be64_to_cpu(dep->inumber);
565 *indexp = index; 564 *indexp = index;
@@ -567,7 +566,7 @@ xfs_dir2_leafn_lookup_for_entry(
567 state->extrablk.bp = curbp; 566 state->extrablk.bp = curbp;
568 state->extrablk.blkno = curdb; 567 state->extrablk.blkno = curdb;
569 state->extrablk.index = (int)((char *)dep - 568 state->extrablk.index = (int)((char *)dep -
570 (char *)curbp->data); 569 (char *)curbp->b_addr);
571 state->extrablk.magic = XFS_DIR2_DATA_MAGIC; 570 state->extrablk.magic = XFS_DIR2_DATA_MAGIC;
572 if (cmp == XFS_CMP_EXACT) 571 if (cmp == XFS_CMP_EXACT)
573 return XFS_ERROR(EEXIST); 572 return XFS_ERROR(EEXIST);
@@ -586,7 +585,7 @@ xfs_dir2_leafn_lookup_for_entry(
586 } else { 585 } else {
587 /* If the curbp is not the CI match block, drop it */ 586 /* If the curbp is not the CI match block, drop it */
588 if (state->extrablk.bp != curbp) 587 if (state->extrablk.bp != curbp)
589 xfs_da_brelse(tp, curbp); 588 xfs_trans_brelse(tp, curbp);
590 } 589 }
591 } else { 590 } else {
592 state->extravalid = 0; 591 state->extravalid = 0;
@@ -602,7 +601,7 @@ xfs_dir2_leafn_lookup_for_entry(
602 */ 601 */
603int 602int
604xfs_dir2_leafn_lookup_int( 603xfs_dir2_leafn_lookup_int(
605 xfs_dabuf_t *bp, /* leaf buffer */ 604 struct xfs_buf *bp, /* leaf buffer */
606 xfs_da_args_t *args, /* operation arguments */ 605 xfs_da_args_t *args, /* operation arguments */
607 int *indexp, /* out: leaf entry index */ 606 int *indexp, /* out: leaf entry index */
608 xfs_da_state_t *state) /* state to fill in */ 607 xfs_da_state_t *state) /* state to fill in */
@@ -620,9 +619,9 @@ xfs_dir2_leafn_lookup_int(
620static void 619static void
621xfs_dir2_leafn_moveents( 620xfs_dir2_leafn_moveents(
622 xfs_da_args_t *args, /* operation arguments */ 621 xfs_da_args_t *args, /* operation arguments */
623 xfs_dabuf_t *bp_s, /* source leaf buffer */ 622 struct xfs_buf *bp_s, /* source leaf buffer */
624 int start_s, /* source leaf index */ 623 int start_s, /* source leaf index */
625 xfs_dabuf_t *bp_d, /* destination leaf buffer */ 624 struct xfs_buf *bp_d, /* destination leaf buffer */
626 int start_d, /* destination leaf index */ 625 int start_d, /* destination leaf index */
627 int count) /* count of leaves to copy */ 626 int count) /* count of leaves to copy */
628{ 627{
@@ -640,8 +639,8 @@ xfs_dir2_leafn_moveents(
640 return; 639 return;
641 } 640 }
642 tp = args->trans; 641 tp = args->trans;
643 leaf_s = bp_s->data; 642 leaf_s = bp_s->b_addr;
644 leaf_d = bp_d->data; 643 leaf_d = bp_d->b_addr;
645 /* 644 /*
646 * If the destination index is not the end of the current 645 * If the destination index is not the end of the current
647 * destination leaf entries, open up a hole in the destination 646 * destination leaf entries, open up a hole in the destination
@@ -702,14 +701,14 @@ xfs_dir2_leafn_moveents(
702 */ 701 */
703int /* sort order */ 702int /* sort order */
704xfs_dir2_leafn_order( 703xfs_dir2_leafn_order(
705 xfs_dabuf_t *leaf1_bp, /* leaf1 buffer */ 704 struct xfs_buf *leaf1_bp, /* leaf1 buffer */
706 xfs_dabuf_t *leaf2_bp) /* leaf2 buffer */ 705 struct xfs_buf *leaf2_bp) /* leaf2 buffer */
707{ 706{
708 xfs_dir2_leaf_t *leaf1; /* leaf1 structure */ 707 xfs_dir2_leaf_t *leaf1; /* leaf1 structure */
709 xfs_dir2_leaf_t *leaf2; /* leaf2 structure */ 708 xfs_dir2_leaf_t *leaf2; /* leaf2 structure */
710 709
711 leaf1 = leaf1_bp->data; 710 leaf1 = leaf1_bp->b_addr;
712 leaf2 = leaf2_bp->data; 711 leaf2 = leaf2_bp->b_addr;
713 ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); 712 ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
714 ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); 713 ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
715 if (be16_to_cpu(leaf1->hdr.count) > 0 && 714 if (be16_to_cpu(leaf1->hdr.count) > 0 &&
@@ -757,8 +756,8 @@ xfs_dir2_leafn_rebalance(
757 blk1 = blk2; 756 blk1 = blk2;
758 blk2 = tmp; 757 blk2 = tmp;
759 } 758 }
760 leaf1 = blk1->bp->data; 759 leaf1 = blk1->bp->b_addr;
761 leaf2 = blk2->bp->data; 760 leaf2 = blk2->bp->b_addr;
762 oldsum = be16_to_cpu(leaf1->hdr.count) + be16_to_cpu(leaf2->hdr.count); 761 oldsum = be16_to_cpu(leaf1->hdr.count) + be16_to_cpu(leaf2->hdr.count);
763#ifdef DEBUG 762#ifdef DEBUG
764 oldstale = be16_to_cpu(leaf1->hdr.stale) + be16_to_cpu(leaf2->hdr.stale); 763 oldstale = be16_to_cpu(leaf1->hdr.stale) + be16_to_cpu(leaf2->hdr.stale);
@@ -834,14 +833,14 @@ xfs_dir2_leafn_rebalance(
834static int /* error */ 833static int /* error */
835xfs_dir2_leafn_remove( 834xfs_dir2_leafn_remove(
836 xfs_da_args_t *args, /* operation arguments */ 835 xfs_da_args_t *args, /* operation arguments */
837 xfs_dabuf_t *bp, /* leaf buffer */ 836 struct xfs_buf *bp, /* leaf buffer */
838 int index, /* leaf entry index */ 837 int index, /* leaf entry index */
839 xfs_da_state_blk_t *dblk, /* data block */ 838 xfs_da_state_blk_t *dblk, /* data block */
840 int *rval) /* resulting block needs join */ 839 int *rval) /* resulting block needs join */
841{ 840{
842 xfs_dir2_data_hdr_t *hdr; /* data block header */ 841 xfs_dir2_data_hdr_t *hdr; /* data block header */
843 xfs_dir2_db_t db; /* data block number */ 842 xfs_dir2_db_t db; /* data block number */
844 xfs_dabuf_t *dbp; /* data block buffer */ 843 struct xfs_buf *dbp; /* data block buffer */
845 xfs_dir2_data_entry_t *dep; /* data block entry */ 844 xfs_dir2_data_entry_t *dep; /* data block entry */
846 xfs_inode_t *dp; /* incore directory inode */ 845 xfs_inode_t *dp; /* incore directory inode */
847 xfs_dir2_leaf_t *leaf; /* leaf structure */ 846 xfs_dir2_leaf_t *leaf; /* leaf structure */
@@ -858,7 +857,7 @@ xfs_dir2_leafn_remove(
858 dp = args->dp; 857 dp = args->dp;
859 tp = args->trans; 858 tp = args->trans;
860 mp = dp->i_mount; 859 mp = dp->i_mount;
861 leaf = bp->data; 860 leaf = bp->b_addr;
862 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); 861 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
863 /* 862 /*
864 * Point to the entry we're removing. 863 * Point to the entry we're removing.
@@ -884,7 +883,7 @@ xfs_dir2_leafn_remove(
884 * in the data block in case it changes. 883 * in the data block in case it changes.
885 */ 884 */
886 dbp = dblk->bp; 885 dbp = dblk->bp;
887 hdr = dbp->data; 886 hdr = dbp->b_addr;
888 dep = (xfs_dir2_data_entry_t *)((char *)hdr + off); 887 dep = (xfs_dir2_data_entry_t *)((char *)hdr + off);
889 longest = be16_to_cpu(hdr->bestfree[0].length); 888 longest = be16_to_cpu(hdr->bestfree[0].length);
890 needlog = needscan = 0; 889 needlog = needscan = 0;
@@ -905,7 +904,7 @@ xfs_dir2_leafn_remove(
905 */ 904 */
906 if (longest < be16_to_cpu(hdr->bestfree[0].length)) { 905 if (longest < be16_to_cpu(hdr->bestfree[0].length)) {
907 int error; /* error return value */ 906 int error; /* error return value */
908 xfs_dabuf_t *fbp; /* freeblock buffer */ 907 struct xfs_buf *fbp; /* freeblock buffer */
909 xfs_dir2_db_t fdb; /* freeblock block number */ 908 xfs_dir2_db_t fdb; /* freeblock block number */
910 int findex; /* index in freeblock entries */ 909 int findex; /* index in freeblock entries */
911 xfs_dir2_free_t *free; /* freeblock structure */ 910 xfs_dir2_free_t *free; /* freeblock structure */
@@ -920,7 +919,7 @@ xfs_dir2_leafn_remove(
920 -1, &fbp, XFS_DATA_FORK))) { 919 -1, &fbp, XFS_DATA_FORK))) {
921 return error; 920 return error;
922 } 921 }
923 free = fbp->data; 922 free = fbp->b_addr;
924 ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); 923 ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
925 ASSERT(be32_to_cpu(free->hdr.firstdb) == 924 ASSERT(be32_to_cpu(free->hdr.firstdb) ==
926 xfs_dir2_free_max_bests(mp) * 925 xfs_dir2_free_max_bests(mp) *
@@ -948,9 +947,7 @@ xfs_dir2_leafn_remove(
948	 * In this case just drop the buffer and someone else		 947	 * In this case just drop the buffer and someone else
949 * will eventually get rid of the empty block. 948 * will eventually get rid of the empty block.
950 */ 949 */
951 else if (error == ENOSPC && args->total == 0) 950 else if (!(error == ENOSPC && args->total == 0))
952 xfs_da_buf_done(dbp);
953 else
954 return error; 951 return error;
955 } 952 }
956 /* 953 /*
@@ -1018,11 +1015,6 @@ xfs_dir2_leafn_remove(
1018 */ 1015 */
1019 if (logfree) 1016 if (logfree)
1020 xfs_dir2_free_log_bests(tp, fbp, findex, findex); 1017 xfs_dir2_free_log_bests(tp, fbp, findex, findex);
1021 /*
1022 * Drop the buffer if we still have it.
1023 */
1024 if (fbp)
1025 xfs_da_buf_done(fbp);
1026 } 1018 }
1027 xfs_dir2_leafn_check(dp, bp); 1019 xfs_dir2_leafn_check(dp, bp);
1028 /* 1020 /*
@@ -1114,7 +1106,7 @@ xfs_dir2_leafn_toosmall(
1114{ 1106{
1115 xfs_da_state_blk_t *blk; /* leaf block */ 1107 xfs_da_state_blk_t *blk; /* leaf block */
1116 xfs_dablk_t blkno; /* leaf block number */ 1108 xfs_dablk_t blkno; /* leaf block number */
1117 xfs_dabuf_t *bp; /* leaf buffer */ 1109 struct xfs_buf *bp; /* leaf buffer */
1118 int bytes; /* bytes in use */ 1110 int bytes; /* bytes in use */
1119 int count; /* leaf live entry count */ 1111 int count; /* leaf live entry count */
1120 int error; /* error return value */ 1112 int error; /* error return value */
@@ -1130,7 +1122,7 @@ xfs_dir2_leafn_toosmall(
1130 * to coalesce with a sibling. 1122 * to coalesce with a sibling.
1131 */ 1123 */
1132 blk = &state->path.blk[state->path.active - 1]; 1124 blk = &state->path.blk[state->path.active - 1];
1133 info = blk->bp->data; 1125 info = blk->bp->b_addr;
1134 ASSERT(info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); 1126 ASSERT(info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
1135 leaf = (xfs_dir2_leaf_t *)info; 1127 leaf = (xfs_dir2_leaf_t *)info;
1136 count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale); 1128 count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale);
@@ -1189,7 +1181,7 @@ xfs_dir2_leafn_toosmall(
1189 leaf = (xfs_dir2_leaf_t *)info; 1181 leaf = (xfs_dir2_leaf_t *)info;
1190 count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale); 1182 count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale);
1191 bytes = state->blocksize - (state->blocksize >> 2); 1183 bytes = state->blocksize - (state->blocksize >> 2);
1192 leaf = bp->data; 1184 leaf = bp->b_addr;
1193 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); 1185 ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
1194 count += be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale); 1186 count += be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale);
1195 bytes -= count * (uint)sizeof(leaf->ents[0]); 1187 bytes -= count * (uint)sizeof(leaf->ents[0]);
@@ -1198,7 +1190,7 @@ xfs_dir2_leafn_toosmall(
1198 */ 1190 */
1199 if (bytes >= 0) 1191 if (bytes >= 0)
1200 break; 1192 break;
1201 xfs_da_brelse(state->args->trans, bp); 1193 xfs_trans_brelse(state->args->trans, bp);
1202 } 1194 }
1203 /* 1195 /*
1204 * Didn't like either block, give up. 1196 * Didn't like either block, give up.
@@ -1207,11 +1199,7 @@ xfs_dir2_leafn_toosmall(
1207 *action = 0; 1199 *action = 0;
1208 return 0; 1200 return 0;
1209 } 1201 }
1210 /* 1202
1211 * Done with the sibling leaf block here, drop the dabuf
1212 * so path_shift can get it.
1213 */
1214 xfs_da_buf_done(bp);
1215 /* 1203 /*
1216 * Make altpath point to the block we want to keep (the lower 1204 * Make altpath point to the block we want to keep (the lower
1217 * numbered block) and path point to the block we want to drop. 1205 * numbered block) and path point to the block we want to drop.
@@ -1247,8 +1235,8 @@ xfs_dir2_leafn_unbalance(
1247 args = state->args; 1235 args = state->args;
1248 ASSERT(drop_blk->magic == XFS_DIR2_LEAFN_MAGIC); 1236 ASSERT(drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
1249 ASSERT(save_blk->magic == XFS_DIR2_LEAFN_MAGIC); 1237 ASSERT(save_blk->magic == XFS_DIR2_LEAFN_MAGIC);
1250 drop_leaf = drop_blk->bp->data; 1238 drop_leaf = drop_blk->bp->b_addr;
1251 save_leaf = save_blk->bp->data; 1239 save_leaf = save_blk->bp->b_addr;
1252 ASSERT(drop_leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); 1240 ASSERT(drop_leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
1253 ASSERT(save_leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); 1241 ASSERT(save_leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
1254 /* 1242 /*
@@ -1356,13 +1344,13 @@ xfs_dir2_node_addname_int(
1356{ 1344{
1357 xfs_dir2_data_hdr_t *hdr; /* data block header */ 1345 xfs_dir2_data_hdr_t *hdr; /* data block header */
1358 xfs_dir2_db_t dbno; /* data block number */ 1346 xfs_dir2_db_t dbno; /* data block number */
1359 xfs_dabuf_t *dbp; /* data block buffer */ 1347 struct xfs_buf *dbp; /* data block buffer */
1360 xfs_dir2_data_entry_t *dep; /* data entry pointer */ 1348 xfs_dir2_data_entry_t *dep; /* data entry pointer */
1361 xfs_inode_t *dp; /* incore directory inode */ 1349 xfs_inode_t *dp; /* incore directory inode */
1362 xfs_dir2_data_unused_t *dup; /* data unused entry pointer */ 1350 xfs_dir2_data_unused_t *dup; /* data unused entry pointer */
1363 int error; /* error return value */ 1351 int error; /* error return value */
1364 xfs_dir2_db_t fbno; /* freespace block number */ 1352 xfs_dir2_db_t fbno; /* freespace block number */
1365 xfs_dabuf_t *fbp; /* freespace buffer */ 1353 struct xfs_buf *fbp; /* freespace buffer */
1366 int findex; /* freespace entry index */ 1354 int findex; /* freespace entry index */
1367 xfs_dir2_free_t *free=NULL; /* freespace block structure */ 1355 xfs_dir2_free_t *free=NULL; /* freespace block structure */
1368 xfs_dir2_db_t ifbno; /* initial freespace block no */ 1356 xfs_dir2_db_t ifbno; /* initial freespace block no */
@@ -1390,7 +1378,7 @@ xfs_dir2_node_addname_int(
1390 * Remember initial freespace block number. 1378 * Remember initial freespace block number.
1391 */ 1379 */
1392 ifbno = fblk->blkno; 1380 ifbno = fblk->blkno;
1393 free = fbp->data; 1381 free = fbp->b_addr;
1394 ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); 1382 ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
1395 findex = fblk->index; 1383 findex = fblk->index;
1396 /* 1384 /*
@@ -1474,7 +1462,7 @@ xfs_dir2_node_addname_int(
1474 if (unlikely(fbp == NULL)) { 1462 if (unlikely(fbp == NULL)) {
1475 continue; 1463 continue;
1476 } 1464 }
1477 free = fbp->data; 1465 free = fbp->b_addr;
1478 ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); 1466 ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
1479 findex = 0; 1467 findex = 0;
1480 } 1468 }
@@ -1492,7 +1480,7 @@ xfs_dir2_node_addname_int(
1492 /* 1480 /*
1493 * Drop the block. 1481 * Drop the block.
1494 */ 1482 */
1495 xfs_da_brelse(tp, fbp); 1483 xfs_trans_brelse(tp, fbp);
1496 fbp = NULL; 1484 fbp = NULL;
1497 if (fblk && fblk->bp) 1485 if (fblk && fblk->bp)
1498 fblk->bp = NULL; 1486 fblk->bp = NULL;
@@ -1507,36 +1495,23 @@ xfs_dir2_node_addname_int(
1507 /* 1495 /*
1508 * Not allowed to allocate, return failure. 1496 * Not allowed to allocate, return failure.
1509 */ 1497 */
1510 if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || 1498 if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || args->total == 0)
1511 args->total == 0) {
1512 /*
1513 * Drop the freespace buffer unless it came from our
1514 * caller.
1515 */
1516 if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL)
1517 xfs_da_buf_done(fbp);
1518 return XFS_ERROR(ENOSPC); 1499 return XFS_ERROR(ENOSPC);
1519 } 1500
1520 /* 1501 /*
1521 * Allocate and initialize the new data block. 1502 * Allocate and initialize the new data block.
1522 */ 1503 */
1523 if (unlikely((error = xfs_dir2_grow_inode(args, 1504 if (unlikely((error = xfs_dir2_grow_inode(args,
1524 XFS_DIR2_DATA_SPACE, 1505 XFS_DIR2_DATA_SPACE,
1525 &dbno)) || 1506 &dbno)) ||
1526 (error = xfs_dir2_data_init(args, dbno, &dbp)))) { 1507 (error = xfs_dir2_data_init(args, dbno, &dbp))))
1527 /*
1528 * Drop the freespace buffer unless it came from our
1529 * caller.
1530 */
1531 if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL)
1532 xfs_da_buf_done(fbp);
1533 return error; 1508 return error;
1534 } 1509
1535 /* 1510 /*
1536 * If (somehow) we have a freespace block, get rid of it. 1511 * If (somehow) we have a freespace block, get rid of it.
1537 */ 1512 */
1538 if (fbp) 1513 if (fbp)
1539 xfs_da_brelse(tp, fbp); 1514 xfs_trans_brelse(tp, fbp);
1540 if (fblk && fblk->bp) 1515 if (fblk && fblk->bp)
1541 fblk->bp = NULL; 1516 fblk->bp = NULL;
1542 1517
@@ -1547,10 +1522,9 @@ xfs_dir2_node_addname_int(
1547 fbno = xfs_dir2_db_to_fdb(mp, dbno); 1522 fbno = xfs_dir2_db_to_fdb(mp, dbno);
1548 if (unlikely(error = xfs_da_read_buf(tp, dp, 1523 if (unlikely(error = xfs_da_read_buf(tp, dp,
1549 xfs_dir2_db_to_da(mp, fbno), -2, &fbp, 1524 xfs_dir2_db_to_da(mp, fbno), -2, &fbp,
1550 XFS_DATA_FORK))) { 1525 XFS_DATA_FORK)))
1551 xfs_da_buf_done(dbp);
1552 return error; 1526 return error;
1553 } 1527
1554 /* 1528 /*
1555 * If there wasn't a freespace block, the read will 1529 * If there wasn't a freespace block, the read will
1556 * return a NULL fbp. Allocate and initialize a new one. 1530 * return a NULL fbp. Allocate and initialize a new one.
@@ -1598,7 +1572,7 @@ xfs_dir2_node_addname_int(
1598 * Initialize the new block to be empty, and remember 1572 * Initialize the new block to be empty, and remember
1599 * its first slot as our empty slot. 1573 * its first slot as our empty slot.
1600 */ 1574 */
1601 free = fbp->data; 1575 free = fbp->b_addr;
1602 free->hdr.magic = cpu_to_be32(XFS_DIR2_FREE_MAGIC); 1576 free->hdr.magic = cpu_to_be32(XFS_DIR2_FREE_MAGIC);
1603 free->hdr.firstdb = cpu_to_be32( 1577 free->hdr.firstdb = cpu_to_be32(
1604 (fbno - XFS_DIR2_FREE_FIRSTDB(mp)) * 1578 (fbno - XFS_DIR2_FREE_FIRSTDB(mp)) *
@@ -1606,7 +1580,7 @@ xfs_dir2_node_addname_int(
1606 free->hdr.nvalid = 0; 1580 free->hdr.nvalid = 0;
1607 free->hdr.nused = 0; 1581 free->hdr.nused = 0;
1608 } else { 1582 } else {
1609 free = fbp->data; 1583 free = fbp->b_addr;
1610 ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); 1584 ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
1611 } 1585 }
1612 1586
@@ -1639,7 +1613,7 @@ xfs_dir2_node_addname_int(
1639 * We haven't allocated the data entry yet so this will 1613 * We haven't allocated the data entry yet so this will
1640 * change again. 1614 * change again.
1641 */ 1615 */
1642 hdr = dbp->data; 1616 hdr = dbp->b_addr;
1643 free->bests[findex] = hdr->bestfree[0].length; 1617 free->bests[findex] = hdr->bestfree[0].length;
1644 logfree = 1; 1618 logfree = 1;
1645 } 1619 }
@@ -1650,22 +1624,17 @@ xfs_dir2_node_addname_int(
1650 /* 1624 /*
1651 * If just checking, we succeeded. 1625 * If just checking, we succeeded.
1652 */ 1626 */
1653 if (args->op_flags & XFS_DA_OP_JUSTCHECK) { 1627 if (args->op_flags & XFS_DA_OP_JUSTCHECK)
1654 if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL)
1655 xfs_da_buf_done(fbp);
1656 return 0; 1628 return 0;
1657 } 1629
1658 /* 1630 /*
1659 * Read the data block in. 1631 * Read the data block in.
1660 */ 1632 */
1661 if (unlikely( 1633 error = xfs_da_read_buf(tp, dp, xfs_dir2_db_to_da(mp, dbno),
1662 error = xfs_da_read_buf(tp, dp, xfs_dir2_db_to_da(mp, dbno), 1634 -1, &dbp, XFS_DATA_FORK);
1663 -1, &dbp, XFS_DATA_FORK))) { 1635 if (error)
1664 if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL)
1665 xfs_da_buf_done(fbp);
1666 return error; 1636 return error;
1667 } 1637 hdr = dbp->b_addr;
1668 hdr = dbp->data;
1669 logfree = 0; 1638 logfree = 0;
1670 } 1639 }
1671 ASSERT(be16_to_cpu(hdr->bestfree[0].length) >= length); 1640 ASSERT(be16_to_cpu(hdr->bestfree[0].length) >= length);
@@ -1714,16 +1683,10 @@ xfs_dir2_node_addname_int(
1714 if (logfree) 1683 if (logfree)
1715 xfs_dir2_free_log_bests(tp, fbp, findex, findex); 1684 xfs_dir2_free_log_bests(tp, fbp, findex, findex);
1716 /* 1685 /*
1717 * If the caller didn't hand us the freespace block, drop it.
1718 */
1719 if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL)
1720 xfs_da_buf_done(fbp);
1721 /*
1722 * Return the data block and offset in args, then drop the data block. 1686 * Return the data block and offset in args, then drop the data block.
1723 */ 1687 */
1724 args->blkno = (xfs_dablk_t)dbno; 1688 args->blkno = (xfs_dablk_t)dbno;
1725 args->index = be16_to_cpu(*tagp); 1689 args->index = be16_to_cpu(*tagp);
1726 xfs_da_buf_done(dbp);
1727 return 0; 1690 return 0;
1728} 1691}
1729 1692
@@ -1761,22 +1724,23 @@ xfs_dir2_node_lookup(
1761 /* If a CI match, dup the actual name and return EEXIST */ 1724 /* If a CI match, dup the actual name and return EEXIST */
1762 xfs_dir2_data_entry_t *dep; 1725 xfs_dir2_data_entry_t *dep;
1763 1726
1764 dep = (xfs_dir2_data_entry_t *)((char *)state->extrablk.bp-> 1727 dep = (xfs_dir2_data_entry_t *)
1765 data + state->extrablk.index); 1728 ((char *)state->extrablk.bp->b_addr +
1729 state->extrablk.index);
1766 rval = xfs_dir_cilookup_result(args, dep->name, dep->namelen); 1730 rval = xfs_dir_cilookup_result(args, dep->name, dep->namelen);
1767 } 1731 }
1768 /* 1732 /*
1769 * Release the btree blocks and leaf block. 1733 * Release the btree blocks and leaf block.
1770 */ 1734 */
1771 for (i = 0; i < state->path.active; i++) { 1735 for (i = 0; i < state->path.active; i++) {
1772 xfs_da_brelse(args->trans, state->path.blk[i].bp); 1736 xfs_trans_brelse(args->trans, state->path.blk[i].bp);
1773 state->path.blk[i].bp = NULL; 1737 state->path.blk[i].bp = NULL;
1774 } 1738 }
1775 /* 1739 /*
1776 * Release the data block if we have it. 1740 * Release the data block if we have it.
1777 */ 1741 */
1778 if (state->extravalid && state->extrablk.bp) { 1742 if (state->extravalid && state->extrablk.bp) {
1779 xfs_da_brelse(args->trans, state->extrablk.bp); 1743 xfs_trans_brelse(args->trans, state->extrablk.bp);
1780 state->extrablk.bp = NULL; 1744 state->extrablk.bp = NULL;
1781 } 1745 }
1782 xfs_da_state_free(state); 1746 xfs_da_state_free(state);
@@ -1893,13 +1857,13 @@ xfs_dir2_node_replace(
1893 */ 1857 */
1894 blk = &state->path.blk[state->path.active - 1]; 1858 blk = &state->path.blk[state->path.active - 1];
1895 ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC); 1859 ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC);
1896 leaf = blk->bp->data; 1860 leaf = blk->bp->b_addr;
1897 lep = &leaf->ents[blk->index]; 1861 lep = &leaf->ents[blk->index];
1898 ASSERT(state->extravalid); 1862 ASSERT(state->extravalid);
1899 /* 1863 /*
1900 * Point to the data entry. 1864 * Point to the data entry.
1901 */ 1865 */
1902 hdr = state->extrablk.bp->data; 1866 hdr = state->extrablk.bp->b_addr;
1903 ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC)); 1867 ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC));
1904 dep = (xfs_dir2_data_entry_t *) 1868 dep = (xfs_dir2_data_entry_t *)
1905 ((char *)hdr + 1869 ((char *)hdr +
@@ -1916,14 +1880,14 @@ xfs_dir2_node_replace(
1916 * Didn't find it, and we're holding a data block. Drop it. 1880 * Didn't find it, and we're holding a data block. Drop it.
1917 */ 1881 */
1918 else if (state->extravalid) { 1882 else if (state->extravalid) {
1919 xfs_da_brelse(args->trans, state->extrablk.bp); 1883 xfs_trans_brelse(args->trans, state->extrablk.bp);
1920 state->extrablk.bp = NULL; 1884 state->extrablk.bp = NULL;
1921 } 1885 }
1922 /* 1886 /*
1923 * Release all the buffers in the cursor. 1887 * Release all the buffers in the cursor.
1924 */ 1888 */
1925 for (i = 0; i < state->path.active; i++) { 1889 for (i = 0; i < state->path.active; i++) {
1926 xfs_da_brelse(args->trans, state->path.blk[i].bp); 1890 xfs_trans_brelse(args->trans, state->path.blk[i].bp);
1927 state->path.blk[i].bp = NULL; 1891 state->path.blk[i].bp = NULL;
1928 } 1892 }
1929 xfs_da_state_free(state); 1893 xfs_da_state_free(state);
@@ -1940,7 +1904,7 @@ xfs_dir2_node_trim_free(
1940 xfs_fileoff_t fo, /* free block number */ 1904 xfs_fileoff_t fo, /* free block number */
1941 int *rvalp) /* out: did something */ 1905 int *rvalp) /* out: did something */
1942{ 1906{
1943 xfs_dabuf_t *bp; /* freespace buffer */ 1907 struct xfs_buf *bp; /* freespace buffer */
1944 xfs_inode_t *dp; /* incore directory inode */ 1908 xfs_inode_t *dp; /* incore directory inode */
1945 int error; /* error return code */ 1909 int error; /* error return code */
1946 xfs_dir2_free_t *free; /* freespace structure */ 1910 xfs_dir2_free_t *free; /* freespace structure */
@@ -1965,13 +1929,13 @@ xfs_dir2_node_trim_free(
1965 if (bp == NULL) { 1929 if (bp == NULL) {
1966 return 0; 1930 return 0;
1967 } 1931 }
1968 free = bp->data; 1932 free = bp->b_addr;
1969 ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); 1933 ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
1970 /* 1934 /*
1971 * If there are used entries, there's nothing to do. 1935 * If there are used entries, there's nothing to do.
1972 */ 1936 */
1973 if (be32_to_cpu(free->hdr.nused) > 0) { 1937 if (be32_to_cpu(free->hdr.nused) > 0) {
1974 xfs_da_brelse(tp, bp); 1938 xfs_trans_brelse(tp, bp);
1975 *rvalp = 0; 1939 *rvalp = 0;
1976 return 0; 1940 return 0;
1977 } 1941 }
@@ -1987,7 +1951,7 @@ xfs_dir2_node_trim_free(
1987 * pieces. This is the last block of an extent. 1951 * pieces. This is the last block of an extent.
1988 */ 1952 */
1989 ASSERT(error != ENOSPC); 1953 ASSERT(error != ENOSPC);
1990 xfs_da_brelse(tp, bp); 1954 xfs_trans_brelse(tp, bp);
1991 return error; 1955 return error;
1992 } 1956 }
1993 /* 1957 /*
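
The xfs_dir2_node.c changes above are almost entirely mechanical: the xfs_dabuf_t wrapper is gone, directory code now works on a plain struct xfs_buf, buffer contents are reached through b_addr instead of the wrapper's data pointer, and the wrapper-specific release helpers (xfs_da_brelse, xfs_da_buf_done) collapse into the ordinary xfs_trans_brelse. A minimal before/after sketch of the pattern, using only calls that appear in the hunks above and not meant to build on its own:

	/* Old pattern (3.5): dabuf wrapper with its own release helpers. */
	xfs_dabuf_t		*bp;
	xfs_dir2_leaf_t		*leaf;
	int			error;

	error = xfs_da_read_buf(tp, dp, blkno, -1, &bp, XFS_DATA_FORK);
	if (error)
		return error;
	leaf = bp->data;			/* data via the wrapper */
	/* ... use leaf ... */
	xfs_da_brelse(tp, bp);			/* or xfs_da_buf_done(bp) */

	/* New pattern (this series): plain xfs_buf, one release path. */
	struct xfs_buf		*bp;
	xfs_dir2_leaf_t		*leaf;
	int			error;

	error = xfs_da_read_buf(tp, dp, blkno, -1, &bp, XFS_DATA_FORK);
	if (error)
		return error;
	leaf = bp->b_addr;			/* mapped buffer memory directly */
	/* ... use leaf ... */
	xfs_trans_brelse(tp, bp);		/* standard transaction buffer release */
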
diff --git a/fs/xfs/xfs_dir2_priv.h b/fs/xfs/xfs_dir2_priv.h
index 067f403ecf8a..3523d3e15aa8 100644
--- a/fs/xfs/xfs_dir2_priv.h
+++ b/fs/xfs/xfs_dir2_priv.h
@@ -25,7 +25,7 @@ extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, int *r);
25extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space, 25extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
26 xfs_dir2_db_t *dbp); 26 xfs_dir2_db_t *dbp);
27extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db, 27extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
28 struct xfs_dabuf *bp); 28 struct xfs_buf *bp);
29extern int xfs_dir_cilookup_result(struct xfs_da_args *args, 29extern int xfs_dir_cilookup_result(struct xfs_da_args *args,
30 const unsigned char *name, int len); 30 const unsigned char *name, int len);
31 31
@@ -37,11 +37,11 @@ extern int xfs_dir2_block_lookup(struct xfs_da_args *args);
37extern int xfs_dir2_block_removename(struct xfs_da_args *args); 37extern int xfs_dir2_block_removename(struct xfs_da_args *args);
38extern int xfs_dir2_block_replace(struct xfs_da_args *args); 38extern int xfs_dir2_block_replace(struct xfs_da_args *args);
39extern int xfs_dir2_leaf_to_block(struct xfs_da_args *args, 39extern int xfs_dir2_leaf_to_block(struct xfs_da_args *args,
40 struct xfs_dabuf *lbp, struct xfs_dabuf *dbp); 40 struct xfs_buf *lbp, struct xfs_buf *dbp);
41 41
42/* xfs_dir2_data.c */ 42/* xfs_dir2_data.c */
43#ifdef DEBUG 43#ifdef DEBUG
44extern void xfs_dir2_data_check(struct xfs_inode *dp, struct xfs_dabuf *bp); 44extern void xfs_dir2_data_check(struct xfs_inode *dp, struct xfs_buf *bp);
45#else 45#else
46#define xfs_dir2_data_check(dp,bp) 46#define xfs_dir2_data_check(dp,bp)
47#endif 47#endif
@@ -51,43 +51,43 @@ xfs_dir2_data_freeinsert(struct xfs_dir2_data_hdr *hdr,
51extern void xfs_dir2_data_freescan(struct xfs_mount *mp, 51extern void xfs_dir2_data_freescan(struct xfs_mount *mp,
52 struct xfs_dir2_data_hdr *hdr, int *loghead); 52 struct xfs_dir2_data_hdr *hdr, int *loghead);
53extern int xfs_dir2_data_init(struct xfs_da_args *args, xfs_dir2_db_t blkno, 53extern int xfs_dir2_data_init(struct xfs_da_args *args, xfs_dir2_db_t blkno,
54 struct xfs_dabuf **bpp); 54 struct xfs_buf **bpp);
55extern void xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_dabuf *bp, 55extern void xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_buf *bp,
56 struct xfs_dir2_data_entry *dep); 56 struct xfs_dir2_data_entry *dep);
57extern void xfs_dir2_data_log_header(struct xfs_trans *tp, 57extern void xfs_dir2_data_log_header(struct xfs_trans *tp,
58 struct xfs_dabuf *bp); 58 struct xfs_buf *bp);
59extern void xfs_dir2_data_log_unused(struct xfs_trans *tp, struct xfs_dabuf *bp, 59extern void xfs_dir2_data_log_unused(struct xfs_trans *tp, struct xfs_buf *bp,
60 struct xfs_dir2_data_unused *dup); 60 struct xfs_dir2_data_unused *dup);
61extern void xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_dabuf *bp, 61extern void xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_buf *bp,
62 xfs_dir2_data_aoff_t offset, xfs_dir2_data_aoff_t len, 62 xfs_dir2_data_aoff_t offset, xfs_dir2_data_aoff_t len,
63 int *needlogp, int *needscanp); 63 int *needlogp, int *needscanp);
64extern void xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_dabuf *bp, 64extern void xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_buf *bp,
65 struct xfs_dir2_data_unused *dup, xfs_dir2_data_aoff_t offset, 65 struct xfs_dir2_data_unused *dup, xfs_dir2_data_aoff_t offset,
66 xfs_dir2_data_aoff_t len, int *needlogp, int *needscanp); 66 xfs_dir2_data_aoff_t len, int *needlogp, int *needscanp);
67 67
68/* xfs_dir2_leaf.c */ 68/* xfs_dir2_leaf.c */
69extern int xfs_dir2_block_to_leaf(struct xfs_da_args *args, 69extern int xfs_dir2_block_to_leaf(struct xfs_da_args *args,
70 struct xfs_dabuf *dbp); 70 struct xfs_buf *dbp);
71extern int xfs_dir2_leaf_addname(struct xfs_da_args *args); 71extern int xfs_dir2_leaf_addname(struct xfs_da_args *args);
72extern void xfs_dir2_leaf_compact(struct xfs_da_args *args, 72extern void xfs_dir2_leaf_compact(struct xfs_da_args *args,
73 struct xfs_dabuf *bp); 73 struct xfs_buf *bp);
74extern void xfs_dir2_leaf_compact_x1(struct xfs_dabuf *bp, int *indexp, 74extern void xfs_dir2_leaf_compact_x1(struct xfs_buf *bp, int *indexp,
75 int *lowstalep, int *highstalep, int *lowlogp, int *highlogp); 75 int *lowstalep, int *highstalep, int *lowlogp, int *highlogp);
76extern int xfs_dir2_leaf_getdents(struct xfs_inode *dp, void *dirent, 76extern int xfs_dir2_leaf_getdents(struct xfs_inode *dp, void *dirent,
77 size_t bufsize, xfs_off_t *offset, filldir_t filldir); 77 size_t bufsize, xfs_off_t *offset, filldir_t filldir);
78extern int xfs_dir2_leaf_init(struct xfs_da_args *args, xfs_dir2_db_t bno, 78extern int xfs_dir2_leaf_init(struct xfs_da_args *args, xfs_dir2_db_t bno,
79 struct xfs_dabuf **bpp, int magic); 79 struct xfs_buf **bpp, int magic);
80extern void xfs_dir2_leaf_log_ents(struct xfs_trans *tp, struct xfs_dabuf *bp, 80extern void xfs_dir2_leaf_log_ents(struct xfs_trans *tp, struct xfs_buf *bp,
81 int first, int last); 81 int first, int last);
82extern void xfs_dir2_leaf_log_header(struct xfs_trans *tp, 82extern void xfs_dir2_leaf_log_header(struct xfs_trans *tp,
83 struct xfs_dabuf *bp); 83 struct xfs_buf *bp);
84extern int xfs_dir2_leaf_lookup(struct xfs_da_args *args); 84extern int xfs_dir2_leaf_lookup(struct xfs_da_args *args);
85extern int xfs_dir2_leaf_removename(struct xfs_da_args *args); 85extern int xfs_dir2_leaf_removename(struct xfs_da_args *args);
86extern int xfs_dir2_leaf_replace(struct xfs_da_args *args); 86extern int xfs_dir2_leaf_replace(struct xfs_da_args *args);
87extern int xfs_dir2_leaf_search_hash(struct xfs_da_args *args, 87extern int xfs_dir2_leaf_search_hash(struct xfs_da_args *args,
88 struct xfs_dabuf *lbp); 88 struct xfs_buf *lbp);
89extern int xfs_dir2_leaf_trim_data(struct xfs_da_args *args, 89extern int xfs_dir2_leaf_trim_data(struct xfs_da_args *args,
90 struct xfs_dabuf *lbp, xfs_dir2_db_t db); 90 struct xfs_buf *lbp, xfs_dir2_db_t db);
91extern struct xfs_dir2_leaf_entry * 91extern struct xfs_dir2_leaf_entry *
92xfs_dir2_leaf_find_entry(struct xfs_dir2_leaf *leaf, int index, int compact, 92xfs_dir2_leaf_find_entry(struct xfs_dir2_leaf *leaf, int index, int compact,
93 int lowstale, int highstale, 93 int lowstale, int highstale,
@@ -96,13 +96,13 @@ extern int xfs_dir2_node_to_leaf(struct xfs_da_state *state);
96 96
97/* xfs_dir2_node.c */ 97/* xfs_dir2_node.c */
98extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args, 98extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args,
99 struct xfs_dabuf *lbp); 99 struct xfs_buf *lbp);
100extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_dabuf *bp, int *count); 100extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_buf *bp, int *count);
101extern int xfs_dir2_leafn_lookup_int(struct xfs_dabuf *bp, 101extern int xfs_dir2_leafn_lookup_int(struct xfs_buf *bp,
102 struct xfs_da_args *args, int *indexp, 102 struct xfs_da_args *args, int *indexp,
103 struct xfs_da_state *state); 103 struct xfs_da_state *state);
104extern int xfs_dir2_leafn_order(struct xfs_dabuf *leaf1_bp, 104extern int xfs_dir2_leafn_order(struct xfs_buf *leaf1_bp,
105 struct xfs_dabuf *leaf2_bp); 105 struct xfs_buf *leaf2_bp);
106extern int xfs_dir2_leafn_split(struct xfs_da_state *state, 106extern int xfs_dir2_leafn_split(struct xfs_da_state *state,
107 struct xfs_da_state_blk *oldblk, struct xfs_da_state_blk *newblk); 107 struct xfs_da_state_blk *oldblk, struct xfs_da_state_blk *newblk);
108extern int xfs_dir2_leafn_toosmall(struct xfs_da_state *state, int *action); 108extern int xfs_dir2_leafn_toosmall(struct xfs_da_state *state, int *action);
@@ -122,7 +122,7 @@ extern xfs_ino_t xfs_dir2_sfe_get_ino(struct xfs_dir2_sf_hdr *sfp,
122 struct xfs_dir2_sf_entry *sfep); 122 struct xfs_dir2_sf_entry *sfep);
123extern int xfs_dir2_block_sfsize(struct xfs_inode *dp, 123extern int xfs_dir2_block_sfsize(struct xfs_inode *dp,
124 struct xfs_dir2_data_hdr *block, struct xfs_dir2_sf_hdr *sfhp); 124 struct xfs_dir2_data_hdr *block, struct xfs_dir2_sf_hdr *sfhp);
125extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_dabuf *bp, 125extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_buf *bp,
126 int size, xfs_dir2_sf_hdr_t *sfhp); 126 int size, xfs_dir2_sf_hdr_t *sfhp);
127extern int xfs_dir2_sf_addname(struct xfs_da_args *args); 127extern int xfs_dir2_sf_addname(struct xfs_da_args *args);
128extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino); 128extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index 19bf0c5e38f4..1b9fc3ec7e4b 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -222,7 +222,7 @@ xfs_dir2_block_sfsize(
222int /* error */ 222int /* error */
223xfs_dir2_block_to_sf( 223xfs_dir2_block_to_sf(
224 xfs_da_args_t *args, /* operation arguments */ 224 xfs_da_args_t *args, /* operation arguments */
225 xfs_dabuf_t *bp, /* block buffer */ 225 struct xfs_buf *bp,
226 int size, /* shortform directory size */ 226 int size, /* shortform directory size */
227 xfs_dir2_sf_hdr_t *sfhp) /* shortform directory hdr */ 227 xfs_dir2_sf_hdr_t *sfhp) /* shortform directory hdr */
228{ 228{
@@ -249,7 +249,7 @@ xfs_dir2_block_to_sf(
249 * and add local data. 249 * and add local data.
250 */ 250 */
251 hdr = kmem_alloc(mp->m_dirblksize, KM_SLEEP); 251 hdr = kmem_alloc(mp->m_dirblksize, KM_SLEEP);
252 memcpy(hdr, bp->data, mp->m_dirblksize); 252 memcpy(hdr, bp->b_addr, mp->m_dirblksize);
253 logflags = XFS_ILOG_CORE; 253 logflags = XFS_ILOG_CORE;
254 if ((error = xfs_dir2_shrink_inode(args, mp->m_dirdatablk, bp))) { 254 if ((error = xfs_dir2_shrink_inode(args, mp->m_dirdatablk, bp))) {
255 ASSERT(error != ENOSPC); 255 ASSERT(error != ENOSPC);
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 9f7ec15a6522..c4559c6e6f2c 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -236,7 +236,6 @@ xfs_file_aio_read(
236 ssize_t ret = 0; 236 ssize_t ret = 0;
237 int ioflags = 0; 237 int ioflags = 0;
238 xfs_fsize_t n; 238 xfs_fsize_t n;
239 unsigned long seg;
240 239
241 XFS_STATS_INC(xs_read_calls); 240 XFS_STATS_INC(xs_read_calls);
242 241
@@ -247,19 +246,9 @@ xfs_file_aio_read(
247 if (file->f_mode & FMODE_NOCMTIME) 246 if (file->f_mode & FMODE_NOCMTIME)
248 ioflags |= IO_INVIS; 247 ioflags |= IO_INVIS;
249 248
250 /* START copy & waste from filemap.c */ 249 ret = generic_segment_checks(iovp, &nr_segs, &size, VERIFY_WRITE);
251 for (seg = 0; seg < nr_segs; seg++) { 250 if (ret < 0)
252 const struct iovec *iv = &iovp[seg]; 251 return ret;
253
254 /*
255 * If any segment has a negative length, or the cumulative
256 * length ever wraps negative then return -EINVAL.
257 */
258 size += iv->iov_len;
259 if (unlikely((ssize_t)(size|iv->iov_len) < 0))
260 return XFS_ERROR(-EINVAL);
261 }
262 /* END copy & waste from filemap.c */
263 252
264 if (unlikely(ioflags & IO_ISDIRECT)) { 253 if (unlikely(ioflags & IO_ISDIRECT)) {
265 xfs_buftarg_t *target = 254 xfs_buftarg_t *target =
@@ -273,7 +262,7 @@ xfs_file_aio_read(
273 } 262 }
274 } 263 }
275 264
276 n = XFS_MAXIOFFSET(mp) - iocb->ki_pos; 265 n = mp->m_super->s_maxbytes - iocb->ki_pos;
277 if (n <= 0 || size == 0) 266 if (n <= 0 || size == 0)
278 return 0; 267 return 0;
279 268
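
The removed "copy & waste" loop in xfs_file_aio_read() duplicated iovec validation that the VFS already provides; generic_segment_checks() now does that work. Roughly, the helper behaves as sketched below (paraphrased from mm/filemap.c of this era, so treat the details as an approximation rather than the exact implementation):

	/*
	 * generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE):
	 * reject negative segment lengths and cumulative lengths that wrap,
	 * verify each user buffer with access_ok(), truncate nr_segs at the
	 * first inaccessible segment, and return the total byte count.
	 */
	size_t		cnt = 0;
	unsigned long	seg;

	for (seg = 0; seg < *nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		cnt += iv->iov_len;
		if (unlikely((ssize_t)(cnt | iv->iov_len) < 0))
			return -EINVAL;		/* length overflow */
		if (access_ok(access_flags, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;		/* first segment unusable */
		*nr_segs = seg;			/* drop the unusable tail */
		cnt -= iv->iov_len;
		break;
	}
	*count = cnt;
	return 0;

The XFS_MAXIOFFSET(mp) to mp->m_super->s_maxbytes change in the same hunk swaps an XFS-private macro for the equivalent VFS limit on file offsets.
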
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 177a21a7ac49..21e37b55f7e5 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -442,14 +442,13 @@ xfs_ialloc_next_ag(
442 * Select an allocation group to look for a free inode in, based on the parent 442 * Select an allocation group to look for a free inode in, based on the parent
443 * inode and then mode. Return the allocation group buffer. 443 * inode and then mode. Return the allocation group buffer.
444 */ 444 */
445STATIC xfs_buf_t * /* allocation group buffer */ 445STATIC xfs_agnumber_t
446xfs_ialloc_ag_select( 446xfs_ialloc_ag_select(
447 xfs_trans_t *tp, /* transaction pointer */ 447 xfs_trans_t *tp, /* transaction pointer */
448 xfs_ino_t parent, /* parent directory inode number */ 448 xfs_ino_t parent, /* parent directory inode number */
449 umode_t mode, /* bits set to indicate file type */ 449 umode_t mode, /* bits set to indicate file type */
450 int okalloc) /* ok to allocate more space */ 450 int okalloc) /* ok to allocate more space */
451{ 451{
452 xfs_buf_t *agbp; /* allocation group header buffer */
453 xfs_agnumber_t agcount; /* number of ag's in the filesystem */ 452 xfs_agnumber_t agcount; /* number of ag's in the filesystem */
454 xfs_agnumber_t agno; /* current ag number */ 453 xfs_agnumber_t agno; /* current ag number */
455 int flags; /* alloc buffer locking flags */ 454 int flags; /* alloc buffer locking flags */
@@ -459,6 +458,7 @@ xfs_ialloc_ag_select(
459 int needspace; /* file mode implies space allocated */ 458 int needspace; /* file mode implies space allocated */
460 xfs_perag_t *pag; /* per allocation group data */ 459 xfs_perag_t *pag; /* per allocation group data */
461 xfs_agnumber_t pagno; /* parent (starting) ag number */ 460 xfs_agnumber_t pagno; /* parent (starting) ag number */
461 int error;
462 462
463 /* 463 /*
464 * Files of these types need at least one block if length > 0 464 * Files of these types need at least one block if length > 0
@@ -474,7 +474,9 @@ xfs_ialloc_ag_select(
474 if (pagno >= agcount) 474 if (pagno >= agcount)
475 pagno = 0; 475 pagno = 0;
476 } 476 }
477
477 ASSERT(pagno < agcount); 478 ASSERT(pagno < agcount);
479
478 /* 480 /*
479 * Loop through allocation groups, looking for one with a little 481 * Loop through allocation groups, looking for one with a little
480 * free space in it. Note we don't look for free inodes, exactly. 482 * free space in it. Note we don't look for free inodes, exactly.
@@ -486,51 +488,45 @@ xfs_ialloc_ag_select(
486 flags = XFS_ALLOC_FLAG_TRYLOCK; 488 flags = XFS_ALLOC_FLAG_TRYLOCK;
487 for (;;) { 489 for (;;) {
488 pag = xfs_perag_get(mp, agno); 490 pag = xfs_perag_get(mp, agno);
491 if (!pag->pagi_inodeok) {
492 xfs_ialloc_next_ag(mp);
493 goto nextag;
494 }
495
489 if (!pag->pagi_init) { 496 if (!pag->pagi_init) {
490 if (xfs_ialloc_read_agi(mp, tp, agno, &agbp)) { 497 error = xfs_ialloc_pagi_init(mp, tp, agno);
491 agbp = NULL; 498 if (error)
492 goto nextag; 499 goto nextag;
493 } 500 }
494 } else
495 agbp = NULL;
496 501
497 if (!pag->pagi_inodeok) { 502 if (pag->pagi_freecount) {
498 xfs_ialloc_next_ag(mp); 503 xfs_perag_put(pag);
499 goto unlock_nextag; 504 return agno;
500 } 505 }
501 506
502 /* 507 if (!okalloc)
503 * Is there enough free space for the file plus a block 508 goto nextag;
504 * of inodes (if we need to allocate some)? 509
505 */ 510 if (!pag->pagf_init) {
506 ineed = pag->pagi_freecount ? 0 : XFS_IALLOC_BLOCKS(mp); 511 error = xfs_alloc_pagf_init(mp, tp, agno, flags);
507 if (ineed && !pag->pagf_init) { 512 if (error)
508 if (agbp == NULL &&
509 xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
510 agbp = NULL;
511 goto nextag; 513 goto nextag;
512 }
513 (void)xfs_alloc_pagf_init(mp, tp, agno, flags);
514 } 514 }
515 if (!ineed || pag->pagf_init) { 515
516 if (ineed && !(longest = pag->pagf_longest)) 516 /*
517 longest = pag->pagf_flcount > 0; 517 * Is there enough free space for the file plus a block of
518 if (!ineed || 518 * inodes? (if we need to allocate some)?
519 (pag->pagf_freeblks >= needspace + ineed && 519 */
520 longest >= ineed && 520 ineed = XFS_IALLOC_BLOCKS(mp);
521 okalloc)) { 521 longest = pag->pagf_longest;
522 if (agbp == NULL && 522 if (!longest)
523 xfs_ialloc_read_agi(mp, tp, agno, &agbp)) { 523 longest = pag->pagf_flcount > 0;
524 agbp = NULL; 524
525 goto nextag; 525 if (pag->pagf_freeblks >= needspace + ineed &&
526 } 526 longest >= ineed) {
527 xfs_perag_put(pag); 527 xfs_perag_put(pag);
528 return agbp; 528 return agno;
529 }
530 } 529 }
531unlock_nextag:
532 if (agbp)
533 xfs_trans_brelse(tp, agbp);
534nextag: 530nextag:
535 xfs_perag_put(pag); 531 xfs_perag_put(pag);
536 /* 532 /*
@@ -538,13 +534,13 @@ nextag:
538 * down. 534 * down.
539 */ 535 */
540 if (XFS_FORCED_SHUTDOWN(mp)) 536 if (XFS_FORCED_SHUTDOWN(mp))
541 return NULL; 537 return NULLAGNUMBER;
542 agno++; 538 agno++;
543 if (agno >= agcount) 539 if (agno >= agcount)
544 agno = 0; 540 agno = 0;
545 if (agno == pagno) { 541 if (agno == pagno) {
546 if (flags == 0) 542 if (flags == 0)
547 return NULL; 543 return NULLAGNUMBER;
548 flags = 0; 544 flags = 0;
549 } 545 }
550 } 546 }
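
The net effect of the xfs_ialloc_ag_select() rework above is that the function no longer returns a locked AGI buffer; it only picks an allocation group number (or NULLAGNUMBER), and the AGI itself is read later by xfs_dialloc(). Condensed, the per-AG test now runs in this order (a sketch distilled from the new column above, with error handling and the perag get/put bookkeeping omitted):

	if (!pag->pagi_inodeok)			/* inode allocation disabled here */
		goto nextag;
	if (!pag->pagi_init)			/* pull in AGI counters only */
		xfs_ialloc_pagi_init(mp, tp, agno);
	if (pag->pagi_freecount)		/* free inodes already present */
		return agno;
	if (!okalloc)				/* may not grow the inode chunks */
		goto nextag;
	if (!pag->pagf_init)
		xfs_alloc_pagf_init(mp, tp, agno, flags);

	ineed = XFS_IALLOC_BLOCKS(mp);		/* blocks for one new inode chunk */
	longest = pag->pagf_longest;
	if (!longest)
		longest = pag->pagf_flcount > 0;
	if (pag->pagf_freeblks >= needspace + ineed && longest >= ineed)
		return agno;			/* room to allocate a chunk here */
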
@@ -607,195 +603,39 @@ xfs_ialloc_get_rec(
607} 603}
608 604
609/* 605/*
610 * Visible inode allocation functions. 606 * Allocate an inode.
611 */
612/*
613 * Find a free (set) bit in the inode bitmask.
614 */
615static inline int xfs_ialloc_find_free(xfs_inofree_t *fp)
616{
617 return xfs_lowbit64(*fp);
618}
619
620/*
621 * Allocate an inode on disk.
622 * Mode is used to tell whether the new inode will need space, and whether
623 * it is a directory.
624 *
625 * The arguments IO_agbp and alloc_done are defined to work within
626 * the constraint of one allocation per transaction.
627 * xfs_dialloc() is designed to be called twice if it has to do an
628 * allocation to make more free inodes. On the first call,
629 * IO_agbp should be set to NULL. If an inode is available,
630 * i.e., xfs_dialloc() did not need to do an allocation, an inode
631 * number is returned. In this case, IO_agbp would be set to the
632 * current ag_buf and alloc_done set to false.
633 * If an allocation needed to be done, xfs_dialloc would return
634 * the current ag_buf in IO_agbp and set alloc_done to true.
635 * The caller should then commit the current transaction, allocate a new
636 * transaction, and call xfs_dialloc() again, passing in the previous
637 * value of IO_agbp. IO_agbp should be held across the transactions.
638 * Since the agbp is locked across the two calls, the second call is
639 * guaranteed to have a free inode available.
640 * 607 *
641 * Once we successfully pick an inode its number is returned and the 608 * The caller selected an AG for us, and made sure that free inodes are
642 * on-disk data structures are updated. The inode itself is not read 609 * available.
643 * in, since doing so would break ordering constraints with xfs_reclaim.
644 */ 610 */
645int 611STATIC int
646xfs_dialloc( 612xfs_dialloc_ag(
647 xfs_trans_t *tp, /* transaction pointer */ 613 struct xfs_trans *tp,
648 xfs_ino_t parent, /* parent inode (directory) */ 614 struct xfs_buf *agbp,
649 umode_t mode, /* mode bits for new inode */ 615 xfs_ino_t parent,
650 int okalloc, /* ok to allocate more space */ 616 xfs_ino_t *inop)
651 xfs_buf_t **IO_agbp, /* in/out ag header's buffer */
652 boolean_t *alloc_done, /* true if we needed to replenish
653 inode freelist */
654 xfs_ino_t *inop) /* inode number allocated */
655{ 617{
656 xfs_agnumber_t agcount; /* number of allocation groups */ 618 struct xfs_mount *mp = tp->t_mountp;
657 xfs_buf_t *agbp; /* allocation group header's buffer */ 619 struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
658 xfs_agnumber_t agno; /* allocation group number */ 620 xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
659 xfs_agi_t *agi; /* allocation group header structure */ 621 xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
660 xfs_btree_cur_t *cur; /* inode allocation btree cursor */ 622 xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
661 int error; /* error return value */ 623 struct xfs_perag *pag;
662 int i; /* result code */ 624 struct xfs_btree_cur *cur, *tcur;
663 int ialloced; /* inode allocation status */ 625 struct xfs_inobt_rec_incore rec, trec;
664 int noroom = 0; /* no space for inode blk allocation */ 626 xfs_ino_t ino;
665 xfs_ino_t ino; /* fs-relative inode to be returned */ 627 int error;
666 /* REFERENCED */ 628 int offset;
667 int j; /* result code */ 629 int i, j;
668 xfs_mount_t *mp; /* file system mount structure */
669 int offset; /* index of inode in chunk */
670 xfs_agino_t pagino; /* parent's AG relative inode # */
671 xfs_agnumber_t pagno; /* parent's AG number */
672 xfs_inobt_rec_incore_t rec; /* inode allocation record */
673 xfs_agnumber_t tagno; /* testing allocation group number */
674 xfs_btree_cur_t *tcur; /* temp cursor */
675 xfs_inobt_rec_incore_t trec; /* temp inode allocation record */
676 struct xfs_perag *pag;
677
678
679 if (*IO_agbp == NULL) {
680 /*
681 * We do not have an agbp, so select an initial allocation
682 * group for inode allocation.
683 */
684 agbp = xfs_ialloc_ag_select(tp, parent, mode, okalloc);
685 /*
686 * Couldn't find an allocation group satisfying the
687 * criteria, give up.
688 */
689 if (!agbp) {
690 *inop = NULLFSINO;
691 return 0;
692 }
693 agi = XFS_BUF_TO_AGI(agbp);
694 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
695 } else {
696 /*
697 * Continue where we left off before. In this case, we
698 * know that the allocation group has free inodes.
699 */
700 agbp = *IO_agbp;
701 agi = XFS_BUF_TO_AGI(agbp);
702 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
703 ASSERT(be32_to_cpu(agi->agi_freecount) > 0);
704 }
705 mp = tp->t_mountp;
706 agcount = mp->m_sb.sb_agcount;
707 agno = be32_to_cpu(agi->agi_seqno);
708 tagno = agno;
709 pagno = XFS_INO_TO_AGNO(mp, parent);
710 pagino = XFS_INO_TO_AGINO(mp, parent);
711
712 /*
713 * If we have already hit the ceiling of inode blocks then clear
714 * okalloc so we scan all available agi structures for a free
715 * inode.
716 */
717
718 if (mp->m_maxicount &&
719 mp->m_sb.sb_icount + XFS_IALLOC_INODES(mp) > mp->m_maxicount) {
720 noroom = 1;
721 okalloc = 0;
722 }
723 630
724 /*
725 * Loop until we find an allocation group that either has free inodes
726 * or in which we can allocate some inodes. Iterate through the
727 * allocation groups upward, wrapping at the end.
728 */
729 *alloc_done = B_FALSE;
730 while (!agi->agi_freecount) {
731 /*
732 * Don't do anything if we're not supposed to allocate
733 * any blocks, just go on to the next ag.
734 */
735 if (okalloc) {
736 /*
737 * Try to allocate some new inodes in the allocation
738 * group.
739 */
740 if ((error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced))) {
741 xfs_trans_brelse(tp, agbp);
742 if (error == ENOSPC) {
743 *inop = NULLFSINO;
744 return 0;
745 } else
746 return error;
747 }
748 if (ialloced) {
749 /*
750 * We successfully allocated some inodes, return
751 * the current context to the caller so that it
752 * can commit the current transaction and call
753 * us again where we left off.
754 */
755 ASSERT(be32_to_cpu(agi->agi_freecount) > 0);
756 *alloc_done = B_TRUE;
757 *IO_agbp = agbp;
758 *inop = NULLFSINO;
759 return 0;
760 }
761 }
762 /*
763 * If it failed, give up on this ag.
764 */
765 xfs_trans_brelse(tp, agbp);
766 /*
767 * Go on to the next ag: get its ag header.
768 */
769nextag:
770 if (++tagno == agcount)
771 tagno = 0;
772 if (tagno == agno) {
773 *inop = NULLFSINO;
774 return noroom ? ENOSPC : 0;
775 }
776 pag = xfs_perag_get(mp, tagno);
777 if (pag->pagi_inodeok == 0) {
778 xfs_perag_put(pag);
779 goto nextag;
780 }
781 error = xfs_ialloc_read_agi(mp, tp, tagno, &agbp);
782 xfs_perag_put(pag);
783 if (error)
784 goto nextag;
785 agi = XFS_BUF_TO_AGI(agbp);
786 ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
787 }
788 /*
789 * Here with an allocation group that has a free inode.
790 * Reset agno since we may have chosen a new ag in the
791 * loop above.
792 */
793 agno = tagno;
794 *IO_agbp = NULL;
795 pag = xfs_perag_get(mp, agno); 631 pag = xfs_perag_get(mp, agno);
796 632
633 ASSERT(pag->pagi_init);
634 ASSERT(pag->pagi_inodeok);
635 ASSERT(pag->pagi_freecount > 0);
636
797 restart_pagno: 637 restart_pagno:
798 cur = xfs_inobt_init_cursor(mp, tp, agbp, be32_to_cpu(agi->agi_seqno)); 638 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
799 /* 639 /*
800 * If pagino is 0 (this is the root inode allocation) use newino. 640 * If pagino is 0 (this is the root inode allocation) use newino.
801 * This must work because we've just allocated some. 641 * This must work because we've just allocated some.
@@ -995,7 +835,7 @@ newino:
995 } 835 }
996 836
997alloc_inode: 837alloc_inode:
998 offset = xfs_ialloc_find_free(&rec.ir_free); 838 offset = xfs_lowbit64(rec.ir_free);
999 ASSERT(offset >= 0); 839 ASSERT(offset >= 0);
1000 ASSERT(offset < XFS_INODES_PER_CHUNK); 840 ASSERT(offset < XFS_INODES_PER_CHUNK);
1001 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) % 841 ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
@@ -1028,6 +868,164 @@ error0:
1028} 868}
1029 869
1030/* 870/*
871 * Allocate an inode on disk.
872 *
873 * Mode is used to tell whether the new inode will need space, and whether it
874 * is a directory.
875 *
876 * This function is designed to be called twice if it has to do an allocation
877 * to make more free inodes. On the first call, *IO_agbp should be set to NULL.
 878	 * If an inode is available without having to perform an allocation, an inode
879 * number is returned. In this case, *IO_agbp would be NULL. If an allocation
 880	 * needs to be done, xfs_dialloc would return the current AGI buffer in
881 * *IO_agbp. The caller should then commit the current transaction, allocate a
882 * new transaction, and call xfs_dialloc() again, passing in the previous value
883 * of *IO_agbp. IO_agbp should be held across the transactions. Since the AGI
884 * buffer is locked across the two calls, the second call is guaranteed to have
885 * a free inode available.
886 *
887 * Once we successfully pick an inode its number is returned and the on-disk
888 * data structures are updated. The inode itself is not read in, since doing so
889 * would break ordering constraints with xfs_reclaim.
890 */
891int
892xfs_dialloc(
893 struct xfs_trans *tp,
894 xfs_ino_t parent,
895 umode_t mode,
896 int okalloc,
897 struct xfs_buf **IO_agbp,
898 xfs_ino_t *inop)
899{
900 struct xfs_mount *mp = tp->t_mountp;
901 struct xfs_buf *agbp;
902 xfs_agnumber_t agno;
903 int error;
904 int ialloced;
905 int noroom = 0;
906 xfs_agnumber_t start_agno;
907 struct xfs_perag *pag;
908
909 if (*IO_agbp) {
910 /*
911 * If the caller passes in a pointer to the AGI buffer,
912 * continue where we left off before. In this case, we
913 * know that the allocation group has free inodes.
914 */
915 agbp = *IO_agbp;
916 goto out_alloc;
917 }
918
919 /*
920 * We do not have an agbp, so select an initial allocation
921 * group for inode allocation.
922 */
923 start_agno = xfs_ialloc_ag_select(tp, parent, mode, okalloc);
924 if (start_agno == NULLAGNUMBER) {
925 *inop = NULLFSINO;
926 return 0;
927 }
928
929 /*
930 * If we have already hit the ceiling of inode blocks then clear
931 * okalloc so we scan all available agi structures for a free
932 * inode.
933 */
934 if (mp->m_maxicount &&
935 mp->m_sb.sb_icount + XFS_IALLOC_INODES(mp) > mp->m_maxicount) {
936 noroom = 1;
937 okalloc = 0;
938 }
939
940 /*
941 * Loop until we find an allocation group that either has free inodes
942 * or in which we can allocate some inodes. Iterate through the
943 * allocation groups upward, wrapping at the end.
944 */
945 agno = start_agno;
946 for (;;) {
947 pag = xfs_perag_get(mp, agno);
948 if (!pag->pagi_inodeok) {
949 xfs_ialloc_next_ag(mp);
950 goto nextag;
951 }
952
953 if (!pag->pagi_init) {
954 error = xfs_ialloc_pagi_init(mp, tp, agno);
955 if (error)
956 goto out_error;
957 }
958
959 /*
960 * Do a first racy fast path check if this AG is usable.
961 */
962 if (!pag->pagi_freecount && !okalloc)
963 goto nextag;
964
965 error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
966 if (error)
967 goto out_error;
968
969 /*
970 * Once the AGI has been read in we have to recheck
971 * pagi_freecount with the AGI buffer lock held.
972 */
973 if (pag->pagi_freecount) {
974 xfs_perag_put(pag);
975 goto out_alloc;
976 }
977
978 if (!okalloc) {
979 xfs_trans_brelse(tp, agbp);
980 goto nextag;
981 }
982
983 error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced);
984 if (error) {
985 xfs_trans_brelse(tp, agbp);
986
987 if (error != ENOSPC)
988 goto out_error;
989
990 xfs_perag_put(pag);
991 *inop = NULLFSINO;
992 return 0;
993 }
994
995 if (ialloced) {
996 /*
997 * We successfully allocated some inodes, return
998 * the current context to the caller so that it
999 * can commit the current transaction and call
1000 * us again where we left off.
1001 */
1002 ASSERT(pag->pagi_freecount > 0);
1003 xfs_perag_put(pag);
1004
1005 *IO_agbp = agbp;
1006 *inop = NULLFSINO;
1007 return 0;
1008 }
1009
1010nextag:
1011 xfs_perag_put(pag);
1012 if (++agno == mp->m_sb.sb_agcount)
1013 agno = 0;
1014 if (agno == start_agno) {
1015 *inop = NULLFSINO;
1016 return noroom ? ENOSPC : 0;
1017 }
1018 }
1019
1020out_alloc:
1021 *IO_agbp = NULL;
1022 return xfs_dialloc_ag(tp, agbp, parent, inop);
1023out_error:
1024 xfs_perag_put(pag);
1025 return XFS_ERROR(error);
1026}
1027
1028/*
1031 * Free disk inode. Carefully avoids touching the incore inode, all 1029 * Free disk inode. Carefully avoids touching the incore inode, all
1032 * manipulations incore are the caller's responsibility. 1030 * manipulations incore are the caller's responsibility.
1033 * The on-disk inode is not changed by this operation, only the 1031 * The on-disk inode is not changed by this operation, only the
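[Editor's note] The xfs_dialloc() header comment above describes a two-call allocation protocol. The sketch below illustrates how a caller is expected to drive it; example_alloc_inode() and example_commit_and_renew_trans() are hypothetical placeholder names, not XFS functions, and a real caller must also keep the returned AGI buffer held across the transaction commit.

/*
 * Illustrative caller of the two-call xfs_dialloc() protocol described
 * in the comment above.  The example_* names are placeholders only.
 */
STATIC int
example_alloc_inode(
	struct xfs_trans	**tpp,
	xfs_ino_t		parent,
	umode_t			mode,
	xfs_ino_t		*inop)
{
	struct xfs_buf		*agibp = NULL;	/* must start out NULL */
	int			error;

	error = xfs_dialloc(*tpp, parent, mode, 1, &agibp, inop);
	if (error)
		return error;

	if (agibp) {
		/*
		 * No inode was handed out yet: commit this transaction,
		 * start a new one (placeholder helper), and retry with the
		 * held AGI buffer, which now guarantees a free inode.
		 */
		error = example_commit_and_renew_trans(tpp, agibp);
		if (error)
			return error;
		error = xfs_dialloc(*tpp, parent, mode, 1, &agibp, inop);
	}
	return error;
}

Passing the AGI buffer from the first call back into the second call is what makes the retry guaranteed to succeed.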
diff --git a/fs/xfs/xfs_ialloc.h b/fs/xfs/xfs_ialloc.h
index 65ac57c8063c..1fd6ea4e9c91 100644
--- a/fs/xfs/xfs_ialloc.h
+++ b/fs/xfs/xfs_ialloc.h
@@ -75,8 +75,6 @@ xfs_dialloc(
75 umode_t mode, /* mode bits for new inode */ 75 umode_t mode, /* mode bits for new inode */
76 int okalloc, /* ok to allocate more space */ 76 int okalloc, /* ok to allocate more space */
77 struct xfs_buf **agbp, /* buf for a.g. inode header */ 77 struct xfs_buf **agbp, /* buf for a.g. inode header */
78 boolean_t *alloc_done, /* an allocation was done to replenish
79 the free inodes */
80 xfs_ino_t *inop); /* inode number allocated */ 78 xfs_ino_t *inop); /* inode number allocated */
81 79
82/* 80/*
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 1bb4365e8c25..784a803383ec 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -41,17 +41,6 @@
41 41
42 42
43/* 43/*
44 * Define xfs inode iolock lockdep classes. We need to ensure that all active
45 * inodes are considered the same for lockdep purposes, including inodes that
 46 * are recycled through the XFS_IRECLAIMABLE state. This is the only way to
47 * guarantee the locks are considered the same when there are multiple lock
 48 * initialisation sites. Also, define a reclaimable inode class so it is
49 * obvious in lockdep reports which class the report is against.
50 */
51static struct lock_class_key xfs_iolock_active;
52struct lock_class_key xfs_iolock_reclaimable;
53
54/*
55 * Allocate and initialise an xfs_inode. 44 * Allocate and initialise an xfs_inode.
56 */ 45 */
57STATIC struct xfs_inode * 46STATIC struct xfs_inode *
@@ -80,8 +69,6 @@ xfs_inode_alloc(
80 ASSERT(ip->i_ino == 0); 69 ASSERT(ip->i_ino == 0);
81 70
82 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); 71 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
83 lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
84 &xfs_iolock_active, "xfs_iolock_active");
85 72
86 /* initialise the xfs inode */ 73 /* initialise the xfs inode */
87 ip->i_ino = ino; 74 ip->i_ino = ino;
@@ -250,8 +237,6 @@ xfs_iget_cache_hit(
250 237
251 ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock)); 238 ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
252 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); 239 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
253 lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
254 &xfs_iolock_active, "xfs_iolock_active");
255 240
256 spin_unlock(&ip->i_flags_lock); 241 spin_unlock(&ip->i_flags_lock);
257 spin_unlock(&pag->pag_ici_lock); 242 spin_unlock(&pag->pag_ici_lock);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index a59eea09930a..2778258fcfa2 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -132,23 +132,28 @@ xfs_inobp_check(
132#endif 132#endif
133 133
134/* 134/*
135 * Find the buffer associated with the given inode map 135 * This routine is called to map an inode to the buffer containing the on-disk
136 * We do basic validation checks on the buffer once it has been 136 * version of the inode. It returns a pointer to the buffer containing the
137 * retrieved from disk. 137 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
138 * pointer to the on-disk inode within that buffer.
139 *
140 * If a non-zero error is returned, then the contents of bpp and dipp are
141 * undefined.
138 */ 142 */
139STATIC int 143int
140xfs_imap_to_bp( 144xfs_imap_to_bp(
141 xfs_mount_t *mp, 145 struct xfs_mount *mp,
142 xfs_trans_t *tp, 146 struct xfs_trans *tp,
143 struct xfs_imap *imap, 147 struct xfs_imap *imap,
144 xfs_buf_t **bpp, 148 struct xfs_dinode **dipp,
145 uint buf_flags, 149 struct xfs_buf **bpp,
146 uint iget_flags) 150 uint buf_flags,
151 uint iget_flags)
147{ 152{
148 int error; 153 struct xfs_buf *bp;
149 int i; 154 int error;
150 int ni; 155 int i;
151 xfs_buf_t *bp; 156 int ni;
152 157
153 buf_flags |= XBF_UNMAPPED; 158 buf_flags |= XBF_UNMAPPED;
154 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno, 159 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
@@ -189,8 +194,8 @@ xfs_imap_to_bp(
189 xfs_trans_brelse(tp, bp); 194 xfs_trans_brelse(tp, bp);
190 return XFS_ERROR(EINVAL); 195 return XFS_ERROR(EINVAL);
191 } 196 }
192 XFS_CORRUPTION_ERROR("xfs_imap_to_bp", 197 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_HIGH,
193 XFS_ERRLEVEL_HIGH, mp, dip); 198 mp, dip);
194#ifdef DEBUG 199#ifdef DEBUG
195 xfs_emerg(mp, 200 xfs_emerg(mp,
196 "bad inode magic/vsn daddr %lld #%d (magic=%x)", 201 "bad inode magic/vsn daddr %lld #%d (magic=%x)",
@@ -204,96 +209,9 @@ xfs_imap_to_bp(
204 } 209 }
205 210
206 xfs_inobp_check(mp, bp); 211 xfs_inobp_check(mp, bp);
207 *bpp = bp;
208 return 0;
209}
210
211/*
212 * This routine is called to map an inode number within a file
213 * system to the buffer containing the on-disk version of the
214 * inode. It returns a pointer to the buffer containing the
215 * on-disk inode in the bpp parameter, and in the dip parameter
216 * it returns a pointer to the on-disk inode within that buffer.
217 *
218 * If a non-zero error is returned, then the contents of bpp and
219 * dipp are undefined.
220 *
221 * Use xfs_imap() to determine the size and location of the
222 * buffer to read from disk.
223 */
224int
225xfs_inotobp(
226 xfs_mount_t *mp,
227 xfs_trans_t *tp,
228 xfs_ino_t ino,
229 xfs_dinode_t **dipp,
230 xfs_buf_t **bpp,
231 int *offset,
232 uint imap_flags)
233{
234 struct xfs_imap imap;
235 xfs_buf_t *bp;
236 int error;
237
238 imap.im_blkno = 0;
239 error = xfs_imap(mp, tp, ino, &imap, imap_flags);
240 if (error)
241 return error;
242
243 error = xfs_imap_to_bp(mp, tp, &imap, &bp, 0, imap_flags);
244 if (error)
245 return error;
246
247 *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
248 *bpp = bp;
249 *offset = imap.im_boffset;
250 return 0;
251}
252
253
254/*
255 * This routine is called to map an inode to the buffer containing
256 * the on-disk version of the inode. It returns a pointer to the
257 * buffer containing the on-disk inode in the bpp parameter, and in
258 * the dip parameter it returns a pointer to the on-disk inode within
259 * that buffer.
260 *
261 * If a non-zero error is returned, then the contents of bpp and
262 * dipp are undefined.
263 *
 264 * The inode is expected to have already been mapped to its buffer and read
265 * in once, thus we can use the mapping information stored in the inode
266 * rather than calling xfs_imap(). This allows us to avoid the overhead
267 * of looking at the inode btree for small block file systems
268 * (see xfs_imap()).
269 */
270int
271xfs_itobp(
272 xfs_mount_t *mp,
273 xfs_trans_t *tp,
274 xfs_inode_t *ip,
275 xfs_dinode_t **dipp,
276 xfs_buf_t **bpp,
277 uint buf_flags)
278{
279 xfs_buf_t *bp;
280 int error;
281
282 ASSERT(ip->i_imap.im_blkno != 0);
283
284 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, buf_flags, 0);
285 if (error)
286 return error;
287 212
288 if (!bp) {
289 ASSERT(buf_flags & XBF_TRYLOCK);
290 ASSERT(tp == NULL);
291 *bpp = NULL;
292 return EAGAIN;
293 }
294
295 *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
296 *bpp = bp; 213 *bpp = bp;
214 *dipp = (struct xfs_dinode *)xfs_buf_offset(bp, imap->im_boffset);
297 return 0; 215 return 0;
298} 216}
299 217
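[Editor's note] With xfs_itobp() and xfs_inotobp() removed above, callers (converted later in this patch) go through xfs_imap_to_bp() directly. A minimal sketch of the two replacement patterns, assuming kernel context; example_read_dinode() is a hypothetical wrapper and error handling is abbreviated.

STATIC int
example_read_dinode(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,	/* pattern 1: incore inode */
	xfs_ino_t		ino)	/* pattern 2: bare inode number */
{
	struct xfs_dinode	*dip;
	struct xfs_buf		*bp;
	struct xfs_imap		imap;
	int			error;

	/* 1) Mapping already cached in the incore inode (old xfs_itobp()). */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, 0);
	if (error)
		return error;
	/* ... use dip ... */
	xfs_trans_brelse(tp, bp);

	/* 2) Arbitrary inode number (old xfs_inotobp()): map it first. */
	imap.im_blkno = 0;		/* let xfs_imap() fill in the mapping */
	error = xfs_imap(mp, tp, ino, &imap, 0);
	if (error)
		return error;
	error = xfs_imap_to_bp(mp, tp, &imap, &dip, &bp, 0, 0);
	if (error)
		return error;
	xfs_trans_brelse(tp, bp);
	return 0;
}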
@@ -796,10 +714,9 @@ xfs_iread(
796 /* 714 /*
797 * Get pointers to the on-disk inode and the buffer containing it. 715 * Get pointers to the on-disk inode and the buffer containing it.
798 */ 716 */
799 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, 0, iget_flags); 717 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
800 if (error) 718 if (error)
801 return error; 719 return error;
802 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
803 720
804 /* 721 /*
805 * If we got something that isn't an inode it means someone 722 * If we got something that isn't an inode it means someone
@@ -876,7 +793,7 @@ xfs_iread(
876 /* 793 /*
877 * Use xfs_trans_brelse() to release the buffer containing the 794 * Use xfs_trans_brelse() to release the buffer containing the
878 * on-disk inode, because it was acquired with xfs_trans_read_buf() 795 * on-disk inode, because it was acquired with xfs_trans_read_buf()
879 * in xfs_itobp() above. If tp is NULL, this is just a normal 796 * in xfs_imap_to_bp() above. If tp is NULL, this is just a normal
880 * brelse(). If we're within a transaction, then xfs_trans_brelse() 797 * brelse(). If we're within a transaction, then xfs_trans_brelse()
881 * will only release the buffer if it is not dirty within the 798 * will only release the buffer if it is not dirty within the
882 * transaction. It will be OK to release the buffer in this case, 799 * transaction. It will be OK to release the buffer in this case,
@@ -970,7 +887,6 @@ xfs_ialloc(
970 prid_t prid, 887 prid_t prid,
971 int okalloc, 888 int okalloc,
972 xfs_buf_t **ialloc_context, 889 xfs_buf_t **ialloc_context,
973 boolean_t *call_again,
974 xfs_inode_t **ipp) 890 xfs_inode_t **ipp)
975{ 891{
976 xfs_ino_t ino; 892 xfs_ino_t ino;
@@ -985,10 +901,10 @@ xfs_ialloc(
985 * the on-disk inode to be allocated. 901 * the on-disk inode to be allocated.
986 */ 902 */
987 error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc, 903 error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
988 ialloc_context, call_again, &ino); 904 ialloc_context, &ino);
989 if (error) 905 if (error)
990 return error; 906 return error;
991 if (*call_again || ino == NULLFSINO) { 907 if (*ialloc_context || ino == NULLFSINO) {
992 *ipp = NULL; 908 *ipp = NULL;
993 return 0; 909 return 0;
994 } 910 }
@@ -1207,7 +1123,9 @@ xfs_itruncate_extents(
1207 int error = 0; 1123 int error = 0;
1208 int done = 0; 1124 int done = 0;
1209 1125
1210 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); 1126 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1127 ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
1128 xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1211 ASSERT(new_size <= XFS_ISIZE(ip)); 1129 ASSERT(new_size <= XFS_ISIZE(ip));
1212 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); 1130 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1213 ASSERT(ip->i_itemp != NULL); 1131 ASSERT(ip->i_itemp != NULL);
@@ -1226,7 +1144,7 @@ xfs_itruncate_extents(
1226 * then there is nothing to do. 1144 * then there is nothing to do.
1227 */ 1145 */
1228 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); 1146 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1229 last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); 1147 last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1230 if (first_unmap_block == last_block) 1148 if (first_unmap_block == last_block)
1231 return 0; 1149 return 0;
1232 1150
@@ -1355,7 +1273,8 @@ xfs_iunlink(
1355 * Here we put the head pointer into our next pointer, 1273 * Here we put the head pointer into our next pointer,
1356 * and then we fall through to point the head at us. 1274 * and then we fall through to point the head at us.
1357 */ 1275 */
1358 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0); 1276 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
1277 0, 0);
1359 if (error) 1278 if (error)
1360 return error; 1279 return error;
1361 1280
@@ -1429,16 +1348,16 @@ xfs_iunlink_remove(
1429 1348
1430 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) { 1349 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
1431 /* 1350 /*
1432 * We're at the head of the list. Get the inode's 1351 * We're at the head of the list. Get the inode's on-disk
1433 * on-disk buffer to see if there is anyone after us 1352 * buffer to see if there is anyone after us on the list.
1434 * on the list. Only modify our next pointer if it 1353 * Only modify our next pointer if it is not already NULLAGINO.
1435 * is not already NULLAGINO. This saves us the overhead 1354 * This saves us the overhead of dealing with the buffer when
1436 * of dealing with the buffer when there is no need to 1355 * there is no need to change it.
1437 * change it.
1438 */ 1356 */
1439 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0); 1357 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
1358 0, 0);
1440 if (error) { 1359 if (error) {
1441 xfs_warn(mp, "%s: xfs_itobp() returned error %d.", 1360 xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
1442 __func__, error); 1361 __func__, error);
1443 return error; 1362 return error;
1444 } 1363 }
@@ -1472,34 +1391,45 @@ xfs_iunlink_remove(
1472 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); 1391 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1473 last_ibp = NULL; 1392 last_ibp = NULL;
1474 while (next_agino != agino) { 1393 while (next_agino != agino) {
1475 /* 1394 struct xfs_imap imap;
1476 * If the last inode wasn't the one pointing to 1395
1477 * us, then release its buffer since we're not 1396 if (last_ibp)
1478 * going to do anything with it.
1479 */
1480 if (last_ibp != NULL) {
1481 xfs_trans_brelse(tp, last_ibp); 1397 xfs_trans_brelse(tp, last_ibp);
1482 } 1398
1399 imap.im_blkno = 0;
1483 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino); 1400 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
1484 error = xfs_inotobp(mp, tp, next_ino, &last_dip, 1401
1485 &last_ibp, &last_offset, 0); 1402 error = xfs_imap(mp, tp, next_ino, &imap, 0);
1403 if (error) {
1404 xfs_warn(mp,
1405 "%s: xfs_imap returned error %d.",
1406 __func__, error);
1407 return error;
1408 }
1409
1410 error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
1411 &last_ibp, 0, 0);
1486 if (error) { 1412 if (error) {
1487 xfs_warn(mp, 1413 xfs_warn(mp,
1488 "%s: xfs_inotobp() returned error %d.", 1414 "%s: xfs_imap_to_bp returned error %d.",
1489 __func__, error); 1415 __func__, error);
1490 return error; 1416 return error;
1491 } 1417 }
1418
1419 last_offset = imap.im_boffset;
1492 next_agino = be32_to_cpu(last_dip->di_next_unlinked); 1420 next_agino = be32_to_cpu(last_dip->di_next_unlinked);
1493 ASSERT(next_agino != NULLAGINO); 1421 ASSERT(next_agino != NULLAGINO);
1494 ASSERT(next_agino != 0); 1422 ASSERT(next_agino != 0);
1495 } 1423 }
1424
1496 /* 1425 /*
1497 * Now last_ibp points to the buffer previous to us on 1426 * Now last_ibp points to the buffer previous to us on the
1498 * the unlinked list. Pull us from the list. 1427 * unlinked list. Pull us from the list.
1499 */ 1428 */
1500 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0); 1429 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
1430 0, 0);
1501 if (error) { 1431 if (error) {
1502 xfs_warn(mp, "%s: xfs_itobp(2) returned error %d.", 1432 xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
1503 __func__, error); 1433 __func__, error);
1504 return error; 1434 return error;
1505 } 1435 }
@@ -1749,7 +1679,8 @@ xfs_ifree(
1749 1679
1750 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1680 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1751 1681
1752 error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0); 1682 error = xfs_imap_to_bp(ip->i_mount, tp, &ip->i_imap, &dip, &ibp,
1683 0, 0);
1753 if (error) 1684 if (error)
1754 return error; 1685 return error;
1755 1686
@@ -2428,7 +2359,7 @@ xfs_iflush(
2428 /* 2359 /*
2429 * For stale inodes we cannot rely on the backing buffer remaining 2360 * For stale inodes we cannot rely on the backing buffer remaining
2430 * stale in cache for the remaining life of the stale inode and so 2361 * stale in cache for the remaining life of the stale inode and so
2431 * xfs_itobp() below may give us a buffer that no longer contains 2362 * xfs_imap_to_bp() below may give us a buffer that no longer contains
2432 * inodes below. We have to check this after ensuring the inode is 2363 * inodes below. We have to check this after ensuring the inode is
2433 * unpinned so that it is safe to reclaim the stale inode after the 2364 * unpinned so that it is safe to reclaim the stale inode after the
2434 * flush call. 2365 * flush call.
@@ -2454,7 +2385,8 @@ xfs_iflush(
2454 /* 2385 /*
2455 * Get the buffer containing the on-disk inode. 2386 * Get the buffer containing the on-disk inode.
2456 */ 2387 */
2457 error = xfs_itobp(mp, NULL, ip, &dip, &bp, XBF_TRYLOCK); 2388 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
2389 0);
2458 if (error || !bp) { 2390 if (error || !bp) {
2459 xfs_ifunlock(ip); 2391 xfs_ifunlock(ip);
2460 return error; 2392 return error;
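[Editor's note] The xfs_iunlink_remove() hunks above splice an inode out of its AGI unlinked bucket, which is a singly linked on-disk list: agi_unlinked[i] holds the head agino and each inode's di_next_unlinked points to the next entry. The toy model below shows only that splice, with all buffer and transaction handling stripped out; it is illustrative, not XFS code.

#define EXAMPLE_NULLAGINO	((uint32_t)-1)	/* stands in for NULLAGINO */

struct example_node {
	uint32_t	agino;
	uint32_t	next;			/* models di_next_unlinked */
};

static void
example_unlink_remove(
	uint32_t		*bucket,	/* models agi_unlinked[i] */
	struct example_node	*(*lookup)(uint32_t agino),
	uint32_t		agino)
{
	struct example_node	*victim = lookup(agino);

	if (*bucket == agino) {
		/* Head of the bucket: the bucket now points at our successor. */
		*bucket = victim->next;
	} else {
		/* Walk from the head to find our predecessor, then skip us. */
		struct example_node	*prev = lookup(*bucket);

		while (prev->next != agino)
			prev = lookup(prev->next);
		prev->next = victim->next;
	}
	victim->next = EXAMPLE_NULLAGINO;
}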
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 1efff36a75b6..94b32f906e79 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -487,8 +487,6 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
487#define XFS_IOLOCK_DEP(flags) (((flags) & XFS_IOLOCK_DEP_MASK) >> XFS_IOLOCK_SHIFT) 487#define XFS_IOLOCK_DEP(flags) (((flags) & XFS_IOLOCK_DEP_MASK) >> XFS_IOLOCK_SHIFT)
488#define XFS_ILOCK_DEP(flags) (((flags) & XFS_ILOCK_DEP_MASK) >> XFS_ILOCK_SHIFT) 488#define XFS_ILOCK_DEP(flags) (((flags) & XFS_ILOCK_DEP_MASK) >> XFS_ILOCK_SHIFT)
489 489
490extern struct lock_class_key xfs_iolock_reclaimable;
491
492/* 490/*
493 * For multiple groups support: if S_ISGID bit is set in the parent 491 * For multiple groups support: if S_ISGID bit is set in the parent
494 * directory, group of new file is set to that of the parent, and 492 * directory, group of new file is set to that of the parent, and
@@ -517,7 +515,7 @@ void xfs_inode_free(struct xfs_inode *ip);
517 */ 515 */
518int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, umode_t, 516int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, umode_t,
519 xfs_nlink_t, xfs_dev_t, prid_t, int, 517 xfs_nlink_t, xfs_dev_t, prid_t, int,
520 struct xfs_buf **, boolean_t *, xfs_inode_t **); 518 struct xfs_buf **, xfs_inode_t **);
521 519
522uint xfs_ip2xflags(struct xfs_inode *); 520uint xfs_ip2xflags(struct xfs_inode *);
523uint xfs_dic2xflags(struct xfs_dinode *); 521uint xfs_dic2xflags(struct xfs_dinode *);
@@ -557,12 +555,9 @@ do { \
557#define XFS_IGET_UNTRUSTED 0x2 555#define XFS_IGET_UNTRUSTED 0x2
558#define XFS_IGET_DONTCACHE 0x4 556#define XFS_IGET_DONTCACHE 0x4
559 557
560int xfs_inotobp(struct xfs_mount *, struct xfs_trans *, 558int xfs_imap_to_bp(struct xfs_mount *, struct xfs_trans *,
561 xfs_ino_t, struct xfs_dinode **, 559 struct xfs_imap *, struct xfs_dinode **,
562 struct xfs_buf **, int *, uint); 560 struct xfs_buf **, uint, uint);
563int xfs_itobp(struct xfs_mount *, struct xfs_trans *,
564 struct xfs_inode *, struct xfs_dinode **,
565 struct xfs_buf **, uint);
566int xfs_iread(struct xfs_mount *, struct xfs_trans *, 561int xfs_iread(struct xfs_mount *, struct xfs_trans *,
567 struct xfs_inode *, uint); 562 struct xfs_inode *, uint);
568void xfs_dinode_to_disk(struct xfs_dinode *, 563void xfs_dinode_to_disk(struct xfs_dinode *,
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index aadfce6681ee..915edf6639f0 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -285,7 +285,7 @@ xfs_iomap_eof_want_preallocate(
285 * do any speculative allocation. 285 * do any speculative allocation.
286 */ 286 */
287 start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1))); 287 start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
288 count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); 288 count_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
289 while (count_fsb > 0) { 289 while (count_fsb > 0) {
290 imaps = nimaps; 290 imaps = nimaps;
291 firstblock = NULLFSBLOCK; 291 firstblock = NULLFSBLOCK;
@@ -416,8 +416,8 @@ retry:
416 * Make sure preallocation does not create extents beyond the range we 416 * Make sure preallocation does not create extents beyond the range we
417 * actually support in this filesystem. 417 * actually support in this filesystem.
418 */ 418 */
419 if (last_fsb > XFS_B_TO_FSB(mp, mp->m_maxioffset)) 419 if (last_fsb > XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes))
420 last_fsb = XFS_B_TO_FSB(mp, mp->m_maxioffset); 420 last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
421 421
422 ASSERT(last_fsb > offset_fsb); 422 ASSERT(last_fsb > offset_fsb);
423 423
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 9c4340f5c3e0..4e00cf091d2c 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -897,6 +897,47 @@ xfs_vn_setattr(
897 return -xfs_setattr_nonsize(XFS_I(dentry->d_inode), iattr, 0); 897 return -xfs_setattr_nonsize(XFS_I(dentry->d_inode), iattr, 0);
898} 898}
899 899
900STATIC int
901xfs_vn_update_time(
902 struct inode *inode,
903 struct timespec *now,
904 int flags)
905{
906 struct xfs_inode *ip = XFS_I(inode);
907 struct xfs_mount *mp = ip->i_mount;
908 struct xfs_trans *tp;
909 int error;
910
911 trace_xfs_update_time(ip);
912
913 tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
914 error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
915 if (error) {
916 xfs_trans_cancel(tp, 0);
917 return -error;
918 }
919
920 xfs_ilock(ip, XFS_ILOCK_EXCL);
921 if (flags & S_CTIME) {
922 inode->i_ctime = *now;
923 ip->i_d.di_ctime.t_sec = (__int32_t)now->tv_sec;
924 ip->i_d.di_ctime.t_nsec = (__int32_t)now->tv_nsec;
925 }
926 if (flags & S_MTIME) {
927 inode->i_mtime = *now;
928 ip->i_d.di_mtime.t_sec = (__int32_t)now->tv_sec;
929 ip->i_d.di_mtime.t_nsec = (__int32_t)now->tv_nsec;
930 }
931 if (flags & S_ATIME) {
932 inode->i_atime = *now;
933 ip->i_d.di_atime.t_sec = (__int32_t)now->tv_sec;
934 ip->i_d.di_atime.t_nsec = (__int32_t)now->tv_nsec;
935 }
936 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
937 xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
938 return -xfs_trans_commit(tp, 0);
939}
940
900#define XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) 941#define XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
901 942
902/* 943/*
@@ -991,6 +1032,7 @@ static const struct inode_operations xfs_inode_operations = {
991 .removexattr = generic_removexattr, 1032 .removexattr = generic_removexattr,
992 .listxattr = xfs_vn_listxattr, 1033 .listxattr = xfs_vn_listxattr,
993 .fiemap = xfs_vn_fiemap, 1034 .fiemap = xfs_vn_fiemap,
1035 .update_time = xfs_vn_update_time,
994}; 1036};
995 1037
996static const struct inode_operations xfs_dir_inode_operations = { 1038static const struct inode_operations xfs_dir_inode_operations = {
@@ -1016,6 +1058,7 @@ static const struct inode_operations xfs_dir_inode_operations = {
1016 .getxattr = generic_getxattr, 1058 .getxattr = generic_getxattr,
1017 .removexattr = generic_removexattr, 1059 .removexattr = generic_removexattr,
1018 .listxattr = xfs_vn_listxattr, 1060 .listxattr = xfs_vn_listxattr,
1061 .update_time = xfs_vn_update_time,
1019}; 1062};
1020 1063
1021static const struct inode_operations xfs_dir_ci_inode_operations = { 1064static const struct inode_operations xfs_dir_ci_inode_operations = {
@@ -1041,6 +1084,7 @@ static const struct inode_operations xfs_dir_ci_inode_operations = {
1041 .getxattr = generic_getxattr, 1084 .getxattr = generic_getxattr,
1042 .removexattr = generic_removexattr, 1085 .removexattr = generic_removexattr,
1043 .listxattr = xfs_vn_listxattr, 1086 .listxattr = xfs_vn_listxattr,
1087 .update_time = xfs_vn_update_time,
1044}; 1088};
1045 1089
1046static const struct inode_operations xfs_symlink_inode_operations = { 1090static const struct inode_operations xfs_symlink_inode_operations = {
@@ -1054,6 +1098,7 @@ static const struct inode_operations xfs_symlink_inode_operations = {
1054 .getxattr = generic_getxattr, 1098 .getxattr = generic_getxattr,
1055 .removexattr = generic_removexattr, 1099 .removexattr = generic_removexattr,
1056 .listxattr = xfs_vn_listxattr, 1100 .listxattr = xfs_vn_listxattr,
1101 .update_time = xfs_vn_update_time,
1057}; 1102};
1058 1103
1059STATIC void 1104STATIC void
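[Editor's note] xfs_vn_update_time() above is hooked into each inode_operations table via .update_time. As a rough sketch of why that matters, the VFS timestamp-update path is expected to prefer the filesystem hook when one is present; the function below is an approximation for illustration only, not the actual fs/inode.c code of this kernel.

/* Illustrative shape of the VFS-side dispatch that makes .update_time useful. */
static int
example_update_time(struct inode *inode, struct timespec *time, int flags)
{
	if (inode->i_op->update_time)
		return inode->i_op->update_time(inode, time, flags);

	/* Fallback: update the generic fields and mark the inode dirty. */
	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;
	mark_inode_dirty_sync(inode);
	return 0;
}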
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index eff577a9b67f..01d10a66e302 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -555,7 +555,7 @@ xfs_bulkstat_single(
555 555
556 /* 556 /*
557 * note that requesting valid inode numbers which are not allocated 557 * note that requesting valid inode numbers which are not allocated
558 * to inodes will most likely cause xfs_itobp to generate warning 558 * to inodes will most likely cause xfs_imap_to_bp to generate warning
559 * messages about bad magic numbers. This is ok. The fact that 559 * messages about bad magic numbers. This is ok. The fact that
560 * the inode isn't actually an inode is handled by the 560 * the inode isn't actually an inode is handled by the
561 * error check below. Done this way to make the usual case faster 561 * error check below. Done this way to make the usual case faster
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index d90d4a388609..7f4f9370d0e7 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -45,51 +45,85 @@ xlog_commit_record(
45 struct xlog_in_core **iclog, 45 struct xlog_in_core **iclog,
46 xfs_lsn_t *commitlsnp); 46 xfs_lsn_t *commitlsnp);
47 47
48STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp, 48STATIC struct xlog *
49 xfs_buftarg_t *log_target, 49xlog_alloc_log(
50 xfs_daddr_t blk_offset, 50 struct xfs_mount *mp,
51 int num_bblks); 51 struct xfs_buftarg *log_target,
52 xfs_daddr_t blk_offset,
53 int num_bblks);
52STATIC int 54STATIC int
53xlog_space_left( 55xlog_space_left(
54 struct xlog *log, 56 struct xlog *log,
55 atomic64_t *head); 57 atomic64_t *head);
56STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog); 58STATIC int
57STATIC void xlog_dealloc_log(xlog_t *log); 59xlog_sync(
60 struct xlog *log,
61 struct xlog_in_core *iclog);
62STATIC void
63xlog_dealloc_log(
64 struct xlog *log);
58 65
59/* local state machine functions */ 66/* local state machine functions */
60STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int); 67STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int);
61STATIC void xlog_state_do_callback(xlog_t *log,int aborted, xlog_in_core_t *iclog); 68STATIC void
62STATIC int xlog_state_get_iclog_space(xlog_t *log, 69xlog_state_do_callback(
63 int len, 70 struct xlog *log,
64 xlog_in_core_t **iclog, 71 int aborted,
65 xlog_ticket_t *ticket, 72 struct xlog_in_core *iclog);
66 int *continued_write, 73STATIC int
67 int *logoffsetp); 74xlog_state_get_iclog_space(
68STATIC int xlog_state_release_iclog(xlog_t *log, 75 struct xlog *log,
69 xlog_in_core_t *iclog); 76 int len,
70STATIC void xlog_state_switch_iclogs(xlog_t *log, 77 struct xlog_in_core **iclog,
71 xlog_in_core_t *iclog, 78 struct xlog_ticket *ticket,
72 int eventual_size); 79 int *continued_write,
73STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog); 80 int *logoffsetp);
81STATIC int
82xlog_state_release_iclog(
83 struct xlog *log,
84 struct xlog_in_core *iclog);
85STATIC void
86xlog_state_switch_iclogs(
87 struct xlog *log,
88 struct xlog_in_core *iclog,
89 int eventual_size);
90STATIC void
91xlog_state_want_sync(
92 struct xlog *log,
93 struct xlog_in_core *iclog);
74 94
75STATIC void 95STATIC void
76xlog_grant_push_ail( 96xlog_grant_push_ail(
77 struct xlog *log, 97 struct xlog *log,
78 int need_bytes); 98 int need_bytes);
79STATIC void xlog_regrant_reserve_log_space(xlog_t *log, 99STATIC void
80 xlog_ticket_t *ticket); 100xlog_regrant_reserve_log_space(
81STATIC void xlog_ungrant_log_space(xlog_t *log, 101 struct xlog *log,
82 xlog_ticket_t *ticket); 102 struct xlog_ticket *ticket);
103STATIC void
104xlog_ungrant_log_space(
105 struct xlog *log,
106 struct xlog_ticket *ticket);
83 107
84#if defined(DEBUG) 108#if defined(DEBUG)
85STATIC void xlog_verify_dest_ptr(xlog_t *log, char *ptr); 109STATIC void
110xlog_verify_dest_ptr(
111 struct xlog *log,
112 char *ptr);
86STATIC void 113STATIC void
87xlog_verify_grant_tail( 114xlog_verify_grant_tail(
88 struct xlog *log); 115 struct xlog *log);
89STATIC void xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog, 116STATIC void
90 int count, boolean_t syncing); 117xlog_verify_iclog(
91STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog, 118 struct xlog *log,
92 xfs_lsn_t tail_lsn); 119 struct xlog_in_core *iclog,
120 int count,
121 boolean_t syncing);
122STATIC void
123xlog_verify_tail_lsn(
124 struct xlog *log,
125 struct xlog_in_core *iclog,
126 xfs_lsn_t tail_lsn);
93#else 127#else
94#define xlog_verify_dest_ptr(a,b) 128#define xlog_verify_dest_ptr(a,b)
95#define xlog_verify_grant_tail(a) 129#define xlog_verify_grant_tail(a)
@@ -97,7 +131,9 @@ STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
97#define xlog_verify_tail_lsn(a,b,c) 131#define xlog_verify_tail_lsn(a,b,c)
98#endif 132#endif
99 133
100STATIC int xlog_iclogs_empty(xlog_t *log); 134STATIC int
135xlog_iclogs_empty(
136 struct xlog *log);
101 137
102static void 138static void
103xlog_grant_sub_space( 139xlog_grant_sub_space(
@@ -684,7 +720,7 @@ xfs_log_mount_finish(xfs_mount_t *mp)
684int 720int
685xfs_log_unmount_write(xfs_mount_t *mp) 721xfs_log_unmount_write(xfs_mount_t *mp)
686{ 722{
687 xlog_t *log = mp->m_log; 723 struct xlog *log = mp->m_log;
688 xlog_in_core_t *iclog; 724 xlog_in_core_t *iclog;
689#ifdef DEBUG 725#ifdef DEBUG
690 xlog_in_core_t *first_iclog; 726 xlog_in_core_t *first_iclog;
@@ -893,7 +929,7 @@ int
893xfs_log_need_covered(xfs_mount_t *mp) 929xfs_log_need_covered(xfs_mount_t *mp)
894{ 930{
895 int needed = 0; 931 int needed = 0;
896 xlog_t *log = mp->m_log; 932 struct xlog *log = mp->m_log;
897 933
898 if (!xfs_fs_writable(mp)) 934 if (!xfs_fs_writable(mp))
899 return 0; 935 return 0;
@@ -1024,9 +1060,9 @@ xlog_space_left(
1024void 1060void
1025xlog_iodone(xfs_buf_t *bp) 1061xlog_iodone(xfs_buf_t *bp)
1026{ 1062{
1027 xlog_in_core_t *iclog = bp->b_fspriv; 1063 struct xlog_in_core *iclog = bp->b_fspriv;
1028 xlog_t *l = iclog->ic_log; 1064 struct xlog *l = iclog->ic_log;
1029 int aborted = 0; 1065 int aborted = 0;
1030 1066
1031 /* 1067 /*
1032 * Race to shutdown the filesystem if we see an error. 1068 * Race to shutdown the filesystem if we see an error.
@@ -1067,8 +1103,9 @@ xlog_iodone(xfs_buf_t *bp)
1067 */ 1103 */
1068 1104
1069STATIC void 1105STATIC void
1070xlog_get_iclog_buffer_size(xfs_mount_t *mp, 1106xlog_get_iclog_buffer_size(
1071 xlog_t *log) 1107 struct xfs_mount *mp,
1108 struct xlog *log)
1072{ 1109{
1073 int size; 1110 int size;
1074 int xhdrs; 1111 int xhdrs;
@@ -1129,13 +1166,14 @@ done:
1129 * Its primary purpose is to fill in enough, so recovery can occur. However, 1166 * Its primary purpose is to fill in enough, so recovery can occur. However,
1130 * some other stuff may be filled in too. 1167 * some other stuff may be filled in too.
1131 */ 1168 */
1132STATIC xlog_t * 1169STATIC struct xlog *
1133xlog_alloc_log(xfs_mount_t *mp, 1170xlog_alloc_log(
1134 xfs_buftarg_t *log_target, 1171 struct xfs_mount *mp,
1135 xfs_daddr_t blk_offset, 1172 struct xfs_buftarg *log_target,
1136 int num_bblks) 1173 xfs_daddr_t blk_offset,
1174 int num_bblks)
1137{ 1175{
1138 xlog_t *log; 1176 struct xlog *log;
1139 xlog_rec_header_t *head; 1177 xlog_rec_header_t *head;
1140 xlog_in_core_t **iclogp; 1178 xlog_in_core_t **iclogp;
1141 xlog_in_core_t *iclog, *prev_iclog=NULL; 1179 xlog_in_core_t *iclog, *prev_iclog=NULL;
@@ -1144,7 +1182,7 @@ xlog_alloc_log(xfs_mount_t *mp,
1144 int error = ENOMEM; 1182 int error = ENOMEM;
1145 uint log2_size = 0; 1183 uint log2_size = 0;
1146 1184
1147 log = kmem_zalloc(sizeof(xlog_t), KM_MAYFAIL); 1185 log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
1148 if (!log) { 1186 if (!log) {
1149 xfs_warn(mp, "Log allocation failed: No memory!"); 1187 xfs_warn(mp, "Log allocation failed: No memory!");
1150 goto out; 1188 goto out;
@@ -1434,8 +1472,9 @@ xlog_bdstrat(
1434 */ 1472 */
1435 1473
1436STATIC int 1474STATIC int
1437xlog_sync(xlog_t *log, 1475xlog_sync(
1438 xlog_in_core_t *iclog) 1476 struct xlog *log,
1477 struct xlog_in_core *iclog)
1439{ 1478{
1440 xfs_caddr_t dptr; /* pointer to byte sized element */ 1479 xfs_caddr_t dptr; /* pointer to byte sized element */
1441 xfs_buf_t *bp; 1480 xfs_buf_t *bp;
@@ -1584,7 +1623,8 @@ xlog_sync(xlog_t *log,
1584 * Deallocate a log structure 1623 * Deallocate a log structure
1585 */ 1624 */
1586STATIC void 1625STATIC void
1587xlog_dealloc_log(xlog_t *log) 1626xlog_dealloc_log(
1627 struct xlog *log)
1588{ 1628{
1589 xlog_in_core_t *iclog, *next_iclog; 1629 xlog_in_core_t *iclog, *next_iclog;
1590 int i; 1630 int i;
@@ -1616,10 +1656,11 @@ xlog_dealloc_log(xlog_t *log)
1616 */ 1656 */
1617/* ARGSUSED */ 1657/* ARGSUSED */
1618static inline void 1658static inline void
1619xlog_state_finish_copy(xlog_t *log, 1659xlog_state_finish_copy(
1620 xlog_in_core_t *iclog, 1660 struct xlog *log,
1621 int record_cnt, 1661 struct xlog_in_core *iclog,
1622 int copy_bytes) 1662 int record_cnt,
1663 int copy_bytes)
1623{ 1664{
1624 spin_lock(&log->l_icloglock); 1665 spin_lock(&log->l_icloglock);
1625 1666
@@ -2142,7 +2183,8 @@ xlog_write(
2142 * State Change: DIRTY -> ACTIVE 2183 * State Change: DIRTY -> ACTIVE
2143 */ 2184 */
2144STATIC void 2185STATIC void
2145xlog_state_clean_log(xlog_t *log) 2186xlog_state_clean_log(
2187 struct xlog *log)
2146{ 2188{
2147 xlog_in_core_t *iclog; 2189 xlog_in_core_t *iclog;
2148 int changed = 0; 2190 int changed = 0;
@@ -2222,7 +2264,7 @@ xlog_state_clean_log(xlog_t *log)
2222 2264
2223STATIC xfs_lsn_t 2265STATIC xfs_lsn_t
2224xlog_get_lowest_lsn( 2266xlog_get_lowest_lsn(
2225 xlog_t *log) 2267 struct xlog *log)
2226{ 2268{
2227 xlog_in_core_t *lsn_log; 2269 xlog_in_core_t *lsn_log;
2228 xfs_lsn_t lowest_lsn, lsn; 2270 xfs_lsn_t lowest_lsn, lsn;
@@ -2245,9 +2287,9 @@ xlog_get_lowest_lsn(
2245 2287
2246STATIC void 2288STATIC void
2247xlog_state_do_callback( 2289xlog_state_do_callback(
2248 xlog_t *log, 2290 struct xlog *log,
2249 int aborted, 2291 int aborted,
2250 xlog_in_core_t *ciclog) 2292 struct xlog_in_core *ciclog)
2251{ 2293{
2252 xlog_in_core_t *iclog; 2294 xlog_in_core_t *iclog;
2253 xlog_in_core_t *first_iclog; /* used to know when we've 2295 xlog_in_core_t *first_iclog; /* used to know when we've
@@ -2467,7 +2509,7 @@ xlog_state_done_syncing(
2467 xlog_in_core_t *iclog, 2509 xlog_in_core_t *iclog,
2468 int aborted) 2510 int aborted)
2469{ 2511{
2470 xlog_t *log = iclog->ic_log; 2512 struct xlog *log = iclog->ic_log;
2471 2513
2472 spin_lock(&log->l_icloglock); 2514 spin_lock(&log->l_icloglock);
2473 2515
@@ -2521,12 +2563,13 @@ xlog_state_done_syncing(
2521 * is copied. 2563 * is copied.
2522 */ 2564 */
2523STATIC int 2565STATIC int
2524xlog_state_get_iclog_space(xlog_t *log, 2566xlog_state_get_iclog_space(
2525 int len, 2567 struct xlog *log,
2526 xlog_in_core_t **iclogp, 2568 int len,
2527 xlog_ticket_t *ticket, 2569 struct xlog_in_core **iclogp,
2528 int *continued_write, 2570 struct xlog_ticket *ticket,
2529 int *logoffsetp) 2571 int *continued_write,
2572 int *logoffsetp)
2530{ 2573{
2531 int log_offset; 2574 int log_offset;
2532 xlog_rec_header_t *head; 2575 xlog_rec_header_t *head;
@@ -2631,8 +2674,9 @@ restart:
2631 * move grant reservation head forward. 2674 * move grant reservation head forward.
2632 */ 2675 */
2633STATIC void 2676STATIC void
2634xlog_regrant_reserve_log_space(xlog_t *log, 2677xlog_regrant_reserve_log_space(
2635 xlog_ticket_t *ticket) 2678 struct xlog *log,
2679 struct xlog_ticket *ticket)
2636{ 2680{
2637 trace_xfs_log_regrant_reserve_enter(log, ticket); 2681 trace_xfs_log_regrant_reserve_enter(log, ticket);
2638 2682
@@ -2677,8 +2721,9 @@ xlog_regrant_reserve_log_space(xlog_t *log,
2677 * in the current reservation field. 2721 * in the current reservation field.
2678 */ 2722 */
2679STATIC void 2723STATIC void
2680xlog_ungrant_log_space(xlog_t *log, 2724xlog_ungrant_log_space(
2681 xlog_ticket_t *ticket) 2725 struct xlog *log,
2726 struct xlog_ticket *ticket)
2682{ 2727{
2683 int bytes; 2728 int bytes;
2684 2729
@@ -2717,8 +2762,8 @@ xlog_ungrant_log_space(xlog_t *log,
2717 */ 2762 */
2718STATIC int 2763STATIC int
2719xlog_state_release_iclog( 2764xlog_state_release_iclog(
2720 xlog_t *log, 2765 struct xlog *log,
2721 xlog_in_core_t *iclog) 2766 struct xlog_in_core *iclog)
2722{ 2767{
2723 int sync = 0; /* do we sync? */ 2768 int sync = 0; /* do we sync? */
2724 2769
@@ -2768,9 +2813,10 @@ xlog_state_release_iclog(
2768 * that every data block. We have run out of space in this log record. 2813 * that every data block. We have run out of space in this log record.
2769 */ 2814 */
2770STATIC void 2815STATIC void
2771xlog_state_switch_iclogs(xlog_t *log, 2816xlog_state_switch_iclogs(
2772 xlog_in_core_t *iclog, 2817 struct xlog *log,
2773 int eventual_size) 2818 struct xlog_in_core *iclog,
2819 int eventual_size)
2774{ 2820{
2775 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); 2821 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
2776 if (!eventual_size) 2822 if (!eventual_size)
@@ -3114,7 +3160,9 @@ xfs_log_force_lsn(
3114 * disk. 3160 * disk.
3115 */ 3161 */
3116STATIC void 3162STATIC void
3117xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog) 3163xlog_state_want_sync(
3164 struct xlog *log,
3165 struct xlog_in_core *iclog)
3118{ 3166{
3119 assert_spin_locked(&log->l_icloglock); 3167 assert_spin_locked(&log->l_icloglock);
3120 3168
@@ -3158,7 +3206,7 @@ xfs_log_ticket_get(
3158/* 3206/*
3159 * Allocate and initialise a new log ticket. 3207 * Allocate and initialise a new log ticket.
3160 */ 3208 */
3161xlog_ticket_t * 3209struct xlog_ticket *
3162xlog_ticket_alloc( 3210xlog_ticket_alloc(
3163 struct xlog *log, 3211 struct xlog *log,
3164 int unit_bytes, 3212 int unit_bytes,
@@ -3346,9 +3394,10 @@ xlog_verify_grant_tail(
3346 3394
3347/* check if it will fit */ 3395/* check if it will fit */
3348STATIC void 3396STATIC void
3349xlog_verify_tail_lsn(xlog_t *log, 3397xlog_verify_tail_lsn(
3350 xlog_in_core_t *iclog, 3398 struct xlog *log,
3351 xfs_lsn_t tail_lsn) 3399 struct xlog_in_core *iclog,
3400 xfs_lsn_t tail_lsn)
3352{ 3401{
3353 int blocks; 3402 int blocks;
3354 3403
@@ -3385,10 +3434,11 @@ xlog_verify_tail_lsn(xlog_t *log,
3385 * the cycle numbers agree with the current cycle number. 3434 * the cycle numbers agree with the current cycle number.
3386 */ 3435 */
3387STATIC void 3436STATIC void
3388xlog_verify_iclog(xlog_t *log, 3437xlog_verify_iclog(
3389 xlog_in_core_t *iclog, 3438 struct xlog *log,
3390 int count, 3439 struct xlog_in_core *iclog,
3391 boolean_t syncing) 3440 int count,
3441 boolean_t syncing)
3392{ 3442{
3393 xlog_op_header_t *ophead; 3443 xlog_op_header_t *ophead;
3394 xlog_in_core_t *icptr; 3444 xlog_in_core_t *icptr;
@@ -3482,7 +3532,7 @@ xlog_verify_iclog(xlog_t *log,
3482 */ 3532 */
3483STATIC int 3533STATIC int
3484xlog_state_ioerror( 3534xlog_state_ioerror(
3485 xlog_t *log) 3535 struct xlog *log)
3486{ 3536{
3487 xlog_in_core_t *iclog, *ic; 3537 xlog_in_core_t *iclog, *ic;
3488 3538
@@ -3527,7 +3577,7 @@ xfs_log_force_umount(
3527 struct xfs_mount *mp, 3577 struct xfs_mount *mp,
3528 int logerror) 3578 int logerror)
3529{ 3579{
3530 xlog_t *log; 3580 struct xlog *log;
3531 int retval; 3581 int retval;
3532 3582
3533 log = mp->m_log; 3583 log = mp->m_log;
@@ -3634,7 +3684,8 @@ xfs_log_force_umount(
3634} 3684}
3635 3685
3636STATIC int 3686STATIC int
3637xlog_iclogs_empty(xlog_t *log) 3687xlog_iclogs_empty(
3688 struct xlog *log)
3638{ 3689{
3639 xlog_in_core_t *iclog; 3690 xlog_in_core_t *iclog;
3640 3691
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 72eba2201b14..18a801d76a42 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -487,7 +487,7 @@ struct xlog_grant_head {
487 * overflow 31 bits worth of byte offset, so using a byte number will mean 487 * overflow 31 bits worth of byte offset, so using a byte number will mean
488 * that round off problems won't occur when releasing partial reservations. 488 * that round off problems won't occur when releasing partial reservations.
489 */ 489 */
490typedef struct xlog { 490struct xlog {
491 /* The following fields don't need locking */ 491 /* The following fields don't need locking */
492 struct xfs_mount *l_mp; /* mount point */ 492 struct xfs_mount *l_mp; /* mount point */
493 struct xfs_ail *l_ailp; /* AIL log is working with */ 493 struct xfs_ail *l_ailp; /* AIL log is working with */
@@ -540,7 +540,7 @@ typedef struct xlog {
540 char *l_iclog_bak[XLOG_MAX_ICLOGS]; 540 char *l_iclog_bak[XLOG_MAX_ICLOGS];
541#endif 541#endif
542 542
543} xlog_t; 543};
544 544
545#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \ 545#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
546 ((log)->l_buf_cancel_table + ((__uint64_t)blkno % XLOG_BC_TABLE_SIZE)) 546 ((log)->l_buf_cancel_table + ((__uint64_t)blkno % XLOG_BC_TABLE_SIZE))
@@ -548,9 +548,17 @@ typedef struct xlog {
548#define XLOG_FORCED_SHUTDOWN(log) ((log)->l_flags & XLOG_IO_ERROR) 548#define XLOG_FORCED_SHUTDOWN(log) ((log)->l_flags & XLOG_IO_ERROR)
549 549
550/* common routines */ 550/* common routines */
551extern int xlog_recover(xlog_t *log); 551extern int
552extern int xlog_recover_finish(xlog_t *log); 552xlog_recover(
553extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int); 553 struct xlog *log);
554extern int
555xlog_recover_finish(
556 struct xlog *log);
557extern void
558xlog_pack_data(
559 struct xlog *log,
560 struct xlog_in_core *iclog,
561 int);
554 562
555extern kmem_zone_t *xfs_log_ticket_zone; 563extern kmem_zone_t *xfs_log_ticket_zone;
556struct xlog_ticket * 564struct xlog_ticket *
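[Editor's note] Much of the xfs_log.c, xfs_log_priv.h and xfs_log_recover.c churn in this patch is a mechanical conversion from the xlog_t typedef to plain struct xlog. Assuming the usual motivation for such a change, one payoff is that other headers can name the type with a bare forward declaration instead of seeing the full definition:

/*
 * Before: callers of the log code had to see the full typedef,
 *	typedef struct xlog { ... } xlog_t;
 * After: a forward declaration is enough for prototypes that only
 * pass pointers around.
 */
struct xlog;

extern int	xlog_recover(struct xlog *log);
extern int	xlog_recover_finish(struct xlog *log);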
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index a7be98abd6a9..5da3ace352bf 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -43,10 +43,18 @@
43#include "xfs_utils.h" 43#include "xfs_utils.h"
44#include "xfs_trace.h" 44#include "xfs_trace.h"
45 45
46STATIC int xlog_find_zeroed(xlog_t *, xfs_daddr_t *); 46STATIC int
47STATIC int xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t); 47xlog_find_zeroed(
48 struct xlog *,
49 xfs_daddr_t *);
50STATIC int
51xlog_clear_stale_blocks(
52 struct xlog *,
53 xfs_lsn_t);
48#if defined(DEBUG) 54#if defined(DEBUG)
49STATIC void xlog_recover_check_summary(xlog_t *); 55STATIC void
56xlog_recover_check_summary(
57 struct xlog *);
50#else 58#else
51#define xlog_recover_check_summary(log) 59#define xlog_recover_check_summary(log)
52#endif 60#endif
@@ -74,7 +82,7 @@ struct xfs_buf_cancel {
74 82
75static inline int 83static inline int
76xlog_buf_bbcount_valid( 84xlog_buf_bbcount_valid(
77 xlog_t *log, 85 struct xlog *log,
78 int bbcount) 86 int bbcount)
79{ 87{
80 return bbcount > 0 && bbcount <= log->l_logBBsize; 88 return bbcount > 0 && bbcount <= log->l_logBBsize;
@@ -87,7 +95,7 @@ xlog_buf_bbcount_valid(
87 */ 95 */
88STATIC xfs_buf_t * 96STATIC xfs_buf_t *
89xlog_get_bp( 97xlog_get_bp(
90 xlog_t *log, 98 struct xlog *log,
91 int nbblks) 99 int nbblks)
92{ 100{
93 struct xfs_buf *bp; 101 struct xfs_buf *bp;
@@ -138,10 +146,10 @@ xlog_put_bp(
138 */ 146 */
139STATIC xfs_caddr_t 147STATIC xfs_caddr_t
140xlog_align( 148xlog_align(
141 xlog_t *log, 149 struct xlog *log,
142 xfs_daddr_t blk_no, 150 xfs_daddr_t blk_no,
143 int nbblks, 151 int nbblks,
144 xfs_buf_t *bp) 152 struct xfs_buf *bp)
145{ 153{
146 xfs_daddr_t offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1); 154 xfs_daddr_t offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
147 155
@@ -155,10 +163,10 @@ xlog_align(
155 */ 163 */
156STATIC int 164STATIC int
157xlog_bread_noalign( 165xlog_bread_noalign(
158 xlog_t *log, 166 struct xlog *log,
159 xfs_daddr_t blk_no, 167 xfs_daddr_t blk_no,
160 int nbblks, 168 int nbblks,
161 xfs_buf_t *bp) 169 struct xfs_buf *bp)
162{ 170{
163 int error; 171 int error;
164 172
@@ -189,10 +197,10 @@ xlog_bread_noalign(
189 197
190STATIC int 198STATIC int
191xlog_bread( 199xlog_bread(
192 xlog_t *log, 200 struct xlog *log,
193 xfs_daddr_t blk_no, 201 xfs_daddr_t blk_no,
194 int nbblks, 202 int nbblks,
195 xfs_buf_t *bp, 203 struct xfs_buf *bp,
196 xfs_caddr_t *offset) 204 xfs_caddr_t *offset)
197{ 205{
198 int error; 206 int error;
@@ -211,10 +219,10 @@ xlog_bread(
211 */ 219 */
212STATIC int 220STATIC int
213xlog_bread_offset( 221xlog_bread_offset(
214 xlog_t *log, 222 struct xlog *log,
215 xfs_daddr_t blk_no, /* block to read from */ 223 xfs_daddr_t blk_no, /* block to read from */
216 int nbblks, /* blocks to read */ 224 int nbblks, /* blocks to read */
217 xfs_buf_t *bp, 225 struct xfs_buf *bp,
218 xfs_caddr_t offset) 226 xfs_caddr_t offset)
219{ 227{
220 xfs_caddr_t orig_offset = bp->b_addr; 228 xfs_caddr_t orig_offset = bp->b_addr;
@@ -241,10 +249,10 @@ xlog_bread_offset(
241 */ 249 */
242STATIC int 250STATIC int
243xlog_bwrite( 251xlog_bwrite(
244 xlog_t *log, 252 struct xlog *log,
245 xfs_daddr_t blk_no, 253 xfs_daddr_t blk_no,
246 int nbblks, 254 int nbblks,
247 xfs_buf_t *bp) 255 struct xfs_buf *bp)
248{ 256{
249 int error; 257 int error;
250 258
@@ -378,8 +386,8 @@ xlog_recover_iodone(
378 */ 386 */
379STATIC int 387STATIC int
380xlog_find_cycle_start( 388xlog_find_cycle_start(
381 xlog_t *log, 389 struct xlog *log,
382 xfs_buf_t *bp, 390 struct xfs_buf *bp,
383 xfs_daddr_t first_blk, 391 xfs_daddr_t first_blk,
384 xfs_daddr_t *last_blk, 392 xfs_daddr_t *last_blk,
385 uint cycle) 393 uint cycle)
@@ -421,7 +429,7 @@ xlog_find_cycle_start(
421 */ 429 */
422STATIC int 430STATIC int
423xlog_find_verify_cycle( 431xlog_find_verify_cycle(
424 xlog_t *log, 432 struct xlog *log,
425 xfs_daddr_t start_blk, 433 xfs_daddr_t start_blk,
426 int nbblks, 434 int nbblks,
427 uint stop_on_cycle_no, 435 uint stop_on_cycle_no,
@@ -490,7 +498,7 @@ out:
490 */ 498 */
491STATIC int 499STATIC int
492xlog_find_verify_log_record( 500xlog_find_verify_log_record(
493 xlog_t *log, 501 struct xlog *log,
494 xfs_daddr_t start_blk, 502 xfs_daddr_t start_blk,
495 xfs_daddr_t *last_blk, 503 xfs_daddr_t *last_blk,
496 int extra_bblks) 504 int extra_bblks)
@@ -600,7 +608,7 @@ out:
600 */ 608 */
601STATIC int 609STATIC int
602xlog_find_head( 610xlog_find_head(
603 xlog_t *log, 611 struct xlog *log,
604 xfs_daddr_t *return_head_blk) 612 xfs_daddr_t *return_head_blk)
605{ 613{
606 xfs_buf_t *bp; 614 xfs_buf_t *bp;
@@ -871,7 +879,7 @@ validate_head:
871 */ 879 */
872STATIC int 880STATIC int
873xlog_find_tail( 881xlog_find_tail(
874 xlog_t *log, 882 struct xlog *log,
875 xfs_daddr_t *head_blk, 883 xfs_daddr_t *head_blk,
876 xfs_daddr_t *tail_blk) 884 xfs_daddr_t *tail_blk)
877{ 885{
@@ -1080,7 +1088,7 @@ done:
1080 */ 1088 */
1081STATIC int 1089STATIC int
1082xlog_find_zeroed( 1090xlog_find_zeroed(
1083 xlog_t *log, 1091 struct xlog *log,
1084 xfs_daddr_t *blk_no) 1092 xfs_daddr_t *blk_no)
1085{ 1093{
1086 xfs_buf_t *bp; 1094 xfs_buf_t *bp;
@@ -1183,7 +1191,7 @@ bp_err:
1183 */ 1191 */
1184STATIC void 1192STATIC void
1185xlog_add_record( 1193xlog_add_record(
1186 xlog_t *log, 1194 struct xlog *log,
1187 xfs_caddr_t buf, 1195 xfs_caddr_t buf,
1188 int cycle, 1196 int cycle,
1189 int block, 1197 int block,
@@ -1205,7 +1213,7 @@ xlog_add_record(
1205 1213
1206STATIC int 1214STATIC int
1207xlog_write_log_records( 1215xlog_write_log_records(
1208 xlog_t *log, 1216 struct xlog *log,
1209 int cycle, 1217 int cycle,
1210 int start_block, 1218 int start_block,
1211 int blocks, 1219 int blocks,
@@ -1305,7 +1313,7 @@ xlog_write_log_records(
1305 */ 1313 */
1306STATIC int 1314STATIC int
1307xlog_clear_stale_blocks( 1315xlog_clear_stale_blocks(
1308 xlog_t *log, 1316 struct xlog *log,
1309 xfs_lsn_t tail_lsn) 1317 xfs_lsn_t tail_lsn)
1310{ 1318{
1311 int tail_cycle, head_cycle; 1319 int tail_cycle, head_cycle;
@@ -2050,11 +2058,11 @@ xfs_qm_dqcheck(
2050 */ 2058 */
2051STATIC void 2059STATIC void
2052xlog_recover_do_dquot_buffer( 2060xlog_recover_do_dquot_buffer(
2053 xfs_mount_t *mp, 2061 struct xfs_mount *mp,
2054 xlog_t *log, 2062 struct xlog *log,
2055 xlog_recover_item_t *item, 2063 struct xlog_recover_item *item,
2056 xfs_buf_t *bp, 2064 struct xfs_buf *bp,
2057 xfs_buf_log_format_t *buf_f) 2065 struct xfs_buf_log_format *buf_f)
2058{ 2066{
2059 uint type; 2067 uint type;
2060 2068
@@ -2108,9 +2116,9 @@ xlog_recover_do_dquot_buffer(
2108 */ 2116 */
2109STATIC int 2117STATIC int
2110xlog_recover_buffer_pass2( 2118xlog_recover_buffer_pass2(
2111 xlog_t *log, 2119 struct xlog *log,
2112 struct list_head *buffer_list, 2120 struct list_head *buffer_list,
2113 xlog_recover_item_t *item) 2121 struct xlog_recover_item *item)
2114{ 2122{
2115 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; 2123 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
2116 xfs_mount_t *mp = log->l_mp; 2124 xfs_mount_t *mp = log->l_mp;
@@ -2189,9 +2197,9 @@ xlog_recover_buffer_pass2(
2189 2197
2190STATIC int 2198STATIC int
2191xlog_recover_inode_pass2( 2199xlog_recover_inode_pass2(
2192 xlog_t *log, 2200 struct xlog *log,
2193 struct list_head *buffer_list, 2201 struct list_head *buffer_list,
2194 xlog_recover_item_t *item) 2202 struct xlog_recover_item *item)
2195{ 2203{
2196 xfs_inode_log_format_t *in_f; 2204 xfs_inode_log_format_t *in_f;
2197 xfs_mount_t *mp = log->l_mp; 2205 xfs_mount_t *mp = log->l_mp;
@@ -2452,14 +2460,14 @@ error:
2452} 2460}
2453 2461
2454/* 2462/*
2455 * Recover QUOTAOFF records. We simply make a note of it in the xlog_t 2463 * Recover QUOTAOFF records. We simply make a note of it in the xlog
2456 * structure, so that we know not to do any dquot item or dquot buffer recovery, 2464 * structure, so that we know not to do any dquot item or dquot buffer recovery,
2457 * of that type. 2465 * of that type.
2458 */ 2466 */
2459STATIC int 2467STATIC int
2460xlog_recover_quotaoff_pass1( 2468xlog_recover_quotaoff_pass1(
2461 xlog_t *log, 2469 struct xlog *log,
2462 xlog_recover_item_t *item) 2470 struct xlog_recover_item *item)
2463{ 2471{
2464 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr; 2472 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
2465 ASSERT(qoff_f); 2473 ASSERT(qoff_f);
@@ -2483,9 +2491,9 @@ xlog_recover_quotaoff_pass1(
2483 */ 2491 */
2484STATIC int 2492STATIC int
2485xlog_recover_dquot_pass2( 2493xlog_recover_dquot_pass2(
2486 xlog_t *log, 2494 struct xlog *log,
2487 struct list_head *buffer_list, 2495 struct list_head *buffer_list,
2488 xlog_recover_item_t *item) 2496 struct xlog_recover_item *item)
2489{ 2497{
2490 xfs_mount_t *mp = log->l_mp; 2498 xfs_mount_t *mp = log->l_mp;
2491 xfs_buf_t *bp; 2499 xfs_buf_t *bp;
@@ -2578,9 +2586,9 @@ xlog_recover_dquot_pass2(
2578 */ 2586 */
2579STATIC int 2587STATIC int
2580xlog_recover_efi_pass2( 2588xlog_recover_efi_pass2(
2581 xlog_t *log, 2589 struct xlog *log,
2582 xlog_recover_item_t *item, 2590 struct xlog_recover_item *item,
2583 xfs_lsn_t lsn) 2591 xfs_lsn_t lsn)
2584{ 2592{
2585 int error; 2593 int error;
2586 xfs_mount_t *mp = log->l_mp; 2594 xfs_mount_t *mp = log->l_mp;
@@ -2616,8 +2624,8 @@ xlog_recover_efi_pass2(
2616 */ 2624 */
2617STATIC int 2625STATIC int
2618xlog_recover_efd_pass2( 2626xlog_recover_efd_pass2(
2619 xlog_t *log, 2627 struct xlog *log,
2620 xlog_recover_item_t *item) 2628 struct xlog_recover_item *item)
2621{ 2629{
2622 xfs_efd_log_format_t *efd_formatp; 2630 xfs_efd_log_format_t *efd_formatp;
2623 xfs_efi_log_item_t *efip = NULL; 2631 xfs_efi_log_item_t *efip = NULL;
@@ -2812,9 +2820,9 @@ xlog_recover_unmount_trans(
2812 */ 2820 */
2813STATIC int 2821STATIC int
2814xlog_recover_process_data( 2822xlog_recover_process_data(
2815 xlog_t *log, 2823 struct xlog *log,
2816 struct hlist_head rhash[], 2824 struct hlist_head rhash[],
2817 xlog_rec_header_t *rhead, 2825 struct xlog_rec_header *rhead,
2818 xfs_caddr_t dp, 2826 xfs_caddr_t dp,
2819 int pass) 2827 int pass)
2820{ 2828{
@@ -2986,7 +2994,7 @@ abort_error:
2986 */ 2994 */
2987STATIC int 2995STATIC int
2988xlog_recover_process_efis( 2996xlog_recover_process_efis(
2989 xlog_t *log) 2997 struct xlog *log)
2990{ 2998{
2991 xfs_log_item_t *lip; 2999 xfs_log_item_t *lip;
2992 xfs_efi_log_item_t *efip; 3000 xfs_efi_log_item_t *efip;
@@ -3098,7 +3106,7 @@ xlog_recover_process_one_iunlink(
3098 /* 3106 /*
3099 * Get the on disk inode to find the next inode in the bucket. 3107 * Get the on disk inode to find the next inode in the bucket.
3100 */ 3108 */
3101 error = xfs_itobp(mp, NULL, ip, &dip, &ibp, 0); 3109 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
3102 if (error) 3110 if (error)
3103 goto fail_iput; 3111 goto fail_iput;
3104 3112
@@ -3147,7 +3155,7 @@ xlog_recover_process_one_iunlink(
3147 */ 3155 */
3148STATIC void 3156STATIC void
3149xlog_recover_process_iunlinks( 3157xlog_recover_process_iunlinks(
3150 xlog_t *log) 3158 struct xlog *log)
3151{ 3159{
3152 xfs_mount_t *mp; 3160 xfs_mount_t *mp;
3153 xfs_agnumber_t agno; 3161 xfs_agnumber_t agno;
@@ -3209,9 +3217,9 @@ xlog_recover_process_iunlinks(
3209#ifdef DEBUG 3217#ifdef DEBUG
3210STATIC void 3218STATIC void
3211xlog_pack_data_checksum( 3219xlog_pack_data_checksum(
3212 xlog_t *log, 3220 struct xlog *log,
3213 xlog_in_core_t *iclog, 3221 struct xlog_in_core *iclog,
3214 int size) 3222 int size)
3215{ 3223{
3216 int i; 3224 int i;
3217 __be32 *up; 3225 __be32 *up;
@@ -3234,8 +3242,8 @@ xlog_pack_data_checksum(
3234 */ 3242 */
3235void 3243void
3236xlog_pack_data( 3244xlog_pack_data(
3237 xlog_t *log, 3245 struct xlog *log,
3238 xlog_in_core_t *iclog, 3246 struct xlog_in_core *iclog,
3239 int roundoff) 3247 int roundoff)
3240{ 3248{
3241 int i, j, k; 3249 int i, j, k;
@@ -3274,9 +3282,9 @@ xlog_pack_data(
3274 3282
3275STATIC void 3283STATIC void
3276xlog_unpack_data( 3284xlog_unpack_data(
3277 xlog_rec_header_t *rhead, 3285 struct xlog_rec_header *rhead,
3278 xfs_caddr_t dp, 3286 xfs_caddr_t dp,
3279 xlog_t *log) 3287 struct xlog *log)
3280{ 3288{
3281 int i, j, k; 3289 int i, j, k;
3282 3290
@@ -3299,8 +3307,8 @@ xlog_unpack_data(
3299 3307
3300STATIC int 3308STATIC int
3301xlog_valid_rec_header( 3309xlog_valid_rec_header(
3302 xlog_t *log, 3310 struct xlog *log,
3303 xlog_rec_header_t *rhead, 3311 struct xlog_rec_header *rhead,
3304 xfs_daddr_t blkno) 3312 xfs_daddr_t blkno)
3305{ 3313{
3306 int hlen; 3314 int hlen;
@@ -3343,7 +3351,7 @@ xlog_valid_rec_header(
3343 */ 3351 */
3344STATIC int 3352STATIC int
3345xlog_do_recovery_pass( 3353xlog_do_recovery_pass(
3346 xlog_t *log, 3354 struct xlog *log,
3347 xfs_daddr_t head_blk, 3355 xfs_daddr_t head_blk,
3348 xfs_daddr_t tail_blk, 3356 xfs_daddr_t tail_blk,
3349 int pass) 3357 int pass)
@@ -3595,7 +3603,7 @@ xlog_do_recovery_pass(
3595 */ 3603 */
3596STATIC int 3604STATIC int
3597xlog_do_log_recovery( 3605xlog_do_log_recovery(
3598 xlog_t *log, 3606 struct xlog *log,
3599 xfs_daddr_t head_blk, 3607 xfs_daddr_t head_blk,
3600 xfs_daddr_t tail_blk) 3608 xfs_daddr_t tail_blk)
3601{ 3609{
@@ -3646,7 +3654,7 @@ xlog_do_log_recovery(
3646 */ 3654 */
3647STATIC int 3655STATIC int
3648xlog_do_recover( 3656xlog_do_recover(
3649 xlog_t *log, 3657 struct xlog *log,
3650 xfs_daddr_t head_blk, 3658 xfs_daddr_t head_blk,
3651 xfs_daddr_t tail_blk) 3659 xfs_daddr_t tail_blk)
3652{ 3660{
@@ -3721,7 +3729,7 @@ xlog_do_recover(
3721 */ 3729 */
3722int 3730int
3723xlog_recover( 3731xlog_recover(
3724 xlog_t *log) 3732 struct xlog *log)
3725{ 3733{
3726 xfs_daddr_t head_blk, tail_blk; 3734 xfs_daddr_t head_blk, tail_blk;
3727 int error; 3735 int error;
@@ -3767,7 +3775,7 @@ xlog_recover(
3767 */ 3775 */
3768int 3776int
3769xlog_recover_finish( 3777xlog_recover_finish(
3770 xlog_t *log) 3778 struct xlog *log)
3771{ 3779{
3772 /* 3780 /*
3773 * Now we're ready to do the transactions needed for the 3781 * Now we're ready to do the transactions needed for the
@@ -3814,7 +3822,7 @@ xlog_recover_finish(
3814 */ 3822 */
3815void 3823void
3816xlog_recover_check_summary( 3824xlog_recover_check_summary(
3817 xlog_t *log) 3825 struct xlog *log)
3818{ 3826{
3819 xfs_mount_t *mp; 3827 xfs_mount_t *mp;
3820 xfs_agf_t *agfp; 3828 xfs_agf_t *agfp;
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 536021fb3d4e..711ca51ca3d7 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1200,8 +1200,6 @@ xfs_mountfs(
1200 1200
1201 xfs_set_maxicount(mp); 1201 xfs_set_maxicount(mp);
1202 1202
1203 mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog);
1204
1205 error = xfs_uuid_mount(mp); 1203 error = xfs_uuid_mount(mp);
1206 if (error) 1204 if (error)
1207 goto out; 1205 goto out;
@@ -1531,6 +1529,15 @@ xfs_unmountfs(
1531 xfs_ail_push_all_sync(mp->m_ail); 1529 xfs_ail_push_all_sync(mp->m_ail);
1532 xfs_wait_buftarg(mp->m_ddev_targp); 1530 xfs_wait_buftarg(mp->m_ddev_targp);
1533 1531
1532 /*
1533 * The superblock buffer is uncached and xfsaild_push() will lock and
1534 * set the XBF_ASYNC flag on the buffer. We cannot do xfs_buf_iowait()
1535 * here but a lock on the superblock buffer will block until iodone()
1536 * has completed.
1537 */
1538 xfs_buf_lock(mp->m_sb_bp);
1539 xfs_buf_unlock(mp->m_sb_bp);
1540
1534 xfs_log_unmount_write(mp); 1541 xfs_log_unmount_write(mp);
1535 xfs_log_unmount(mp); 1542 xfs_log_unmount(mp);
1536 xfs_uuid_unmount(mp); 1543 xfs_uuid_unmount(mp);
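
The comment added above explains the trick: the superblock write may have been issued asynchronously by xfsaild, so there is no owner left to call xfs_buf_iowait() on the buffer. A minimal sketch of the idea, using only the calls visible in the hunk (the helper name is made up for illustration):

	/* Illustrative only; not part of the patch. */
	static void wait_for_sb_write(struct xfs_mount *mp)
	{
		/*
		 * The buffer lock is held until iodone() processing for the
		 * async write completes, so cycling it is an implicit wait.
		 */
		xfs_buf_lock(mp->m_sb_bp);
		xfs_buf_unlock(mp->m_sb_bp);
	}
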
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 90c1fc9eaea4..8724336a9a08 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -176,7 +176,6 @@ typedef struct xfs_mount {
176 uint m_qflags; /* quota status flags */ 176 uint m_qflags; /* quota status flags */
177 xfs_trans_reservations_t m_reservations;/* precomputed res values */ 177 xfs_trans_reservations_t m_reservations;/* precomputed res values */
178 __uint64_t m_maxicount; /* maximum inode count */ 178 __uint64_t m_maxicount; /* maximum inode count */
179 __uint64_t m_maxioffset; /* maximum inode offset */
180 __uint64_t m_resblks; /* total reserved blocks */ 179 __uint64_t m_resblks; /* total reserved blocks */
181 __uint64_t m_resblks_avail;/* available reserved blocks */ 180 __uint64_t m_resblks_avail;/* available reserved blocks */
182 __uint64_t m_resblks_save; /* reserved blks @ remount,ro */ 181 __uint64_t m_resblks_save; /* reserved blks @ remount,ro */
@@ -297,8 +296,6 @@ xfs_preferred_iosize(xfs_mount_t *mp)
297 PAGE_CACHE_SIZE)); 296 PAGE_CACHE_SIZE));
298} 297}
299 298
300#define XFS_MAXIOFFSET(mp) ((mp)->m_maxioffset)
301
302#define XFS_LAST_UNMOUNT_WAS_CLEAN(mp) \ 299#define XFS_LAST_UNMOUNT_WAS_CLEAN(mp) \
303 ((mp)->m_flags & XFS_MOUNT_WAS_CLEAN) 300 ((mp)->m_flags & XFS_MOUNT_WAS_CLEAN)
304#define XFS_FORCED_SHUTDOWN(mp) ((mp)->m_flags & XFS_MOUNT_FS_SHUTDOWN) 301#define XFS_FORCED_SHUTDOWN(mp) ((mp)->m_flags & XFS_MOUNT_FS_SHUTDOWN)
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 249db1987764..2e86fa0cfc0d 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -940,7 +940,7 @@ xfs_qm_dqiterate(
940 map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP); 940 map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
941 941
942 lblkno = 0; 942 lblkno = 0;
943 maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); 943 maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
944 do { 944 do {
945 nmaps = XFS_DQITER_MAP_SIZE; 945 nmaps = XFS_DQITER_MAP_SIZE;
946 /* 946 /*
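
With m_maxioffset and XFS_MAXIOFFSET() removed (see the xfs_mount.[ch] hunks below), callers now read the limit straight from the VFS super block, as the converted line above shows. A minimal before/after sketch; xfs_max_fsb() is a hypothetical helper used only to frame the change:

	/* Hypothetical helper, illustration only. */
	static xfs_fileoff_t xfs_max_fsb(struct xfs_mount *mp)
	{
		/* was: XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) */
		return XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	}
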
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 0d9de41a7151..bdaf4cb9f4a2 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -868,67 +868,14 @@ xfs_fs_inode_init_once(
868 "xfsino", ip->i_ino); 868 "xfsino", ip->i_ino);
869} 869}
870 870
871/*
872 * This is called by the VFS when dirtying inode metadata. This can happen
873 * for a few reasons, but we only care about timestamp updates, given that
874 * we handled the rest ourselves. In theory no other calls should happen,
875 * but for example generic_write_end() keeps dirtying the inode after
876 * updating i_size. Thus we check that the flags are exactly I_DIRTY_SYNC,
877 * and skip this call otherwise.
878 *
879 * We'll hopefull get a different method just for updating timestamps soon,
880 * at which point this hack can go away, and maybe we'll also get real
881 * error handling here.
882 */
883STATIC void
884xfs_fs_dirty_inode(
885 struct inode *inode,
886 int flags)
887{
888 struct xfs_inode *ip = XFS_I(inode);
889 struct xfs_mount *mp = ip->i_mount;
890 struct xfs_trans *tp;
891 int error;
892
893 if (flags != I_DIRTY_SYNC)
894 return;
895
896 trace_xfs_dirty_inode(ip);
897
898 tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
899 error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
900 if (error) {
901 xfs_trans_cancel(tp, 0);
902 goto trouble;
903 }
904 xfs_ilock(ip, XFS_ILOCK_EXCL);
905 /*
906 * Grab all the latest timestamps from the Linux inode.
907 */
908 ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec;
909 ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec;
910 ip->i_d.di_ctime.t_sec = (__int32_t)inode->i_ctime.tv_sec;
911 ip->i_d.di_ctime.t_nsec = (__int32_t)inode->i_ctime.tv_nsec;
912 ip->i_d.di_mtime.t_sec = (__int32_t)inode->i_mtime.tv_sec;
913 ip->i_d.di_mtime.t_nsec = (__int32_t)inode->i_mtime.tv_nsec;
914
915 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
916 xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
917 error = xfs_trans_commit(tp, 0);
918 if (error)
919 goto trouble;
920 return;
921
922trouble:
923 xfs_warn(mp, "failed to update timestamps for inode 0x%llx", ip->i_ino);
924}
925
926STATIC void 871STATIC void
927xfs_fs_evict_inode( 872xfs_fs_evict_inode(
928 struct inode *inode) 873 struct inode *inode)
929{ 874{
930 xfs_inode_t *ip = XFS_I(inode); 875 xfs_inode_t *ip = XFS_I(inode);
931 876
877 ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
878
932 trace_xfs_evict_inode(ip); 879 trace_xfs_evict_inode(ip);
933 880
934 truncate_inode_pages(&inode->i_data, 0); 881 truncate_inode_pages(&inode->i_data, 0);
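
The ->dirty_inode hook deleted above existed only to push timestamp updates into the log; its own comment already anticipated "a different method just for updating timestamps", and this series switches to the VFS ->update_time path (note the xfs_update_time trace point added in xfs_trace.h below). A rough sketch of the shape such a method takes, reusing only calls from the removed code; the prototype and the S_ATIME/S_CTIME/S_MTIME flag values are assumptions about the 3.6-era VFS interface, not taken from this patch:

	/* Illustrative sketch only; not the actual XFS ->update_time. */
	STATIC int
	example_update_time(
		struct inode		*inode,
		struct timespec		*now,
		int			flags)		/* flag values assumed */
	{
		struct xfs_inode	*ip = XFS_I(inode);
		struct xfs_mount	*mp = ip->i_mount;
		struct xfs_trans	*tp;
		int			error;

		tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
		error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		if (flags & S_ATIME) {
			inode->i_atime = *now;
			ip->i_d.di_atime.t_sec = (__int32_t)now->tv_sec;
			ip->i_d.di_atime.t_nsec = (__int32_t)now->tv_nsec;
		}
		if (flags & S_CTIME) {
			inode->i_ctime = *now;
			ip->i_d.di_ctime.t_sec = (__int32_t)now->tv_sec;
			ip->i_d.di_ctime.t_nsec = (__int32_t)now->tv_nsec;
		}
		if (flags & S_MTIME) {
			inode->i_mtime = *now;
			ip->i_d.di_mtime.t_sec = (__int32_t)now->tv_sec;
			ip->i_d.di_mtime.t_nsec = (__int32_t)now->tv_nsec;
		}
		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
		return xfs_trans_commit(tp, 0);
	}
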
@@ -937,22 +884,6 @@ xfs_fs_evict_inode(
937 XFS_STATS_INC(vn_remove); 884 XFS_STATS_INC(vn_remove);
938 XFS_STATS_DEC(vn_active); 885 XFS_STATS_DEC(vn_active);
939 886
940 /*
941 * The iolock is used by the file system to coordinate reads,
942 * writes, and block truncates. Up to this point the lock
943 * protected concurrent accesses by users of the inode. But
944 * from here forward we're doing some final processing of the
945 * inode because we're done with it, and although we reuse the
946 * iolock for protection it is really a distinct lock class
947 * (in the lockdep sense) from before. To keep lockdep happy
948 * (and basically indicate what we are doing), we explicitly
949 * re-init the iolock here.
950 */
951 ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
952 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
953 lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
954 &xfs_iolock_reclaimable, "xfs_iolock_reclaimable");
955
956 xfs_inactive(ip); 887 xfs_inactive(ip);
957} 888}
958 889
@@ -1436,7 +1367,6 @@ xfs_fs_free_cached_objects(
1436static const struct super_operations xfs_super_operations = { 1367static const struct super_operations xfs_super_operations = {
1437 .alloc_inode = xfs_fs_alloc_inode, 1368 .alloc_inode = xfs_fs_alloc_inode,
1438 .destroy_inode = xfs_fs_destroy_inode, 1369 .destroy_inode = xfs_fs_destroy_inode,
1439 .dirty_inode = xfs_fs_dirty_inode,
1440 .evict_inode = xfs_fs_evict_inode, 1370 .evict_inode = xfs_fs_evict_inode,
1441 .drop_inode = xfs_fs_drop_inode, 1371 .drop_inode = xfs_fs_drop_inode,
1442 .put_super = xfs_fs_put_super, 1372 .put_super = xfs_fs_put_super,
@@ -1491,13 +1421,9 @@ xfs_init_zones(void)
1491 if (!xfs_da_state_zone) 1421 if (!xfs_da_state_zone)
1492 goto out_destroy_btree_cur_zone; 1422 goto out_destroy_btree_cur_zone;
1493 1423
1494 xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
1495 if (!xfs_dabuf_zone)
1496 goto out_destroy_da_state_zone;
1497
1498 xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork"); 1424 xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
1499 if (!xfs_ifork_zone) 1425 if (!xfs_ifork_zone)
1500 goto out_destroy_dabuf_zone; 1426 goto out_destroy_da_state_zone;
1501 1427
1502 xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans"); 1428 xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
1503 if (!xfs_trans_zone) 1429 if (!xfs_trans_zone)
@@ -1514,9 +1440,8 @@ xfs_init_zones(void)
1514 * size possible under XFS. This wastes a little bit of memory, 1440 * size possible under XFS. This wastes a little bit of memory,
1515 * but it is much faster. 1441 * but it is much faster.
1516 */ 1442 */
1517 xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) + 1443 xfs_buf_item_zone = kmem_zone_init(sizeof(struct xfs_buf_log_item),
1518 (((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) / 1444 "xfs_buf_item");
1519 NBWORD) * sizeof(int))), "xfs_buf_item");
1520 if (!xfs_buf_item_zone) 1445 if (!xfs_buf_item_zone)
1521 goto out_destroy_log_item_desc_zone; 1446 goto out_destroy_log_item_desc_zone;
1522 1447
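
The sizing expression deleted above padded every buf log item with a worst-case dirty bitmap; the zone now holds only the bare structure, presumably because the log format, bitmap included, is sized per buffer map elsewhere in this series. Assuming XFS_BLF_CHUNK is 128 bytes and NBWORD is 32 bits (neither value appears in this diff), the saving works out as follows:

	/* Worked arithmetic under the assumptions above (illustration only):     */
	/*   XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK = 65536 / 128 = 512 chunks         */
	/*   512 chunks / NBWORD               = 512 / 32    = 16 bitmap words    */
	/*   16 * sizeof(int)                  = 64 bytes no longer baked per item */
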
@@ -1561,8 +1486,6 @@ xfs_init_zones(void)
1561 kmem_zone_destroy(xfs_trans_zone); 1486 kmem_zone_destroy(xfs_trans_zone);
1562 out_destroy_ifork_zone: 1487 out_destroy_ifork_zone:
1563 kmem_zone_destroy(xfs_ifork_zone); 1488 kmem_zone_destroy(xfs_ifork_zone);
1564 out_destroy_dabuf_zone:
1565 kmem_zone_destroy(xfs_dabuf_zone);
1566 out_destroy_da_state_zone: 1489 out_destroy_da_state_zone:
1567 kmem_zone_destroy(xfs_da_state_zone); 1490 kmem_zone_destroy(xfs_da_state_zone);
1568 out_destroy_btree_cur_zone: 1491 out_destroy_btree_cur_zone:
@@ -1590,7 +1513,6 @@ xfs_destroy_zones(void)
1590 kmem_zone_destroy(xfs_log_item_desc_zone); 1513 kmem_zone_destroy(xfs_log_item_desc_zone);
1591 kmem_zone_destroy(xfs_trans_zone); 1514 kmem_zone_destroy(xfs_trans_zone);
1592 kmem_zone_destroy(xfs_ifork_zone); 1515 kmem_zone_destroy(xfs_ifork_zone);
1593 kmem_zone_destroy(xfs_dabuf_zone);
1594 kmem_zone_destroy(xfs_da_state_zone); 1516 kmem_zone_destroy(xfs_da_state_zone);
1595 kmem_zone_destroy(xfs_btree_cur_zone); 1517 kmem_zone_destroy(xfs_btree_cur_zone);
1596 kmem_zone_destroy(xfs_bmap_free_item_zone); 1518 kmem_zone_destroy(xfs_bmap_free_item_zone);
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c
index 1e9ee064dbb2..97304f10e78a 100644
--- a/fs/xfs/xfs_sync.c
+++ b/fs/xfs/xfs_sync.c
@@ -359,6 +359,15 @@ xfs_quiesce_attr(
359 * added an item to the AIL, thus flush it again. 359 * added an item to the AIL, thus flush it again.
360 */ 360 */
361 xfs_ail_push_all_sync(mp->m_ail); 361 xfs_ail_push_all_sync(mp->m_ail);
362
363 /*
364 * The superblock buffer is uncached and xfsaild_push() will lock and
365 * set the XBF_ASYNC flag on the buffer. We cannot do xfs_buf_iowait()
366 * here but a lock on the superblock buffer will block until iodone()
367 * has completed.
368 */
369 xfs_buf_lock(mp->m_sb_bp);
370 xfs_buf_unlock(mp->m_sb_bp);
362} 371}
363 372
364static void 373static void
@@ -712,8 +721,8 @@ restart:
712 * Note that xfs_iflush will never block on the inode buffer lock, as 721 * Note that xfs_iflush will never block on the inode buffer lock, as
713 * xfs_ifree_cluster() can lock the inode buffer before it locks the 722 * xfs_ifree_cluster() can lock the inode buffer before it locks the
714 * ip->i_lock, and we are doing the exact opposite here. As a result, 723 * ip->i_lock, and we are doing the exact opposite here. As a result,
715 * doing a blocking xfs_itobp() to get the cluster buffer would result 724 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
716 * in an ABBA deadlock with xfs_ifree_cluster(). 725 * result in an ABBA deadlock with xfs_ifree_cluster().
717 * 726 *
718 * As xfs_ifree_cluser() must gather all inodes that are active in the 727 * As xfs_ifree_cluser() must gather all inodes that are active in the
719 * cache to mark them stale, if we hit this case we don't actually want 728 * cache to mark them stale, if we hit this case we don't actually want
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index caf5dabfd553..e5795dd6013a 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -578,8 +578,8 @@ DEFINE_INODE_EVENT(xfs_ioctl_setattr);
578DEFINE_INODE_EVENT(xfs_dir_fsync); 578DEFINE_INODE_EVENT(xfs_dir_fsync);
579DEFINE_INODE_EVENT(xfs_file_fsync); 579DEFINE_INODE_EVENT(xfs_file_fsync);
580DEFINE_INODE_EVENT(xfs_destroy_inode); 580DEFINE_INODE_EVENT(xfs_destroy_inode);
581DEFINE_INODE_EVENT(xfs_dirty_inode);
582DEFINE_INODE_EVENT(xfs_evict_inode); 581DEFINE_INODE_EVENT(xfs_evict_inode);
582DEFINE_INODE_EVENT(xfs_update_time);
583 583
584DEFINE_INODE_EVENT(xfs_dquot_dqalloc); 584DEFINE_INODE_EVENT(xfs_dquot_dqalloc);
585DEFINE_INODE_EVENT(xfs_dquot_dqdetach); 585DEFINE_INODE_EVENT(xfs_dquot_dqdetach);
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 7c37b533aa8e..bc2afd52a0b7 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -448,11 +448,51 @@ xfs_trans_t *xfs_trans_dup(xfs_trans_t *);
448int xfs_trans_reserve(xfs_trans_t *, uint, uint, uint, 448int xfs_trans_reserve(xfs_trans_t *, uint, uint, uint,
449 uint, uint); 449 uint, uint);
450void xfs_trans_mod_sb(xfs_trans_t *, uint, int64_t); 450void xfs_trans_mod_sb(xfs_trans_t *, uint, int64_t);
451struct xfs_buf *xfs_trans_get_buf(xfs_trans_t *, struct xfs_buftarg *, xfs_daddr_t, 451
452 int, uint); 452struct xfs_buf *xfs_trans_get_buf_map(struct xfs_trans *tp,
453int xfs_trans_read_buf(struct xfs_mount *, xfs_trans_t *, 453 struct xfs_buftarg *target,
454 struct xfs_buftarg *, xfs_daddr_t, int, uint, 454 struct xfs_buf_map *map, int nmaps,
455 struct xfs_buf **); 455 uint flags);
456
457static inline struct xfs_buf *
458xfs_trans_get_buf(
459 struct xfs_trans *tp,
460 struct xfs_buftarg *target,
461 xfs_daddr_t blkno,
462 int numblks,
463 uint flags)
464{
465 struct xfs_buf_map map = {
466 .bm_bn = blkno,
467 .bm_len = numblks,
468 };
469 return xfs_trans_get_buf_map(tp, target, &map, 1, flags);
470}
471
472int xfs_trans_read_buf_map(struct xfs_mount *mp,
473 struct xfs_trans *tp,
474 struct xfs_buftarg *target,
475 struct xfs_buf_map *map, int nmaps,
476 xfs_buf_flags_t flags,
477 struct xfs_buf **bpp);
478
479static inline int
480xfs_trans_read_buf(
481 struct xfs_mount *mp,
482 struct xfs_trans *tp,
483 struct xfs_buftarg *target,
484 xfs_daddr_t blkno,
485 int numblks,
486 xfs_buf_flags_t flags,
487 struct xfs_buf **bpp)
488{
489 struct xfs_buf_map map = {
490 .bm_bn = blkno,
491 .bm_len = numblks,
492 };
493 return xfs_trans_read_buf_map(mp, tp, target, &map, 1, flags, bpp);
494}
495
456struct xfs_buf *xfs_trans_getsb(xfs_trans_t *, struct xfs_mount *, int); 496struct xfs_buf *xfs_trans_getsb(xfs_trans_t *, struct xfs_mount *, int);
457 497
458void xfs_trans_brelse(xfs_trans_t *, struct xfs_buf *); 498void xfs_trans_brelse(xfs_trans_t *, struct xfs_buf *);
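
The old blkno/len entry points survive as the inline wrappers above, so existing single-extent callers compile unchanged while the new *_map variants accept several disk extents for one logical buffer. A minimal usage sketch using only names introduced in this hunk; the addresses and lengths are placeholders:

	/* Illustrative fragment: read one logical buffer backed by two extents. */
	xfs_daddr_t		daddr1 = 0, daddr2 = 1024;	/* placeholder addresses */
	struct xfs_buf_map	map[2] = {
		{ .bm_bn = daddr1, .bm_len = 8 },	/* lengths in basic blocks */
		{ .bm_bn = daddr2, .bm_len = 8 },
	};
	struct xfs_buf		*bp;
	int			error;

	error = xfs_trans_read_buf_map(mp, tp, mp->m_ddev_targp,
				       map, 2, 0, &bp);
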
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 9c514483e599..6011ee661339 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -383,6 +383,12 @@ xfsaild_push(
383 } 383 }
384 384
385 spin_lock(&ailp->xa_lock); 385 spin_lock(&ailp->xa_lock);
386
387 /* barrier matches the xa_target update in xfs_ail_push() */
388 smp_rmb();
389 target = ailp->xa_target;
390 ailp->xa_target_prev = target;
391
386 lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->xa_last_pushed_lsn); 392 lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->xa_last_pushed_lsn);
387 if (!lip) { 393 if (!lip) {
388 /* 394 /*
@@ -397,7 +403,6 @@ xfsaild_push(
397 XFS_STATS_INC(xs_push_ail); 403 XFS_STATS_INC(xs_push_ail);
398 404
399 lsn = lip->li_lsn; 405 lsn = lip->li_lsn;
400 target = ailp->xa_target;
401 while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) { 406 while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
402 int lock_result; 407 int lock_result;
403 408
@@ -527,8 +532,32 @@ xfsaild(
527 __set_current_state(TASK_KILLABLE); 532 __set_current_state(TASK_KILLABLE);
528 else 533 else
529 __set_current_state(TASK_INTERRUPTIBLE); 534 __set_current_state(TASK_INTERRUPTIBLE);
530 schedule_timeout(tout ? 535
531 msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT); 536 spin_lock(&ailp->xa_lock);
537
538 /*
539 * Idle if the AIL is empty and we are not racing with a target
540 * update. We check the AIL after we set the task to a sleep
541 * state to guarantee that we either catch an xa_target update
542 * or that a wake_up resets the state to TASK_RUNNING.
543 * Otherwise, we run the risk of sleeping indefinitely.
544 *
545 * The barrier matches the xa_target update in xfs_ail_push().
546 */
547 smp_rmb();
548 if (!xfs_ail_min(ailp) &&
549 ailp->xa_target == ailp->xa_target_prev) {
550 spin_unlock(&ailp->xa_lock);
551 schedule();
552 tout = 0;
553 continue;
554 }
555 spin_unlock(&ailp->xa_lock);
556
557 if (tout)
558 schedule_timeout(msecs_to_jiffies(tout));
559
560 __set_current_state(TASK_RUNNING);
532 561
533 try_to_freeze(); 562 try_to_freeze();
534 563
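
The new xa_target_prev field and the barrier comments above close a lost-wakeup window: the sleeper sets its task state before re-reading the AIL and the target, so a concurrent push either finds the task already runnable or the sleeper sees the moved target and skips the sleep. The push side is not part of this hunk; a rough sketch of the ordering the comments assume it provides:

	/* Sketch only; xfs_ail_push() itself is not shown in this diff. */
	ailp->xa_target = threshold_lsn;	/* publish the new target...        */
	smp_wmb();				/* ...before waking, pairing with   */
						/* the smp_rmb() calls in xfsaild() */
	wake_up_process(ailp->xa_task);
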
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 21c5a5e3700d..6311b99c267f 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -41,20 +41,26 @@ STATIC struct xfs_buf *
41xfs_trans_buf_item_match( 41xfs_trans_buf_item_match(
42 struct xfs_trans *tp, 42 struct xfs_trans *tp,
43 struct xfs_buftarg *target, 43 struct xfs_buftarg *target,
44 xfs_daddr_t blkno, 44 struct xfs_buf_map *map,
45 int len) 45 int nmaps)
46{ 46{
47 struct xfs_log_item_desc *lidp; 47 struct xfs_log_item_desc *lidp;
48 struct xfs_buf_log_item *blip; 48 struct xfs_buf_log_item *blip;
49 int len = 0;
50 int i;
51
52 for (i = 0; i < nmaps; i++)
53 len += map[i].bm_len;
49 54
50 len = BBTOB(len);
51 list_for_each_entry(lidp, &tp->t_items, lid_trans) { 55 list_for_each_entry(lidp, &tp->t_items, lid_trans) {
52 blip = (struct xfs_buf_log_item *)lidp->lid_item; 56 blip = (struct xfs_buf_log_item *)lidp->lid_item;
53 if (blip->bli_item.li_type == XFS_LI_BUF && 57 if (blip->bli_item.li_type == XFS_LI_BUF &&
54 blip->bli_buf->b_target == target && 58 blip->bli_buf->b_target == target &&
55 XFS_BUF_ADDR(blip->bli_buf) == blkno && 59 XFS_BUF_ADDR(blip->bli_buf) == map[0].bm_bn &&
56 BBTOB(blip->bli_buf->b_length) == len) 60 blip->bli_buf->b_length == len) {
61 ASSERT(blip->bli_buf->b_map_count == nmaps);
57 return blip->bli_buf; 62 return blip->bli_buf;
63 }
58 } 64 }
59 65
60 return NULL; 66 return NULL;
@@ -128,21 +134,19 @@ xfs_trans_bjoin(
128 * If the transaction pointer is NULL, make this just a normal 134 * If the transaction pointer is NULL, make this just a normal
129 * get_buf() call. 135 * get_buf() call.
130 */ 136 */
131xfs_buf_t * 137struct xfs_buf *
132xfs_trans_get_buf(xfs_trans_t *tp, 138xfs_trans_get_buf_map(
133 xfs_buftarg_t *target_dev, 139 struct xfs_trans *tp,
134 xfs_daddr_t blkno, 140 struct xfs_buftarg *target,
135 int len, 141 struct xfs_buf_map *map,
136 uint flags) 142 int nmaps,
143 xfs_buf_flags_t flags)
137{ 144{
138 xfs_buf_t *bp; 145 xfs_buf_t *bp;
139 xfs_buf_log_item_t *bip; 146 xfs_buf_log_item_t *bip;
140 147
141 /* 148 if (!tp)
142 * Default to a normal get_buf() call if the tp is NULL. 149 return xfs_buf_get_map(target, map, nmaps, flags);
143 */
144 if (tp == NULL)
145 return xfs_buf_get(target_dev, blkno, len, flags);
146 150
147 /* 151 /*
148 * If we find the buffer in the cache with this transaction 152 * If we find the buffer in the cache with this transaction
@@ -150,7 +154,7 @@ xfs_trans_get_buf(xfs_trans_t *tp,
150 * have it locked. In this case we just increment the lock 154 * have it locked. In this case we just increment the lock
151 * recursion count and return the buffer to the caller. 155 * recursion count and return the buffer to the caller.
152 */ 156 */
153 bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len); 157 bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
154 if (bp != NULL) { 158 if (bp != NULL) {
155 ASSERT(xfs_buf_islocked(bp)); 159 ASSERT(xfs_buf_islocked(bp));
156 if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) { 160 if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
@@ -167,7 +171,7 @@ xfs_trans_get_buf(xfs_trans_t *tp,
167 return (bp); 171 return (bp);
168 } 172 }
169 173
170 bp = xfs_buf_get(target_dev, blkno, len, flags); 174 bp = xfs_buf_get_map(target, map, nmaps, flags);
171 if (bp == NULL) { 175 if (bp == NULL) {
172 return NULL; 176 return NULL;
173 } 177 }
@@ -246,26 +250,22 @@ int xfs_error_mod = 33;
246 * read_buf() call. 250 * read_buf() call.
247 */ 251 */
248int 252int
249xfs_trans_read_buf( 253xfs_trans_read_buf_map(
250 xfs_mount_t *mp, 254 struct xfs_mount *mp,
251 xfs_trans_t *tp, 255 struct xfs_trans *tp,
252 xfs_buftarg_t *target, 256 struct xfs_buftarg *target,
253 xfs_daddr_t blkno, 257 struct xfs_buf_map *map,
254 int len, 258 int nmaps,
255 uint flags, 259 xfs_buf_flags_t flags,
256 xfs_buf_t **bpp) 260 struct xfs_buf **bpp)
257{ 261{
258 xfs_buf_t *bp; 262 xfs_buf_t *bp;
259 xfs_buf_log_item_t *bip; 263 xfs_buf_log_item_t *bip;
260 int error; 264 int error;
261 265
262 *bpp = NULL; 266 *bpp = NULL;
263 267 if (!tp) {
264 /* 268 bp = xfs_buf_read_map(target, map, nmaps, flags);
265 * Default to a normal get_buf() call if the tp is NULL.
266 */
267 if (tp == NULL) {
268 bp = xfs_buf_read(target, blkno, len, flags);
269 if (!bp) 269 if (!bp)
270 return (flags & XBF_TRYLOCK) ? 270 return (flags & XBF_TRYLOCK) ?
271 EAGAIN : XFS_ERROR(ENOMEM); 271 EAGAIN : XFS_ERROR(ENOMEM);
@@ -303,7 +303,7 @@ xfs_trans_read_buf(
303 * If the buffer is not yet read in, then we read it in, increment 303 * If the buffer is not yet read in, then we read it in, increment
304 * the lock recursion count, and return it to the caller. 304 * the lock recursion count, and return it to the caller.
305 */ 305 */
306 bp = xfs_trans_buf_item_match(tp, target, blkno, len); 306 bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
307 if (bp != NULL) { 307 if (bp != NULL) {
308 ASSERT(xfs_buf_islocked(bp)); 308 ASSERT(xfs_buf_islocked(bp));
309 ASSERT(bp->b_transp == tp); 309 ASSERT(bp->b_transp == tp);
@@ -349,7 +349,7 @@ xfs_trans_read_buf(
349 return 0; 349 return 0;
350 } 350 }
351 351
352 bp = xfs_buf_read(target, blkno, len, flags); 352 bp = xfs_buf_read_map(target, map, nmaps, flags);
353 if (bp == NULL) { 353 if (bp == NULL) {
354 *bpp = NULL; 354 *bpp = NULL;
355 return (flags & XBF_TRYLOCK) ? 355 return (flags & XBF_TRYLOCK) ?
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index fb62377d1cbc..53b7c9b0f8f7 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -67,6 +67,7 @@ struct xfs_ail {
67 struct task_struct *xa_task; 67 struct task_struct *xa_task;
68 struct list_head xa_ail; 68 struct list_head xa_ail;
69 xfs_lsn_t xa_target; 69 xfs_lsn_t xa_target;
70 xfs_lsn_t xa_target_prev;
70 struct list_head xa_cursors; 71 struct list_head xa_cursors;
71 spinlock_t xa_lock; 72 spinlock_t xa_lock;
72 xfs_lsn_t xa_last_pushed_lsn; 73 xfs_lsn_t xa_last_pushed_lsn;
diff --git a/fs/xfs/xfs_types.h b/fs/xfs/xfs_types.h
index 398cf681d025..7a41874f4c20 100644
--- a/fs/xfs/xfs_types.h
+++ b/fs/xfs/xfs_types.h
@@ -133,6 +133,20 @@ typedef __uint64_t xfs_filblks_t; /* number of blocks in a file */
133#define MAXAEXTNUM ((xfs_aextnum_t)0x7fff) /* signed short */ 133#define MAXAEXTNUM ((xfs_aextnum_t)0x7fff) /* signed short */
134 134
135/* 135/*
136 * Minimum and maximum blocksize and sectorsize.
137 * The blocksize upper limit is pretty much arbitrary.
138 * The sectorsize upper limit is due to sizeof(sb_sectsize).
139 */
140#define XFS_MIN_BLOCKSIZE_LOG 9 /* i.e. 512 bytes */
141#define XFS_MAX_BLOCKSIZE_LOG 16 /* i.e. 65536 bytes */
142#define XFS_MIN_BLOCKSIZE (1 << XFS_MIN_BLOCKSIZE_LOG)
143#define XFS_MAX_BLOCKSIZE (1 << XFS_MAX_BLOCKSIZE_LOG)
144#define XFS_MIN_SECTORSIZE_LOG 9 /* i.e. 512 bytes */
145#define XFS_MAX_SECTORSIZE_LOG 15 /* i.e. 32768 bytes */
146#define XFS_MIN_SECTORSIZE (1 << XFS_MIN_SECTORSIZE_LOG)
147#define XFS_MAX_SECTORSIZE (1 << XFS_MAX_SECTORSIZE_LOG)
148
149/*
136 * Min numbers of data/attr fork btree root pointers. 150 * Min numbers of data/attr fork btree root pointers.
137 */ 151 */
138#define MINDBTPTRS 3 152#define MINDBTPTRS 3
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
index 4e5b9ad5cb97..0025c78ac03c 100644
--- a/fs/xfs/xfs_utils.c
+++ b/fs/xfs/xfs_utils.c
@@ -65,7 +65,6 @@ xfs_dir_ialloc(
65 xfs_trans_t *ntp; 65 xfs_trans_t *ntp;
66 xfs_inode_t *ip; 66 xfs_inode_t *ip;
67 xfs_buf_t *ialloc_context = NULL; 67 xfs_buf_t *ialloc_context = NULL;
68 boolean_t call_again = B_FALSE;
69 int code; 68 int code;
70 uint log_res; 69 uint log_res;
71 uint log_count; 70 uint log_count;
@@ -91,7 +90,7 @@ xfs_dir_ialloc(
91 * the inode(s) that we've just allocated. 90 * the inode(s) that we've just allocated.
92 */ 91 */
93 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc, 92 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
94 &ialloc_context, &call_again, &ip); 93 &ialloc_context, &ip);
95 94
96 /* 95 /*
97 * Return an error if we were unable to allocate a new inode. 96 * Return an error if we were unable to allocate a new inode.
@@ -102,19 +101,18 @@ xfs_dir_ialloc(
102 *ipp = NULL; 101 *ipp = NULL;
103 return code; 102 return code;
104 } 103 }
105 if (!call_again && (ip == NULL)) { 104 if (!ialloc_context && !ip) {
106 *ipp = NULL; 105 *ipp = NULL;
107 return XFS_ERROR(ENOSPC); 106 return XFS_ERROR(ENOSPC);
108 } 107 }
109 108
110 /* 109 /*
111 * If call_again is set, then we were unable to get an 110 * If the AGI buffer is non-NULL, then we were unable to get an
112 * inode in one operation. We need to commit the current 111 * inode in one operation. We need to commit the current
113 * transaction and call xfs_ialloc() again. It is guaranteed 112 * transaction and call xfs_ialloc() again. It is guaranteed
114 * to succeed the second time. 113 * to succeed the second time.
115 */ 114 */
116 if (call_again) { 115 if (ialloc_context) {
117
118 /* 116 /*
119 * Normally, xfs_trans_commit releases all the locks. 117 * Normally, xfs_trans_commit releases all the locks.
120 * We call bhold to hang on to the ialloc_context across 118 * We call bhold to hang on to the ialloc_context across
@@ -195,7 +193,7 @@ xfs_dir_ialloc(
195 * this call should always succeed. 193 * this call should always succeed.
196 */ 194 */
197 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, 195 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
198 okalloc, &ialloc_context, &call_again, &ip); 196 okalloc, &ialloc_context, &ip);
199 197
200 /* 198 /*
201 * If we get an error at this point, return to the caller 199 * If we get an error at this point, return to the caller
@@ -206,12 +204,11 @@ xfs_dir_ialloc(
206 *ipp = NULL; 204 *ipp = NULL;
207 return code; 205 return code;
208 } 206 }
209 ASSERT ((!call_again) && (ip != NULL)); 207 ASSERT(!ialloc_context && ip);
210 208
211 } else { 209 } else {
212 if (committed != NULL) { 210 if (committed != NULL)
213 *committed = 0; 211 *committed = 0;
214 }
215 } 212 }
216 213
217 *ipp = ip; 214 *ipp = ip;
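
With call_again gone, a non-NULL ialloc_context (the held AGI buffer) is what tells xfs_dir_ialloc() that xfs_ialloc() needs a second pass in a fresh transaction. Condensing the retry logic around the elided commit/reserve in the hunk above (a sketch of existing behaviour, not new code):

	code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
			  &ialloc_context, &ip);
	if (code)
		return code;
	if (!ialloc_context && !ip)
		return XFS_ERROR(ENOSPC);
	if (ialloc_context) {
		xfs_trans_bhold(tp, ialloc_context);	/* keep the AGI buffer locked */
		/* commit this transaction and reserve a new one (elided above) */
		xfs_trans_bjoin(tp, ialloc_context);	/* rejoin it to the new tp */
		code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
				  &ialloc_context, &ip);
		if (code)
			return code;
		ASSERT(!ialloc_context && ip);		/* second pass must succeed */
	}
	*ipp = ip;
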
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index b6a82d817a82..2a5c637344b4 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -146,11 +146,6 @@ xfs_readlink(
146} 146}
147 147
148/* 148/*
149 * Flags for xfs_free_eofblocks
150 */
151#define XFS_FREE_EOF_TRYLOCK (1<<0)
152
153/*
154 * This is called by xfs_inactive to free any blocks beyond eof 149 * This is called by xfs_inactive to free any blocks beyond eof
155 * when the link count isn't zero and by xfs_dm_punch_hole() when 150 * when the link count isn't zero and by xfs_dm_punch_hole() when
156 * punching a hole to EOF. 151 * punching a hole to EOF.
@@ -159,7 +154,7 @@ STATIC int
159xfs_free_eofblocks( 154xfs_free_eofblocks(
160 xfs_mount_t *mp, 155 xfs_mount_t *mp,
161 xfs_inode_t *ip, 156 xfs_inode_t *ip,
162 int flags) 157 bool need_iolock)
163{ 158{
164 xfs_trans_t *tp; 159 xfs_trans_t *tp;
165 int error; 160 int error;
@@ -174,7 +169,7 @@ xfs_free_eofblocks(
174 * of the file. If not, then there is nothing to do. 169 * of the file. If not, then there is nothing to do.
175 */ 170 */
176 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip)); 171 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
177 last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); 172 last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
178 if (last_fsb <= end_fsb) 173 if (last_fsb <= end_fsb)
179 return 0; 174 return 0;
180 map_len = last_fsb - end_fsb; 175 map_len = last_fsb - end_fsb;
@@ -201,13 +196,11 @@ xfs_free_eofblocks(
201 */ 196 */
202 tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); 197 tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
203 198
204 if (flags & XFS_FREE_EOF_TRYLOCK) { 199 if (need_iolock) {
205 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { 200 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
206 xfs_trans_cancel(tp, 0); 201 xfs_trans_cancel(tp, 0);
207 return 0; 202 return 0;
208 } 203 }
209 } else {
210 xfs_ilock(ip, XFS_IOLOCK_EXCL);
211 } 204 }
212 205
213 error = xfs_trans_reserve(tp, 0, 206 error = xfs_trans_reserve(tp, 0,
@@ -217,7 +210,8 @@ xfs_free_eofblocks(
217 if (error) { 210 if (error) {
218 ASSERT(XFS_FORCED_SHUTDOWN(mp)); 211 ASSERT(XFS_FORCED_SHUTDOWN(mp));
219 xfs_trans_cancel(tp, 0); 212 xfs_trans_cancel(tp, 0);
220 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 213 if (need_iolock)
214 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
221 return error; 215 return error;
222 } 216 }
223 217
@@ -244,7 +238,10 @@ xfs_free_eofblocks(
244 error = xfs_trans_commit(tp, 238 error = xfs_trans_commit(tp,
245 XFS_TRANS_RELEASE_LOG_RES); 239 XFS_TRANS_RELEASE_LOG_RES);
246 } 240 }
247 xfs_iunlock(ip, XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL); 241
242 xfs_iunlock(ip, XFS_ILOCK_EXCL);
243 if (need_iolock)
244 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
248 } 245 }
249 return error; 246 return error;
250} 247}
@@ -282,23 +279,15 @@ xfs_inactive_symlink_rmt(
282 * free them all in one bunmapi call. 279 * free them all in one bunmapi call.
283 */ 280 */
284 ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2); 281 ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2);
285 if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 282
286 XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT))) {
287 ASSERT(XFS_FORCED_SHUTDOWN(mp));
288 xfs_trans_cancel(tp, 0);
289 *tpp = NULL;
290 return error;
291 }
292 /* 283 /*
293 * Lock the inode, fix the size, and join it to the transaction. 284 * Lock the inode, fix the size, and join it to the transaction.
294 * Hold it so in the normal path, we still have it locked for 285 * Hold it so in the normal path, we still have it locked for
295 * the second transaction. In the error paths we need it 286 * the second transaction. In the error paths we need it
296 * held so the cancel won't rele it, see below. 287 * held so the cancel won't rele it, see below.
297 */ 288 */
298 xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
299 size = (int)ip->i_d.di_size; 289 size = (int)ip->i_d.di_size;
300 ip->i_d.di_size = 0; 290 ip->i_d.di_size = 0;
301 xfs_trans_ijoin(tp, ip, 0);
302 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 291 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
303 /* 292 /*
304 * Find the block(s) so we can inval and unmap them. 293 * Find the block(s) so we can inval and unmap them.
@@ -385,114 +374,14 @@ xfs_inactive_symlink_rmt(
385 ASSERT(XFS_FORCED_SHUTDOWN(mp)); 374 ASSERT(XFS_FORCED_SHUTDOWN(mp));
386 goto error0; 375 goto error0;
387 } 376 }
388 /* 377
389 * Return with the inode locked but not joined to the transaction. 378 xfs_trans_ijoin(tp, ip, 0);
390 */
391 *tpp = tp; 379 *tpp = tp;
392 return 0; 380 return 0;
393 381
394 error1: 382 error1:
395 xfs_bmap_cancel(&free_list); 383 xfs_bmap_cancel(&free_list);
396 error0: 384 error0:
397 /*
398 * Have to come here with the inode locked and either
399 * (held and in the transaction) or (not in the transaction).
400 * If the inode isn't held then cancel would iput it, but
401 * that's wrong since this is inactive and the vnode ref
402 * count is 0 already.
403 * Cancel won't do anything to the inode if held, but it still
404 * needs to be locked until the cancel is done, if it was
405 * joined to the transaction.
406 */
407 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
408 xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
409 *tpp = NULL;
410 return error;
411
412}
413
414STATIC int
415xfs_inactive_symlink_local(
416 xfs_inode_t *ip,
417 xfs_trans_t **tpp)
418{
419 int error;
420
421 ASSERT(ip->i_d.di_size <= XFS_IFORK_DSIZE(ip));
422 /*
423 * We're freeing a symlink which fit into
424 * the inode. Just free the memory used
425 * to hold the old symlink.
426 */
427 error = xfs_trans_reserve(*tpp, 0,
428 XFS_ITRUNCATE_LOG_RES(ip->i_mount),
429 0, XFS_TRANS_PERM_LOG_RES,
430 XFS_ITRUNCATE_LOG_COUNT);
431
432 if (error) {
433 xfs_trans_cancel(*tpp, 0);
434 *tpp = NULL;
435 return error;
436 }
437 xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
438
439 /*
440 * Zero length symlinks _can_ exist.
441 */
442 if (ip->i_df.if_bytes > 0) {
443 xfs_idata_realloc(ip,
444 -(ip->i_df.if_bytes),
445 XFS_DATA_FORK);
446 ASSERT(ip->i_df.if_bytes == 0);
447 }
448 return 0;
449}
450
451STATIC int
452xfs_inactive_attrs(
453 xfs_inode_t *ip,
454 xfs_trans_t **tpp)
455{
456 xfs_trans_t *tp;
457 int error;
458 xfs_mount_t *mp;
459
460 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
461 tp = *tpp;
462 mp = ip->i_mount;
463 ASSERT(ip->i_d.di_forkoff != 0);
464 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
465 xfs_iunlock(ip, XFS_ILOCK_EXCL);
466 if (error)
467 goto error_unlock;
468
469 error = xfs_attr_inactive(ip);
470 if (error)
471 goto error_unlock;
472
473 tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
474 error = xfs_trans_reserve(tp, 0,
475 XFS_IFREE_LOG_RES(mp),
476 0, XFS_TRANS_PERM_LOG_RES,
477 XFS_INACTIVE_LOG_COUNT);
478 if (error)
479 goto error_cancel;
480
481 xfs_ilock(ip, XFS_ILOCK_EXCL);
482 xfs_trans_ijoin(tp, ip, 0);
483 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
484
485 ASSERT(ip->i_d.di_anextents == 0);
486
487 *tpp = tp;
488 return 0;
489
490error_cancel:
491 ASSERT(XFS_FORCED_SHUTDOWN(mp));
492 xfs_trans_cancel(tp, 0);
493error_unlock:
494 *tpp = NULL;
495 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
496 return error; 385 return error;
497} 386}
498 387
@@ -574,8 +463,7 @@ xfs_release(
574 if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE)) 463 if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
575 return 0; 464 return 0;
576 465
577 error = xfs_free_eofblocks(mp, ip, 466 error = xfs_free_eofblocks(mp, ip, true);
578 XFS_FREE_EOF_TRYLOCK);
579 if (error) 467 if (error)
580 return error; 468 return error;
581 469
@@ -604,7 +492,7 @@ xfs_inactive(
604 xfs_trans_t *tp; 492 xfs_trans_t *tp;
605 xfs_mount_t *mp; 493 xfs_mount_t *mp;
606 int error; 494 int error;
607 int truncate; 495 int truncate = 0;
608 496
609 /* 497 /*
610 * If the inode is already free, then there can be nothing 498 * If the inode is already free, then there can be nothing
@@ -616,17 +504,6 @@ xfs_inactive(
616 return VN_INACTIVE_CACHE; 504 return VN_INACTIVE_CACHE;
617 } 505 }
618 506
619 /*
620 * Only do a truncate if it's a regular file with
621 * some actual space in it. It's OK to look at the
622 * inode's fields without the lock because we're the
623 * only one with a reference to the inode.
624 */
625 truncate = ((ip->i_d.di_nlink == 0) &&
626 ((ip->i_d.di_size != 0) || XFS_ISIZE(ip) != 0 ||
627 (ip->i_d.di_nextents > 0) || (ip->i_delayed_blks > 0)) &&
628 S_ISREG(ip->i_d.di_mode));
629
630 mp = ip->i_mount; 507 mp = ip->i_mount;
631 508
632 error = 0; 509 error = 0;
@@ -643,99 +520,100 @@ xfs_inactive(
643 (!(ip->i_d.di_flags & 520 (!(ip->i_d.di_flags &
644 (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) || 521 (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
645 ip->i_delayed_blks != 0))) { 522 ip->i_delayed_blks != 0))) {
646 error = xfs_free_eofblocks(mp, ip, 0); 523 error = xfs_free_eofblocks(mp, ip, false);
647 if (error) 524 if (error)
648 return VN_INACTIVE_CACHE; 525 return VN_INACTIVE_CACHE;
649 } 526 }
650 goto out; 527 goto out;
651 } 528 }
652 529
653 ASSERT(ip->i_d.di_nlink == 0); 530 if (S_ISREG(ip->i_d.di_mode) &&
531 (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
532 ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
533 truncate = 1;
654 534
655 error = xfs_qm_dqattach(ip, 0); 535 error = xfs_qm_dqattach(ip, 0);
656 if (error) 536 if (error)
657 return VN_INACTIVE_CACHE; 537 return VN_INACTIVE_CACHE;
658 538
659 tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); 539 tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
660 if (truncate) { 540 error = xfs_trans_reserve(tp, 0,
661 xfs_ilock(ip, XFS_IOLOCK_EXCL); 541 (truncate || S_ISLNK(ip->i_d.di_mode)) ?
662 542 XFS_ITRUNCATE_LOG_RES(mp) :
663 error = xfs_trans_reserve(tp, 0, 543 XFS_IFREE_LOG_RES(mp),
664 XFS_ITRUNCATE_LOG_RES(mp), 544 0,
665 0, XFS_TRANS_PERM_LOG_RES, 545 XFS_TRANS_PERM_LOG_RES,
666 XFS_ITRUNCATE_LOG_COUNT); 546 XFS_ITRUNCATE_LOG_COUNT);
667 if (error) { 547 if (error) {
668 /* Don't call itruncate_cleanup */ 548 ASSERT(XFS_FORCED_SHUTDOWN(mp));
669 ASSERT(XFS_FORCED_SHUTDOWN(mp)); 549 xfs_trans_cancel(tp, 0);
670 xfs_trans_cancel(tp, 0); 550 return VN_INACTIVE_CACHE;
671 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 551 }
672 return VN_INACTIVE_CACHE;
673 }
674 552
675 xfs_ilock(ip, XFS_ILOCK_EXCL); 553 xfs_ilock(ip, XFS_ILOCK_EXCL);
676 xfs_trans_ijoin(tp, ip, 0); 554 xfs_trans_ijoin(tp, ip, 0);
677 555
556 if (S_ISLNK(ip->i_d.di_mode)) {
557 /*
558 * Zero length symlinks _can_ exist.
559 */
560 if (ip->i_d.di_size > XFS_IFORK_DSIZE(ip)) {
561 error = xfs_inactive_symlink_rmt(ip, &tp);
562 if (error)
563 goto out_cancel;
564 } else if (ip->i_df.if_bytes > 0) {
565 xfs_idata_realloc(ip, -(ip->i_df.if_bytes),
566 XFS_DATA_FORK);
567 ASSERT(ip->i_df.if_bytes == 0);
568 }
569 } else if (truncate) {
678 ip->i_d.di_size = 0; 570 ip->i_d.di_size = 0;
679 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 571 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
680 572
681 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0); 573 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
682 if (error) { 574 if (error)
683 xfs_trans_cancel(tp, 575 goto out_cancel;
684 XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
685 xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
686 return VN_INACTIVE_CACHE;
687 }
688 576
689 ASSERT(ip->i_d.di_nextents == 0); 577 ASSERT(ip->i_d.di_nextents == 0);
690 } else if (S_ISLNK(ip->i_d.di_mode)) { 578 }
691 579
692 /* 580 /*
693 * If we get an error while cleaning up a 581 * If there are attributes associated with the file then blow them away
694 * symlink we bail out. 582 * now. The code calls a routine that recursively deconstructs the
695 */ 583 * attribute fork. We need to just commit the current transaction
696 error = (ip->i_d.di_size > XFS_IFORK_DSIZE(ip)) ? 584 * because we can't use it for xfs_attr_inactive().
697 xfs_inactive_symlink_rmt(ip, &tp) : 585 */
698 xfs_inactive_symlink_local(ip, &tp); 586 if (ip->i_d.di_anextents > 0) {
587 ASSERT(ip->i_d.di_forkoff != 0);
699 588
700 if (error) { 589 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
701 ASSERT(tp == NULL); 590 if (error)
702 return VN_INACTIVE_CACHE; 591 goto out_unlock;
703 }
704 592
705 xfs_trans_ijoin(tp, ip, 0); 593 xfs_iunlock(ip, XFS_ILOCK_EXCL);
706 } else { 594
595 error = xfs_attr_inactive(ip);
596 if (error)
597 goto out;
598
599 tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
707 error = xfs_trans_reserve(tp, 0, 600 error = xfs_trans_reserve(tp, 0,
708 XFS_IFREE_LOG_RES(mp), 601 XFS_IFREE_LOG_RES(mp),
709 0, XFS_TRANS_PERM_LOG_RES, 602 0, XFS_TRANS_PERM_LOG_RES,
710 XFS_INACTIVE_LOG_COUNT); 603 XFS_INACTIVE_LOG_COUNT);
711 if (error) { 604 if (error) {
712 ASSERT(XFS_FORCED_SHUTDOWN(mp));
713 xfs_trans_cancel(tp, 0); 605 xfs_trans_cancel(tp, 0);
714 return VN_INACTIVE_CACHE; 606 goto out;
715 } 607 }
716 608
717 xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 609 xfs_ilock(ip, XFS_ILOCK_EXCL);
718 xfs_trans_ijoin(tp, ip, 0); 610 xfs_trans_ijoin(tp, ip, 0);
719 } 611 }
720 612
721 /* 613 if (ip->i_afp)
722 * If there are attributes associated with the file
723 * then blow them away now. The code calls a routine
724 * that recursively deconstructs the attribute fork.
725 * We need to just commit the current transaction
726 * because we can't use it for xfs_attr_inactive().
727 */
728 if (ip->i_d.di_anextents > 0) {
729 error = xfs_inactive_attrs(ip, &tp);
730 /*
731 * If we got an error, the transaction is already
732 * cancelled, and the inode is unlocked. Just get out.
733 */
734 if (error)
735 return VN_INACTIVE_CACHE;
736 } else if (ip->i_afp) {
737 xfs_idestroy_fork(ip, XFS_ATTR_FORK); 614 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
738 } 615
616 ASSERT(ip->i_d.di_anextents == 0);
739 617
740 /* 618 /*
741 * Free the inode. 619 * Free the inode.
@@ -779,10 +657,13 @@ xfs_inactive(
779 * Release the dquots held by inode, if any. 657 * Release the dquots held by inode, if any.
780 */ 658 */
781 xfs_qm_dqdetach(ip); 659 xfs_qm_dqdetach(ip);
782 xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); 660out_unlock:
783 661 xfs_iunlock(ip, XFS_ILOCK_EXCL);
784 out: 662out:
785 return VN_INACTIVE_CACHE; 663 return VN_INACTIVE_CACHE;
664out_cancel:
665 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
666 goto out_unlock;
786} 667}
787 668
788/* 669/*
@@ -2262,10 +2143,10 @@ xfs_change_file_space(
2262 2143
2263 llen = bf->l_len > 0 ? bf->l_len - 1 : bf->l_len; 2144 llen = bf->l_len > 0 ? bf->l_len - 1 : bf->l_len;
2264 2145
2265 if ( (bf->l_start < 0) 2146 if (bf->l_start < 0 ||
2266 || (bf->l_start > XFS_MAXIOFFSET(mp)) 2147 bf->l_start > mp->m_super->s_maxbytes ||
2267 || (bf->l_start + llen < 0) 2148 bf->l_start + llen < 0 ||
2268 || (bf->l_start + llen > XFS_MAXIOFFSET(mp))) 2149 bf->l_start + llen > mp->m_super->s_maxbytes)
2269 return XFS_ERROR(EINVAL); 2150 return XFS_ERROR(EINVAL);
2270 2151
2271 bf->l_whence = 0; 2152 bf->l_whence = 0;