commit     2d896c780db9cda5dc102bf7a0a2cd4394c1342e
tree       db58116f4b5c1539bdac1e3c680587784396037f
parent     2a9915c8a2e532f6c9b435e5f90008d601a87b90
parent     0f1145cc18e970ebe37da114fc34c297f135e062
author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-15 19:43:43 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-15 19:43:43 -0400

Merge branch 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6

* 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6: (37 commits)
  [XFS] Fix lockdep annotations for xfs_lock_inodes
  [LIB]: export radix_tree_preload()
  [XFS] Fix XFS_IOC_FSBULKSTAT{,_SINGLE} & XFS_IOC_FSINUMBERS in compat mode
  [XFS] Compat ioctl handler for handle operations
  [XFS] Compat ioctl handler for XFS_IOC_FSGEOMETRY_V1.
  [XFS] Clean up function name handling in tracing code
  [XFS] Quota inode has no parent.
  [XFS] Concurrent Multi-File Data Streams
  [XFS] Use uninitialized_var macro to stop warning about rtx
  [XFS] XFS should not be looking at filp reference counts
  [XFS] Use is_power_of_2 instead of open coding checks
  [XFS] Reduce shouting by removing unnecessary macros from dir2 code.
  [XFS] Simplify XFS min/max macros.
  [XFS] Kill off xfs_count_bits
  [XFS] Cancel transactions on xfs_itruncate_start error.
  [XFS] Use do_div() on 64 bit types.
  [XFS] Fix remount,readonly path to flush everything correctly.
  [XFS] Cleanup inode extent size hint extraction
  [XFS] Prevent ENOSPC from aborting transactions that need to succeed
  [XFS] Prevent deadlock when flushing inodes on unmount
  ...
-rw-r--r--  fs/xfs/Makefile-linux-2.6        |    2
-rw-r--r--  fs/xfs/linux-2.6/kmem.h          |   19
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c      |   43
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c       |   59
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.h       |    2
-rw-r--r--  fs/xfs/linux-2.6/xfs_file.c      |   11
-rw-r--r--  fs/xfs/linux-2.6/xfs_globals.c   |    1
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c     |    2
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl32.c   |  321
-rw-r--r--  fs/xfs/linux-2.6/xfs_linux.h     |    1
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c     |    5
-rw-r--r--  fs/xfs/linux-2.6/xfs_sysctl.c    |   11
-rw-r--r--  fs/xfs/linux-2.6/xfs_sysctl.h    |    2
-rw-r--r--  fs/xfs/linux-2.6/xfs_vfs.h       |   15
-rw-r--r--  fs/xfs/linux-2.6/xfs_vnode.h     |    5
-rw-r--r--  fs/xfs/quota/xfs_qm.c            |    9
-rw-r--r--  fs/xfs/xfs.h                     |    1
-rw-r--r--  fs/xfs/xfs_ag.h                  |    9
-rw-r--r--  fs/xfs/xfs_alloc.c               |  101
-rw-r--r--  fs/xfs/xfs_alloc.h               |    6
-rw-r--r--  fs/xfs/xfs_alloc_btree.c         |   20
-rw-r--r--  fs/xfs/xfs_bit.c                 |   91
-rw-r--r--  fs/xfs/xfs_bit.h                 |    4
-rw-r--r--  fs/xfs/xfs_bmap.c                |  369
-rw-r--r--  fs/xfs/xfs_bmap.h                |    6
-rw-r--r--  fs/xfs/xfs_bmap_btree.c          |   88
-rw-r--r--  fs/xfs/xfs_btree.h               |   32
-rw-r--r--  fs/xfs/xfs_buf_item.c            |    4
-rw-r--r--  fs/xfs/xfs_clnt.h                |    2
-rw-r--r--  fs/xfs/xfs_dinode.h              |    4
-rw-r--r--  fs/xfs/xfs_dir2.c                |   12
-rw-r--r--  fs/xfs/xfs_dir2_block.c          |   98
-rw-r--r--  fs/xfs/xfs_dir2_block.h          |    2
-rw-r--r--  fs/xfs/xfs_dir2_data.c           |   54
-rw-r--r--  fs/xfs/xfs_dir2_data.h           |   12
-rw-r--r--  fs/xfs/xfs_dir2_leaf.c           |  106
-rw-r--r--  fs/xfs/xfs_dir2_leaf.h           |   29
-rw-r--r--  fs/xfs/xfs_dir2_node.c           |   66
-rw-r--r--  fs/xfs/xfs_dir2_node.h           |    4
-rw-r--r--  fs/xfs/xfs_dir2_sf.c             |  204
-rw-r--r--  fs/xfs/xfs_dir2_sf.h             |   20
-rw-r--r--  fs/xfs/xfs_filestream.c          |  771
-rw-r--r--  fs/xfs/xfs_filestream.h          |  136
-rw-r--r--  fs/xfs/xfs_fs.h                  |    2
-rw-r--r--  fs/xfs/xfs_fsops.c               |   17
-rw-r--r--  fs/xfs/xfs_ialloc.c              |   28
-rw-r--r--  fs/xfs/xfs_ialloc.h              |   10
-rw-r--r--  fs/xfs/xfs_inode.c               |   39
-rw-r--r--  fs/xfs/xfs_inode.h               |   16
-rw-r--r--  fs/xfs/xfs_iomap.c               |   41
-rw-r--r--  fs/xfs/xfs_itable.c              |   42
-rw-r--r--  fs/xfs/xfs_itable.h              |   20
-rw-r--r--  fs/xfs/xfs_log.c                 |   41
-rw-r--r--  fs/xfs/xfs_log_recover.c         |    8
-rw-r--r--  fs/xfs/xfs_mount.c               |  237
-rw-r--r--  fs/xfs/xfs_mount.h               |   15
-rw-r--r--  fs/xfs/xfs_mru_cache.c           |  608
-rw-r--r--  fs/xfs/xfs_mru_cache.h           |   57
-rw-r--r--  fs/xfs/xfs_rtalloc.c             |    4
-rw-r--r--  fs/xfs/xfs_rw.h                  |   36
-rw-r--r--  fs/xfs/xfs_sb.h                  |   16
-rw-r--r--  fs/xfs/xfs_trans.c               |  125
-rw-r--r--  fs/xfs/xfs_trans.h               |    3
-rw-r--r--  fs/xfs/xfs_vfsops.c              |  159
-rw-r--r--  fs/xfs/xfs_vnodeops.c            |  119
-rw-r--r--  lib/radix-tree.c                 |    1
66 files changed, 3263 insertions(+), 1140 deletions(-)
diff --git a/fs/xfs/Makefile-linux-2.6 b/fs/xfs/Makefile-linux-2.6
index b49989bb89ad..e7a9a83f0087 100644
--- a/fs/xfs/Makefile-linux-2.6
+++ b/fs/xfs/Makefile-linux-2.6
@@ -64,6 +64,7 @@ xfs-y += xfs_alloc.o \
 	xfs_dir2_sf.o \
 	xfs_error.o \
 	xfs_extfree_item.o \
+	xfs_filestream.o \
 	xfs_fsops.o \
 	xfs_ialloc.o \
 	xfs_ialloc_btree.o \
@@ -77,6 +78,7 @@ xfs-y += xfs_alloc.o \
 	xfs_log.o \
 	xfs_log_recover.o \
 	xfs_mount.o \
+	xfs_mru_cache.o \
 	xfs_rename.o \
 	xfs_trans.o \
 	xfs_trans_ail.o \
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index 9ebabdf7829c..4b6470cf87f0 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -100,25 +100,6 @@ kmem_zone_destroy(kmem_zone_t *zone)
 extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
 extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
 
-/*
- * Low memory cache shrinkers
- */
-
-typedef struct shrinker *kmem_shaker_t;
-typedef int (*kmem_shake_func_t)(int, gfp_t);
-
-static inline kmem_shaker_t
-kmem_shake_register(kmem_shake_func_t sfunc)
-{
-	return set_shrinker(DEFAULT_SEEKS, sfunc);
-}
-
-static inline void
-kmem_shake_deregister(kmem_shaker_t shrinker)
-{
-	remove_shrinker(shrinker);
-}
-
 static inline int
 kmem_shake_allow(gfp_t gfp_mask)
 {
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 7361861e3aac..fd4105d662e0 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -108,14 +108,19 @@ xfs_page_trace(
 
 /*
  * Schedule IO completion handling on a xfsdatad if this was
- * the final hold on this ioend.
+ * the final hold on this ioend. If we are asked to wait,
+ * flush the workqueue.
  */
 STATIC void
 xfs_finish_ioend(
-	xfs_ioend_t		*ioend)
+	xfs_ioend_t		*ioend,
+	int			wait)
 {
-	if (atomic_dec_and_test(&ioend->io_remaining))
+	if (atomic_dec_and_test(&ioend->io_remaining)) {
 		queue_work(xfsdatad_workqueue, &ioend->io_work);
+		if (wait)
+			flush_workqueue(xfsdatad_workqueue);
+	}
 }
 
 /*
@@ -156,6 +161,8 @@ xfs_setfilesize(
 	xfs_fsize_t		bsize;
 
 	ip = xfs_vtoi(ioend->io_vnode);
+	if (!ip)
+		return;
 
 	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
 	ASSERT(ioend->io_type != IOMAP_READ);
@@ -334,7 +341,7 @@ xfs_end_bio(
 	bio->bi_end_io = NULL;
 	bio_put(bio);
 
-	xfs_finish_ioend(ioend);
+	xfs_finish_ioend(ioend, 0);
 	return 0;
 }
 
@@ -470,7 +477,7 @@ xfs_submit_ioend(
 		}
 		if (bio)
 			xfs_submit_ioend_bio(ioend, bio);
-		xfs_finish_ioend(ioend);
+		xfs_finish_ioend(ioend, 0);
 	} while ((ioend = next) != NULL);
 }
 
@@ -1003,6 +1010,8 @@ xfs_page_state_convert(
 		if (buffer_unwritten(bh) || buffer_delay(bh) ||
 		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
 		     !buffer_mapped(bh) && (unmapped || startio))) {
+			int new_ioend = 0;
+
 			/*
 			 * Make sure we don't use a read-only iomap
 			 */
@@ -1021,6 +1030,15 @@ xfs_page_state_convert(
 			}
 
 			if (!iomap_valid) {
+				/*
+				 * if we didn't have a valid mapping then we
+				 * need to ensure that we put the new mapping
+				 * in a new ioend structure. This needs to be
+				 * done to ensure that the ioends correctly
+				 * reflect the block mappings at io completion
+				 * for unwritten extent conversion.
+				 */
+				new_ioend = 1;
 				if (type == IOMAP_NEW) {
 					size = xfs_probe_cluster(inode,
 							page, bh, head, 0);
@@ -1040,7 +1058,7 @@ xfs_page_state_convert(
 			if (startio) {
 				xfs_add_to_ioend(inode, bh, offset,
 						type, &ioend,
-						!iomap_valid);
+						new_ioend);
 			} else {
 				set_buffer_dirty(bh);
 				unlock_buffer(bh);
@@ -1416,6 +1434,13 @@ xfs_end_io_direct(
 	 * This is not necessary for synchronous direct I/O, but we do
 	 * it anyway to keep the code uniform and simpler.
 	 *
+	 * Well, if only it were that simple. Because synchronous direct I/O
+	 * requires extent conversion to occur *before* we return to userspace,
+	 * we have to wait for extent conversion to complete. Look at the
+	 * iocb that has been passed to us to determine if this is AIO or
+	 * not. If it is synchronous, tell xfs_finish_ioend() to kick the
+	 * workqueue and wait for it to complete.
+	 *
 	 * The core direct I/O code might be changed to always call the
 	 * completion handler in the future, in which case all this can
 	 * go away.
@@ -1423,9 +1448,9 @@ xfs_end_io_direct(
 	ioend->io_offset = offset;
 	ioend->io_size = size;
 	if (ioend->io_type == IOMAP_READ) {
-		xfs_finish_ioend(ioend);
+		xfs_finish_ioend(ioend, 0);
 	} else if (private && size > 0) {
-		xfs_finish_ioend(ioend);
+		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
 	} else {
 		/*
 		 * A direct I/O write ioend starts it's life in unwritten
@@ -1434,7 +1459,7 @@ xfs_end_io_direct(
 		 * handler.
 		 */
 		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
-		xfs_finish_ioend(ioend);
+		xfs_finish_ioend(ioend, 0);
 	}
 
 	/*
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index fe4f66a5af14..2df63622354e 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -35,7 +35,7 @@
 #include <linux/freezer.h>
 
 static kmem_zone_t *xfs_buf_zone;
-static kmem_shaker_t xfs_buf_shake;
+static struct shrinker *xfs_buf_shake;
 STATIC int xfsbufd(void *);
 STATIC int xfsbufd_wakeup(int, gfp_t);
 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
@@ -314,7 +314,7 @@ xfs_buf_free(
 
 	ASSERT(list_empty(&bp->b_hash_list));
 
-	if (bp->b_flags & _XBF_PAGE_CACHE) {
+	if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
 		uint		i;
 
 		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
@@ -323,18 +323,11 @@ xfs_buf_free(
 		for (i = 0; i < bp->b_page_count; i++) {
 			struct page	*page = bp->b_pages[i];
 
-			ASSERT(!PagePrivate(page));
+			if (bp->b_flags & _XBF_PAGE_CACHE)
+				ASSERT(!PagePrivate(page));
 			page_cache_release(page);
 		}
 		_xfs_buf_free_pages(bp);
-	} else if (bp->b_flags & _XBF_KMEM_ALLOC) {
-		/*
-		 * XXX(hch): bp->b_count_desired might be incorrect (see
-		 * xfs_buf_associate_memory for details), but fortunately
-		 * the Linux version of kmem_free ignores the len argument..
-		 */
-		kmem_free(bp->b_addr, bp->b_count_desired);
-		_xfs_buf_free_pages(bp);
 	}
 
 	xfs_buf_deallocate(bp);
@@ -764,43 +757,44 @@ xfs_buf_get_noaddr(
 	size_t			len,
 	xfs_buftarg_t		*target)
 {
-	size_t			malloc_len = len;
+	unsigned long		page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
+	int			error, i;
 	xfs_buf_t		*bp;
-	void			*data;
-	int			error;
 
 	bp = xfs_buf_allocate(0);
 	if (unlikely(bp == NULL))
 		goto fail;
 	_xfs_buf_initialize(bp, target, 0, len, 0);
 
- try_again:
-	data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL | KM_LARGE);
-	if (unlikely(data == NULL))
+	error = _xfs_buf_get_pages(bp, page_count, 0);
+	if (error)
 		goto fail_free_buf;
 
-	/* check whether alignment matches.. */
-	if ((__psunsigned_t)data !=
-	    ((__psunsigned_t)data & ~target->bt_smask)) {
-		/* .. else double the size and try again */
-		kmem_free(data, malloc_len);
-		malloc_len <<= 1;
-		goto try_again;
+	for (i = 0; i < page_count; i++) {
+		bp->b_pages[i] = alloc_page(GFP_KERNEL);
+		if (!bp->b_pages[i])
+			goto fail_free_mem;
 	}
+	bp->b_flags |= _XBF_PAGES;
 
-	error = xfs_buf_associate_memory(bp, data, len);
-	if (error)
+	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
+	if (unlikely(error)) {
+		printk(KERN_WARNING "%s: failed to map pages\n",
+				__FUNCTION__);
 		goto fail_free_mem;
-	bp->b_flags |= _XBF_KMEM_ALLOC;
+	}
 
 	xfs_buf_unlock(bp);
 
-	XB_TRACE(bp, "no_daddr", data);
+	XB_TRACE(bp, "no_daddr", len);
 	return bp;
+
  fail_free_mem:
-	kmem_free(data, malloc_len);
+	while (--i >= 0)
+		__free_page(bp->b_pages[i]);
+	_xfs_buf_free_pages(bp);
  fail_free_buf:
-	xfs_buf_free(bp);
+	xfs_buf_deallocate(bp);
  fail:
 	return NULL;
 }
@@ -1453,6 +1447,7 @@ xfs_free_buftarg(
 	int		external)
 {
 	xfs_flush_buftarg(btp, 1);
+	xfs_blkdev_issue_flush(btp);
 	if (external)
 		xfs_blkdev_put(btp->bt_bdev);
 	xfs_free_bufhash(btp);
@@ -1837,7 +1832,7 @@ xfs_buf_init(void)
 	if (!xfsdatad_workqueue)
 		goto out_destroy_xfslogd_workqueue;
 
-	xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
+	xfs_buf_shake = set_shrinker(DEFAULT_SEEKS, xfsbufd_wakeup);
 	if (!xfs_buf_shake)
 		goto out_destroy_xfsdatad_workqueue;
 
@@ -1859,7 +1854,7 @@ xfs_buf_init(void)
 void
 xfs_buf_terminate(void)
 {
-	kmem_shake_deregister(xfs_buf_shake);
+	remove_shrinker(xfs_buf_shake);
 	destroy_workqueue(xfsdatad_workqueue);
 	destroy_workqueue(xfslogd_workqueue);
 	kmem_zone_destroy(xfs_buf_zone);
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index b6241f6201a5..b5908a34b15d 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -63,7 +63,7 @@ typedef enum {
 
 	/* flags used only internally */
 	_XBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */
-	_XBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc() */
+	_XBF_PAGES = (1 << 18),	    /* backed by refcounted pages */
 	_XBF_RUN_QUEUES = (1 << 19),/* run block device task queue */
 	_XBF_DELWRI_Q = (1 << 21),  /* buffer on delwri queue */
 } xfs_buf_flags_t;
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index 8c43cd2e237a..cbcd40c8c2a0 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -184,15 +184,6 @@ xfs_file_open(
 }
 
 STATIC int
-xfs_file_close(
-	struct file	*filp,
-	fl_owner_t	id)
-{
-	return -bhv_vop_close(vn_from_inode(filp->f_path.dentry->d_inode), 0,
-				file_count(filp) > 1 ? L_FALSE : L_TRUE, NULL);
-}
-
-STATIC int
 xfs_file_release(
 	struct inode	*inode,
 	struct file	*filp)
@@ -436,7 +427,6 @@ const struct file_operations xfs_file_operations = {
 #endif
 	.mmap		= xfs_file_mmap,
 	.open		= xfs_file_open,
-	.flush		= xfs_file_close,
 	.release	= xfs_file_release,
 	.fsync		= xfs_file_fsync,
 #ifdef HAVE_FOP_OPEN_EXEC
@@ -458,7 +448,6 @@ const struct file_operations xfs_invis_file_operations = {
 #endif
 	.mmap		= xfs_file_mmap,
 	.open		= xfs_file_open,
-	.flush		= xfs_file_close,
 	.release	= xfs_file_release,
 	.fsync		= xfs_file_fsync,
 };
diff --git a/fs/xfs/linux-2.6/xfs_globals.c b/fs/xfs/linux-2.6/xfs_globals.c
index ed3a5e1b4b67..bb72c3d4141f 100644
--- a/fs/xfs/linux-2.6/xfs_globals.c
+++ b/fs/xfs/linux-2.6/xfs_globals.c
@@ -46,6 +46,7 @@ xfs_param_t xfs_params = {
 	.inherit_nosym	= { 0, 0, 1 },
 	.rotorstep	= { 1, 1, 255 },
 	.inherit_nodfrg	= { 0, 1, 1 },
+	.fstrm_timer	= { 1, 50, 3600*100},
 };
 
 /*
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index ff5c41ff8d40..5917808abbd6 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -1019,7 +1019,7 @@ xfs_ioc_bulkstat(
 
 	if (cmd == XFS_IOC_FSINUMBERS)
 		error = xfs_inumbers(mp, &inlast, &count,
-					bulkreq.ubuffer);
+					bulkreq.ubuffer, xfs_inumbers_fmt);
 	else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
 		error = xfs_bulkstat_single(mp, &inlast,
 				bulkreq.ubuffer, &done);
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index b83cebc165f1..141cf15067c2 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -23,10 +23,25 @@
23#include <linux/fs.h> 23#include <linux/fs.h>
24#include <asm/uaccess.h> 24#include <asm/uaccess.h>
25#include "xfs.h" 25#include "xfs.h"
26#include "xfs_types.h"
27#include "xfs_fs.h" 26#include "xfs_fs.h"
27#include "xfs_bit.h"
28#include "xfs_log.h"
29#include "xfs_inum.h"
30#include "xfs_trans.h"
31#include "xfs_sb.h"
32#include "xfs_ag.h"
33#include "xfs_dir2.h"
34#include "xfs_dmapi.h"
35#include "xfs_mount.h"
36#include "xfs_bmap_btree.h"
37#include "xfs_attr_sf.h"
38#include "xfs_dir2_sf.h"
28#include "xfs_vfs.h" 39#include "xfs_vfs.h"
29#include "xfs_vnode.h" 40#include "xfs_vnode.h"
41#include "xfs_dinode.h"
42#include "xfs_inode.h"
43#include "xfs_itable.h"
44#include "xfs_error.h"
30#include "xfs_dfrag.h" 45#include "xfs_dfrag.h"
31 46
32#define _NATIVE_IOC(cmd, type) \ 47#define _NATIVE_IOC(cmd, type) \
@@ -34,6 +49,7 @@
34 49
35#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) 50#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
36#define BROKEN_X86_ALIGNMENT 51#define BROKEN_X86_ALIGNMENT
52#define _PACKED __attribute__((packed))
37/* on ia32 l_start is on a 32-bit boundary */ 53/* on ia32 l_start is on a 32-bit boundary */
38typedef struct xfs_flock64_32 { 54typedef struct xfs_flock64_32 {
39 __s16 l_type; 55 __s16 l_type;
@@ -75,35 +91,276 @@ xfs_ioctl32_flock(
75 return (unsigned long)p; 91 return (unsigned long)p;
76} 92}
77 93
94typedef struct compat_xfs_fsop_geom_v1 {
95 __u32 blocksize; /* filesystem (data) block size */
96 __u32 rtextsize; /* realtime extent size */
97 __u32 agblocks; /* fsblocks in an AG */
98 __u32 agcount; /* number of allocation groups */
99 __u32 logblocks; /* fsblocks in the log */
100 __u32 sectsize; /* (data) sector size, bytes */
101 __u32 inodesize; /* inode size in bytes */
102 __u32 imaxpct; /* max allowed inode space(%) */
103 __u64 datablocks; /* fsblocks in data subvolume */
104 __u64 rtblocks; /* fsblocks in realtime subvol */
105 __u64 rtextents; /* rt extents in realtime subvol*/
106 __u64 logstart; /* starting fsblock of the log */
107 unsigned char uuid[16]; /* unique id of the filesystem */
108 __u32 sunit; /* stripe unit, fsblocks */
109 __u32 swidth; /* stripe width, fsblocks */
110 __s32 version; /* structure version */
111 __u32 flags; /* superblock version flags */
112 __u32 logsectsize; /* log sector size, bytes */
113 __u32 rtsectsize; /* realtime sector size, bytes */
114 __u32 dirblocksize; /* directory block size, bytes */
115} __attribute__((packed)) compat_xfs_fsop_geom_v1_t;
116
117#define XFS_IOC_FSGEOMETRY_V1_32 \
118 _IOR ('X', 100, struct compat_xfs_fsop_geom_v1)
119
120STATIC unsigned long xfs_ioctl32_geom_v1(unsigned long arg)
121{
122 compat_xfs_fsop_geom_v1_t __user *p32 = (void __user *)arg;
123 xfs_fsop_geom_v1_t __user *p = compat_alloc_user_space(sizeof(*p));
124
125 if (copy_in_user(p, p32, sizeof(*p32)))
126 return -EFAULT;
127 return (unsigned long)p;
128}
129
130typedef struct compat_xfs_inogrp {
131 __u64 xi_startino; /* starting inode number */
132 __s32 xi_alloccount; /* # bits set in allocmask */
133 __u64 xi_allocmask; /* mask of allocated inodes */
134} __attribute__((packed)) compat_xfs_inogrp_t;
135
136STATIC int xfs_inumbers_fmt_compat(
137 void __user *ubuffer,
138 const xfs_inogrp_t *buffer,
139 long count,
140 long *written)
141{
142 compat_xfs_inogrp_t *p32 = ubuffer;
143 long i;
144
145 for (i = 0; i < count; i++) {
146 if (put_user(buffer[i].xi_startino, &p32[i].xi_startino) ||
147 put_user(buffer[i].xi_alloccount, &p32[i].xi_alloccount) ||
148 put_user(buffer[i].xi_allocmask, &p32[i].xi_allocmask))
149 return -EFAULT;
150 }
151 *written = count * sizeof(*p32);
152 return 0;
153}
154
78#else 155#else
79 156
80typedef struct xfs_fsop_bulkreq32 { 157#define xfs_inumbers_fmt_compat xfs_inumbers_fmt
158#define _PACKED
159
160#endif
161
162/* XFS_IOC_FSBULKSTAT and friends */
163
164typedef struct compat_xfs_bstime {
165 __s32 tv_sec; /* seconds */
166 __s32 tv_nsec; /* and nanoseconds */
167} compat_xfs_bstime_t;
168
169STATIC int xfs_bstime_store_compat(
170 compat_xfs_bstime_t __user *p32,
171 const xfs_bstime_t *p)
172{
173 __s32 sec32;
174
175 sec32 = p->tv_sec;
176 if (put_user(sec32, &p32->tv_sec) ||
177 put_user(p->tv_nsec, &p32->tv_nsec))
178 return -EFAULT;
179 return 0;
180}
181
182typedef struct compat_xfs_bstat {
183 __u64 bs_ino; /* inode number */
184 __u16 bs_mode; /* type and mode */
185 __u16 bs_nlink; /* number of links */
186 __u32 bs_uid; /* user id */
187 __u32 bs_gid; /* group id */
188 __u32 bs_rdev; /* device value */
189 __s32 bs_blksize; /* block size */
190 __s64 bs_size; /* file size */
191 compat_xfs_bstime_t bs_atime; /* access time */
192 compat_xfs_bstime_t bs_mtime; /* modify time */
193 compat_xfs_bstime_t bs_ctime; /* inode change time */
194 int64_t bs_blocks; /* number of blocks */
195 __u32 bs_xflags; /* extended flags */
196 __s32 bs_extsize; /* extent size */
197 __s32 bs_extents; /* number of extents */
198 __u32 bs_gen; /* generation count */
199 __u16 bs_projid; /* project id */
200 unsigned char bs_pad[14]; /* pad space, unused */
201 __u32 bs_dmevmask; /* DMIG event mask */
202 __u16 bs_dmstate; /* DMIG state info */
203 __u16 bs_aextents; /* attribute number of extents */
204} _PACKED compat_xfs_bstat_t;
205
206STATIC int xfs_bulkstat_one_fmt_compat(
207 void __user *ubuffer,
208 const xfs_bstat_t *buffer)
209{
210 compat_xfs_bstat_t __user *p32 = ubuffer;
211
212 if (put_user(buffer->bs_ino, &p32->bs_ino) ||
213 put_user(buffer->bs_mode, &p32->bs_mode) ||
214 put_user(buffer->bs_nlink, &p32->bs_nlink) ||
215 put_user(buffer->bs_uid, &p32->bs_uid) ||
216 put_user(buffer->bs_gid, &p32->bs_gid) ||
217 put_user(buffer->bs_rdev, &p32->bs_rdev) ||
218 put_user(buffer->bs_blksize, &p32->bs_blksize) ||
219 put_user(buffer->bs_size, &p32->bs_size) ||
220 xfs_bstime_store_compat(&p32->bs_atime, &buffer->bs_atime) ||
221 xfs_bstime_store_compat(&p32->bs_mtime, &buffer->bs_mtime) ||
222 xfs_bstime_store_compat(&p32->bs_ctime, &buffer->bs_ctime) ||
223 put_user(buffer->bs_blocks, &p32->bs_blocks) ||
224 put_user(buffer->bs_xflags, &p32->bs_xflags) ||
225 put_user(buffer->bs_extsize, &p32->bs_extsize) ||
226 put_user(buffer->bs_extents, &p32->bs_extents) ||
227 put_user(buffer->bs_gen, &p32->bs_gen) ||
228 put_user(buffer->bs_projid, &p32->bs_projid) ||
229 put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) ||
230 put_user(buffer->bs_dmstate, &p32->bs_dmstate) ||
231 put_user(buffer->bs_aextents, &p32->bs_aextents))
232 return -EFAULT;
233 return sizeof(*p32);
234}
235
236
237
238typedef struct compat_xfs_fsop_bulkreq {
81 compat_uptr_t lastip; /* last inode # pointer */ 239 compat_uptr_t lastip; /* last inode # pointer */
82 __s32 icount; /* count of entries in buffer */ 240 __s32 icount; /* count of entries in buffer */
83 compat_uptr_t ubuffer; /* user buffer for inode desc. */ 241 compat_uptr_t ubuffer; /* user buffer for inode desc. */
84 __s32 ocount; /* output count pointer */ 242 compat_uptr_t ocount; /* output count pointer */
85} xfs_fsop_bulkreq32_t; 243} compat_xfs_fsop_bulkreq_t;
86 244
87STATIC unsigned long 245#define XFS_IOC_FSBULKSTAT_32 \
88xfs_ioctl32_bulkstat( 246 _IOWR('X', 101, struct compat_xfs_fsop_bulkreq)
89 unsigned long arg) 247#define XFS_IOC_FSBULKSTAT_SINGLE_32 \
248 _IOWR('X', 102, struct compat_xfs_fsop_bulkreq)
249#define XFS_IOC_FSINUMBERS_32 \
250 _IOWR('X', 103, struct compat_xfs_fsop_bulkreq)
251
252/* copied from xfs_ioctl.c */
253STATIC int
254xfs_ioc_bulkstat_compat(
255 xfs_mount_t *mp,
256 unsigned int cmd,
257 void __user *arg)
90{ 258{
91 xfs_fsop_bulkreq32_t __user *p32 = (void __user *)arg; 259 compat_xfs_fsop_bulkreq_t __user *p32 = (void __user *)arg;
92 xfs_fsop_bulkreq_t __user *p = compat_alloc_user_space(sizeof(*p));
93 u32 addr; 260 u32 addr;
261 xfs_fsop_bulkreq_t bulkreq;
262 int count; /* # of records returned */
263 xfs_ino_t inlast; /* last inode number */
264 int done;
265 int error;
266
267 /* done = 1 if there are more stats to get and if bulkstat */
268 /* should be called again (unused here, but used in dmapi) */
269
270 if (!capable(CAP_SYS_ADMIN))
271 return -EPERM;
272
273 if (XFS_FORCED_SHUTDOWN(mp))
274 return -XFS_ERROR(EIO);
275
276 if (get_user(addr, &p32->lastip))
277 return -EFAULT;
278 bulkreq.lastip = compat_ptr(addr);
279 if (get_user(bulkreq.icount, &p32->icount) ||
280 get_user(addr, &p32->ubuffer))
281 return -EFAULT;
282 bulkreq.ubuffer = compat_ptr(addr);
283 if (get_user(addr, &p32->ocount))
284 return -EFAULT;
285 bulkreq.ocount = compat_ptr(addr);
286
287 if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
288 return -XFS_ERROR(EFAULT);
289
290 if ((count = bulkreq.icount) <= 0)
291 return -XFS_ERROR(EINVAL);
292
293 if (cmd == XFS_IOC_FSINUMBERS)
294 error = xfs_inumbers(mp, &inlast, &count,
295 bulkreq.ubuffer, xfs_inumbers_fmt_compat);
296 else {
297 /* declare a var to get a warning in case the type changes */
298 bulkstat_one_fmt_pf formatter = xfs_bulkstat_one_fmt_compat;
299 error = xfs_bulkstat(mp, &inlast, &count,
300 xfs_bulkstat_one, formatter,
301 sizeof(compat_xfs_bstat_t), bulkreq.ubuffer,
302 BULKSTAT_FG_QUICK, &done);
303 }
304 if (error)
305 return -error;
306
307 if (bulkreq.ocount != NULL) {
308 if (copy_to_user(bulkreq.lastip, &inlast,
309 sizeof(xfs_ino_t)))
310 return -XFS_ERROR(EFAULT);
311
312 if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
313 return -XFS_ERROR(EFAULT);
314 }
315
316 return 0;
317}
318
319
320
321typedef struct compat_xfs_fsop_handlereq {
322 __u32 fd; /* fd for FD_TO_HANDLE */
323 compat_uptr_t path; /* user pathname */
324 __u32 oflags; /* open flags */
325 compat_uptr_t ihandle; /* user supplied handle */
326 __u32 ihandlen; /* user supplied length */
327 compat_uptr_t ohandle; /* user buffer for handle */
328 compat_uptr_t ohandlen; /* user buffer length */
329} compat_xfs_fsop_handlereq_t;
330
331#define XFS_IOC_PATH_TO_FSHANDLE_32 \
332 _IOWR('X', 104, struct compat_xfs_fsop_handlereq)
333#define XFS_IOC_PATH_TO_HANDLE_32 \
334 _IOWR('X', 105, struct compat_xfs_fsop_handlereq)
335#define XFS_IOC_FD_TO_HANDLE_32 \
336 _IOWR('X', 106, struct compat_xfs_fsop_handlereq)
337#define XFS_IOC_OPEN_BY_HANDLE_32 \
338 _IOWR('X', 107, struct compat_xfs_fsop_handlereq)
339#define XFS_IOC_READLINK_BY_HANDLE_32 \
340 _IOWR('X', 108, struct compat_xfs_fsop_handlereq)
341
342STATIC unsigned long xfs_ioctl32_fshandle(unsigned long arg)
343{
344 compat_xfs_fsop_handlereq_t __user *p32 = (void __user *)arg;
345 xfs_fsop_handlereq_t __user *p = compat_alloc_user_space(sizeof(*p));
346 u32 addr;
94 347
95 if (get_user(addr, &p32->lastip) || 348 if (copy_in_user(&p->fd, &p32->fd, sizeof(__u32)) ||
96 put_user(compat_ptr(addr), &p->lastip) || 349 get_user(addr, &p32->path) ||
97 copy_in_user(&p->icount, &p32->icount, sizeof(s32)) || 350 put_user(compat_ptr(addr), &p->path) ||
98 get_user(addr, &p32->ubuffer) || 351 copy_in_user(&p->oflags, &p32->oflags, sizeof(__u32)) ||
99 put_user(compat_ptr(addr), &p->ubuffer) || 352 get_user(addr, &p32->ihandle) ||
100 get_user(addr, &p32->ocount) || 353 put_user(compat_ptr(addr), &p->ihandle) ||
101 put_user(compat_ptr(addr), &p->ocount)) 354 copy_in_user(&p->ihandlen, &p32->ihandlen, sizeof(__u32)) ||
355 get_user(addr, &p32->ohandle) ||
356 put_user(compat_ptr(addr), &p->ohandle) ||
357 get_user(addr, &p32->ohandlen) ||
358 put_user(compat_ptr(addr), &p->ohandlen))
102 return -EFAULT; 359 return -EFAULT;
103 360
104 return (unsigned long)p; 361 return (unsigned long)p;
105} 362}
106#endif 363
107 364
108STATIC long 365STATIC long
109xfs_compat_ioctl( 366xfs_compat_ioctl(
@@ -118,7 +375,6 @@ xfs_compat_ioctl(
118 375
119 switch (cmd) { 376 switch (cmd) {
120 case XFS_IOC_DIOINFO: 377 case XFS_IOC_DIOINFO:
121 case XFS_IOC_FSGEOMETRY_V1:
122 case XFS_IOC_FSGEOMETRY: 378 case XFS_IOC_FSGEOMETRY:
123 case XFS_IOC_GETVERSION: 379 case XFS_IOC_GETVERSION:
124 case XFS_IOC_GETXFLAGS: 380 case XFS_IOC_GETXFLAGS:
@@ -131,12 +387,7 @@ xfs_compat_ioctl(
131 case XFS_IOC_GETBMAPA: 387 case XFS_IOC_GETBMAPA:
132 case XFS_IOC_GETBMAPX: 388 case XFS_IOC_GETBMAPX:
133/* not handled 389/* not handled
134 case XFS_IOC_FD_TO_HANDLE:
135 case XFS_IOC_PATH_TO_HANDLE:
136 case XFS_IOC_PATH_TO_FSHANDLE:
137 case XFS_IOC_OPEN_BY_HANDLE:
138 case XFS_IOC_FSSETDM_BY_HANDLE: 390 case XFS_IOC_FSSETDM_BY_HANDLE:
139 case XFS_IOC_READLINK_BY_HANDLE:
140 case XFS_IOC_ATTRLIST_BY_HANDLE: 391 case XFS_IOC_ATTRLIST_BY_HANDLE:
141 case XFS_IOC_ATTRMULTI_BY_HANDLE: 392 case XFS_IOC_ATTRMULTI_BY_HANDLE:
142*/ 393*/
@@ -166,6 +417,10 @@ xfs_compat_ioctl(
166 arg = xfs_ioctl32_flock(arg); 417 arg = xfs_ioctl32_flock(arg);
167 cmd = _NATIVE_IOC(cmd, struct xfs_flock64); 418 cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
168 break; 419 break;
420 case XFS_IOC_FSGEOMETRY_V1_32:
421 arg = xfs_ioctl32_geom_v1(arg);
422 cmd = _NATIVE_IOC(cmd, struct xfs_fsop_geom_v1);
423 break;
169 424
170#else /* These are handled fine if no alignment issues */ 425#else /* These are handled fine if no alignment issues */
171 case XFS_IOC_ALLOCSP: 426 case XFS_IOC_ALLOCSP:
@@ -176,18 +431,28 @@ xfs_compat_ioctl(
176 case XFS_IOC_FREESP64: 431 case XFS_IOC_FREESP64:
177 case XFS_IOC_RESVSP64: 432 case XFS_IOC_RESVSP64:
178 case XFS_IOC_UNRESVSP64: 433 case XFS_IOC_UNRESVSP64:
434 case XFS_IOC_FSGEOMETRY_V1:
179 break; 435 break;
180 436
181 /* xfs_bstat_t still has wrong u32 vs u64 alignment */ 437 /* xfs_bstat_t still has wrong u32 vs u64 alignment */
182 case XFS_IOC_SWAPEXT: 438 case XFS_IOC_SWAPEXT:
183 break; 439 break;
184 440
185 case XFS_IOC_FSBULKSTAT_SINGLE:
186 case XFS_IOC_FSBULKSTAT:
187 case XFS_IOC_FSINUMBERS:
188 arg = xfs_ioctl32_bulkstat(arg);
189 break;
190#endif 441#endif
442 case XFS_IOC_FSBULKSTAT_32:
443 case XFS_IOC_FSBULKSTAT_SINGLE_32:
444 case XFS_IOC_FSINUMBERS_32:
445 cmd = _NATIVE_IOC(cmd, struct xfs_fsop_bulkreq);
446 return xfs_ioc_bulkstat_compat(XFS_BHVTOI(VNHEAD(vp))->i_mount,
447 cmd, (void*)arg);
448 case XFS_IOC_FD_TO_HANDLE_32:
449 case XFS_IOC_PATH_TO_HANDLE_32:
450 case XFS_IOC_PATH_TO_FSHANDLE_32:
451 case XFS_IOC_OPEN_BY_HANDLE_32:
452 case XFS_IOC_READLINK_BY_HANDLE_32:
453 arg = xfs_ioctl32_fshandle(arg);
454 cmd = _NATIVE_IOC(cmd, struct xfs_fsop_handlereq);
455 break;
191 default: 456 default:
192 return -ENOIOCTLCMD; 457 return -ENOIOCTLCMD;
193 } 458 }
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index af24a457d3a3..330c4ba9d404 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -123,6 +123,7 @@
 #define xfs_inherit_nosymlinks	xfs_params.inherit_nosym.val
 #define xfs_rotorstep		xfs_params.rotorstep.val
 #define xfs_inherit_nodefrag	xfs_params.inherit_nodfrg.val
+#define xfs_fstrm_centisecs	xfs_params.fstrm_timer.val
 
 #define current_cpu()		(raw_smp_processor_id())
 #define current_pid()		(current->pid)
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index bf9a9d5909be..06894cf00b12 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -547,7 +547,8 @@ vfs_sync_worker(
 
 	if (!(vfsp->vfs_flag & VFS_RDONLY))
 		error = bhv_vfs_sync(vfsp, SYNC_FSDATA | SYNC_BDFLUSH | \
-					SYNC_ATTR | SYNC_REFCACHE, NULL);
+					SYNC_ATTR | SYNC_REFCACHE | SYNC_SUPER,
+					NULL);
 	vfsp->vfs_sync_seq++;
 	wake_up(&vfsp->vfs_wait_single_sync_task);
 }
@@ -663,7 +664,7 @@ xfs_fs_sync_super(
 		 * occur here so don't bother flushing the buftarg (i.e
 		 * SYNC_QUIESCE) because it'll just get dirty again.
 		 */
-		flags = SYNC_FSDATA | SYNC_DELWRI | SYNC_WAIT | SYNC_IOWAIT;
+		flags = SYNC_DATA_QUIESCE;
 	} else
 		flags = SYNC_FSDATA | (wait ? SYNC_WAIT : 0);
 
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c
index cd6eaa44aa2b..bb997d75c05c 100644
--- a/fs/xfs/linux-2.6/xfs_sysctl.c
+++ b/fs/xfs/linux-2.6/xfs_sysctl.c
@@ -210,6 +210,17 @@ static ctl_table xfs_table[] = {
 		.extra1		= &xfs_params.inherit_nodfrg.min,
 		.extra2		= &xfs_params.inherit_nodfrg.max
 	},
+	{
+		.ctl_name	= XFS_FILESTREAM_TIMER,
+		.procname	= "filestream_centisecs",
+		.data		= &xfs_params.fstrm_timer.val,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &xfs_params.fstrm_timer.min,
+		.extra2		= &xfs_params.fstrm_timer.max,
+	},
 	/* please keep this the last entry */
 #ifdef CONFIG_PROC_FS
 	{
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.h b/fs/xfs/linux-2.6/xfs_sysctl.h
index a631fb8cc5ac..98b97e399d6f 100644
--- a/fs/xfs/linux-2.6/xfs_sysctl.h
+++ b/fs/xfs/linux-2.6/xfs_sysctl.h
@@ -47,6 +47,7 @@ typedef struct xfs_param {
 	xfs_sysctl_val_t inherit_nosym;	/* Inherit the "nosymlinks" flag. */
 	xfs_sysctl_val_t rotorstep;	/* inode32 AG rotoring control knob */
 	xfs_sysctl_val_t inherit_nodfrg;/* Inherit the "nodefrag" inode flag. */
+	xfs_sysctl_val_t fstrm_timer;	/* Filestream dir-AG assoc'n timeout. */
 } xfs_param_t;
 
 /*
@@ -86,6 +87,7 @@ enum {
 	XFS_INHERIT_NOSYM = 19,
 	XFS_ROTORSTEP = 20,
 	XFS_INHERIT_NODFRG = 21,
+	XFS_FILESTREAM_TIMER = 22,
 };
 
 extern xfs_param_t xfs_params;
diff --git a/fs/xfs/linux-2.6/xfs_vfs.h b/fs/xfs/linux-2.6/xfs_vfs.h
index e2c2ce98ab5b..dca3481aaafa 100644
--- a/fs/xfs/linux-2.6/xfs_vfs.h
+++ b/fs/xfs/linux-2.6/xfs_vfs.h
@@ -92,6 +92,21 @@ typedef enum {
 #define SYNC_REFCACHE		0x0040	/* prune some of the nfs ref cache */
 #define SYNC_REMOUNT		0x0080	/* remount readonly, no dummy LRs */
 #define SYNC_IOWAIT		0x0100	/* wait for all I/O to complete */
+#define SYNC_SUPER		0x0200	/* flush superblock to disk */
+
+/*
+ * When remounting a filesystem read-only or freezing the filesystem,
+ * we have two phases to execute. This first phase is syncing the data
+ * before we quiesce the fielsystem, and the second is flushing all the
+ * inodes out after we've waited for all the transactions created by
+ * the first phase to complete. The second phase uses SYNC_INODE_QUIESCE
+ * to ensure that the inodes are written to their location on disk
+ * rather than just existing in transactions in the log. This means
+ * after a quiesce there is no log replay required to write the inodes
+ * to disk (this is the main difference between a sync and a quiesce).
+ */
+#define SYNC_DATA_QUIESCE	(SYNC_DELWRI|SYNC_FSDATA|SYNC_WAIT|SYNC_IOWAIT)
+#define SYNC_INODE_QUIESCE	(SYNC_REMOUNT|SYNC_ATTR|SYNC_WAIT)
 
 #define SHUTDOWN_META_IO_ERROR	0x0001	/* write attempt to metadata failed */
 #define SHUTDOWN_LOG_IO_ERROR	0x0002	/* write attempt to the log failed */
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index 013048a92643..5742d65f0785 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -129,10 +129,7 @@ typedef enum bhv_vchange {
 	VCHANGE_FLAGS_IOEXCL_COUNT	= 4
 } bhv_vchange_t;
 
-typedef enum { L_FALSE, L_TRUE } lastclose_t;
-
 typedef int	(*vop_open_t)(bhv_desc_t *, struct cred *);
-typedef int	(*vop_close_t)(bhv_desc_t *, int, lastclose_t, struct cred *);
 typedef ssize_t (*vop_read_t)(bhv_desc_t *, struct kiocb *,
 				const struct iovec *, unsigned int,
 				loff_t *, int, struct cred *);
@@ -200,7 +197,6 @@ typedef int (*vop_iflush_t)(bhv_desc_t *, int);
 typedef struct bhv_vnodeops {
 	bhv_position_t	vn_position;	/* position within behavior chain */
 	vop_open_t		vop_open;
-	vop_close_t		vop_close;
 	vop_read_t		vop_read;
 	vop_write_t		vop_write;
 	vop_splice_read_t	vop_splice_read;
@@ -245,7 +241,6 @@ typedef struct bhv_vnodeops {
 #define VNHEAD(vp)	((vp)->v_bh.bh_first)
 #define VOP(op, vp)	(*((bhv_vnodeops_t *)VNHEAD(vp)->bd_ops)->op)
 #define bhv_vop_open(vp, cr)		VOP(vop_open, vp)(VNHEAD(vp),cr)
-#define bhv_vop_close(vp, f,last,cr)	VOP(vop_close, vp)(VNHEAD(vp),f,last,cr)
 #define bhv_vop_read(vp,file,iov,segs,offset,ioflags,cr)	\
 		VOP(vop_read, vp)(VNHEAD(vp),file,iov,segs,offset,ioflags,cr)
 #define bhv_vop_write(vp,file,iov,segs,offset,ioflags,cr)	\
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 3e4a8ad8a34c..7def4c699343 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -62,10 +62,9 @@ uint ndquot;
 
 kmem_zone_t	*qm_dqzone;
 kmem_zone_t	*qm_dqtrxzone;
-static kmem_shaker_t	xfs_qm_shaker;
+static struct shrinker	*xfs_qm_shaker;
 
 static cred_t	xfs_zerocr;
-static xfs_inode_t	xfs_zeroino;
 
 STATIC void	xfs_qm_list_init(xfs_dqlist_t *, char *, int);
 STATIC void	xfs_qm_list_destroy(xfs_dqlist_t *);
@@ -150,7 +149,7 @@ xfs_Gqm_init(void)
 	} else
 		xqm->qm_dqzone = qm_dqzone;
 
-	xfs_qm_shaker = kmem_shake_register(xfs_qm_shake);
+	xfs_qm_shaker = set_shrinker(DEFAULT_SEEKS, xfs_qm_shake);
 
 	/*
 	 * The t_dqinfo portion of transactions.
@@ -182,7 +181,7 @@ xfs_qm_destroy(
 
 	ASSERT(xqm != NULL);
 	ASSERT(xqm->qm_nrefs == 0);
-	kmem_shake_deregister(xfs_qm_shaker);
+	remove_shrinker(xfs_qm_shaker);
 	hsize = xqm->qm_dqhashmask + 1;
 	for (i = 0; i < hsize; i++) {
 		xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
@@ -1415,7 +1414,7 @@ xfs_qm_qino_alloc(
 		return error;
 	}
 
-	if ((error = xfs_dir_ialloc(&tp, &xfs_zeroino, S_IFREG, 1, 0,
+	if ((error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0,
 				&xfs_zerocr, 0, 1, ip, &committed))) {
 		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
 				XFS_TRANS_ABORT);
diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h
index bf0a12040b13..b5a7d92c6843 100644
--- a/fs/xfs/xfs.h
+++ b/fs/xfs/xfs.h
@@ -38,6 +38,7 @@
 #define XFS_RW_TRACE 1
 #define XFS_BUF_TRACE 1
 #define XFS_VNODE_TRACE 1
+#define XFS_FILESTREAMS_TRACE 1
 #endif
 
 #include <linux-2.6/xfs_linux.h>
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 9ece7f87ec5b..51c09c114a20 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -68,6 +68,7 @@ typedef struct xfs_agf {
 	__be32		agf_flcount;	/* count of blocks in freelist */
 	__be32		agf_freeblks;	/* total free blocks */
 	__be32		agf_longest;	/* longest free space */
+	__be32		agf_btreeblks;	/* # of blocks held in AGF btrees */
 } xfs_agf_t;
 
 #define	XFS_AGF_MAGICNUM	0x00000001
@@ -81,7 +82,8 @@ typedef struct xfs_agf {
 #define	XFS_AGF_FLCOUNT		0x00000100
 #define	XFS_AGF_FREEBLKS	0x00000200
 #define	XFS_AGF_LONGEST		0x00000400
-#define	XFS_AGF_NUM_BITS	11
+#define	XFS_AGF_BTREEBLKS	0x00000800
+#define	XFS_AGF_NUM_BITS	12
 #define	XFS_AGF_ALL_BITS	((1 << XFS_AGF_NUM_BITS) - 1)
 
 /* disk block (xfs_daddr_t) in the AG */
@@ -186,12 +188,15 @@ typedef struct xfs_perag
 	__uint32_t	pagf_flcount;	/* count of blocks in freelist */
 	xfs_extlen_t	pagf_freeblks;	/* total free blocks */
 	xfs_extlen_t	pagf_longest;	/* longest free space */
+	__uint32_t	pagf_btreeblks;	/* # of blocks held in AGF btrees */
 	xfs_agino_t	pagi_freecount;	/* number of free inodes */
+	xfs_agino_t	pagi_count;	/* number of allocated inodes */
+	int		pagb_count;	/* pagb slots in use */
 #ifdef __KERNEL__
 	lock_t		pagb_lock;	/* lock for pagb_list */
 #endif
-	int		pagb_count;	/* pagb slots in use */
 	xfs_perag_busy_t *pagb_list;	/* unstable blocks */
+	atomic_t	pagf_fstrms;	/* # of filestreams active in this AG */
 } xfs_perag_t;
 
 #define	XFS_AG_MAXLEVELS(mp)	((mp)->m_ag_maxlevels)
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 8e9a40aa0cd3..012a649a19c3 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -55,17 +55,17 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
55ktrace_t *xfs_alloc_trace_buf; 55ktrace_t *xfs_alloc_trace_buf;
56 56
57#define TRACE_ALLOC(s,a) \ 57#define TRACE_ALLOC(s,a) \
58 xfs_alloc_trace_alloc(fname, s, a, __LINE__) 58 xfs_alloc_trace_alloc(__FUNCTION__, s, a, __LINE__)
59#define TRACE_FREE(s,a,b,x,f) \ 59#define TRACE_FREE(s,a,b,x,f) \
60 xfs_alloc_trace_free(fname, s, mp, a, b, x, f, __LINE__) 60 xfs_alloc_trace_free(__FUNCTION__, s, mp, a, b, x, f, __LINE__)
61#define TRACE_MODAGF(s,a,f) \ 61#define TRACE_MODAGF(s,a,f) \
62 xfs_alloc_trace_modagf(fname, s, mp, a, f, __LINE__) 62 xfs_alloc_trace_modagf(__FUNCTION__, s, mp, a, f, __LINE__)
63#define TRACE_BUSY(fname,s,ag,agb,l,sl,tp) \ 63#define TRACE_BUSY(__FUNCTION__,s,ag,agb,l,sl,tp) \
64 xfs_alloc_trace_busy(fname, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSY, __LINE__) 64 xfs_alloc_trace_busy(__FUNCTION__, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSY, __LINE__)
65#define TRACE_UNBUSY(fname,s,ag,sl,tp) \ 65#define TRACE_UNBUSY(__FUNCTION__,s,ag,sl,tp) \
66 xfs_alloc_trace_busy(fname, s, mp, ag, -1, -1, sl, tp, XFS_ALLOC_KTRACE_UNBUSY, __LINE__) 66 xfs_alloc_trace_busy(__FUNCTION__, s, mp, ag, -1, -1, sl, tp, XFS_ALLOC_KTRACE_UNBUSY, __LINE__)
67#define TRACE_BUSYSEARCH(fname,s,ag,agb,l,sl,tp) \ 67#define TRACE_BUSYSEARCH(__FUNCTION__,s,ag,agb,l,sl,tp) \
68 xfs_alloc_trace_busy(fname, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSYSEARCH, __LINE__) 68 xfs_alloc_trace_busy(__FUNCTION__, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSYSEARCH, __LINE__)
69#else 69#else
70#define TRACE_ALLOC(s,a) 70#define TRACE_ALLOC(s,a)
71#define TRACE_FREE(s,a,b,x,f) 71#define TRACE_FREE(s,a,b,x,f)
@@ -420,7 +420,7 @@ xfs_alloc_read_agfl(
420 */ 420 */
421STATIC void 421STATIC void
422xfs_alloc_trace_alloc( 422xfs_alloc_trace_alloc(
423 char *name, /* function tag string */ 423 const char *name, /* function tag string */
424 char *str, /* additional string */ 424 char *str, /* additional string */
425 xfs_alloc_arg_t *args, /* allocation argument structure */ 425 xfs_alloc_arg_t *args, /* allocation argument structure */
426 int line) /* source line number */ 426 int line) /* source line number */
@@ -453,7 +453,7 @@ xfs_alloc_trace_alloc(
453 */ 453 */
454STATIC void 454STATIC void
455xfs_alloc_trace_free( 455xfs_alloc_trace_free(
456 char *name, /* function tag string */ 456 const char *name, /* function tag string */
457 char *str, /* additional string */ 457 char *str, /* additional string */
458 xfs_mount_t *mp, /* file system mount point */ 458 xfs_mount_t *mp, /* file system mount point */
459 xfs_agnumber_t agno, /* allocation group number */ 459 xfs_agnumber_t agno, /* allocation group number */
@@ -479,7 +479,7 @@ xfs_alloc_trace_free(
479 */ 479 */
480STATIC void 480STATIC void
481xfs_alloc_trace_modagf( 481xfs_alloc_trace_modagf(
482 char *name, /* function tag string */ 482 const char *name, /* function tag string */
483 char *str, /* additional string */ 483 char *str, /* additional string */
484 xfs_mount_t *mp, /* file system mount point */ 484 xfs_mount_t *mp, /* file system mount point */
485 xfs_agf_t *agf, /* new agf value */ 485 xfs_agf_t *agf, /* new agf value */
@@ -507,7 +507,7 @@ xfs_alloc_trace_modagf(
507 507
508STATIC void 508STATIC void
509xfs_alloc_trace_busy( 509xfs_alloc_trace_busy(
510 char *name, /* function tag string */ 510 const char *name, /* function tag string */
511 char *str, /* additional string */ 511 char *str, /* additional string */
512 xfs_mount_t *mp, /* file system mount point */ 512 xfs_mount_t *mp, /* file system mount point */
513 xfs_agnumber_t agno, /* allocation group number */ 513 xfs_agnumber_t agno, /* allocation group number */
@@ -549,9 +549,6 @@ xfs_alloc_ag_vextent(
549 xfs_alloc_arg_t *args) /* argument structure for allocation */ 549 xfs_alloc_arg_t *args) /* argument structure for allocation */
550{ 550{
551 int error=0; 551 int error=0;
552#ifdef XFS_ALLOC_TRACE
553 static char fname[] = "xfs_alloc_ag_vextent";
554#endif
555 552
556 ASSERT(args->minlen > 0); 553 ASSERT(args->minlen > 0);
557 ASSERT(args->maxlen > 0); 554 ASSERT(args->maxlen > 0);
@@ -635,9 +632,6 @@ xfs_alloc_ag_vextent_exact(
635 xfs_agblock_t fbno; /* start block of found extent */ 632 xfs_agblock_t fbno; /* start block of found extent */
636 xfs_agblock_t fend; /* end block of found extent */ 633 xfs_agblock_t fend; /* end block of found extent */
637 xfs_extlen_t flen; /* length of found extent */ 634 xfs_extlen_t flen; /* length of found extent */
638#ifdef XFS_ALLOC_TRACE
639 static char fname[] = "xfs_alloc_ag_vextent_exact";
640#endif
641 int i; /* success/failure of operation */ 635 int i; /* success/failure of operation */
642 xfs_agblock_t maxend; /* end of maximal extent */ 636 xfs_agblock_t maxend; /* end of maximal extent */
643 xfs_agblock_t minend; /* end of minimal extent */ 637 xfs_agblock_t minend; /* end of minimal extent */
@@ -737,9 +731,6 @@ xfs_alloc_ag_vextent_near(
737 xfs_btree_cur_t *bno_cur_gt; /* cursor for bno btree, right side */ 731 xfs_btree_cur_t *bno_cur_gt; /* cursor for bno btree, right side */
738 xfs_btree_cur_t *bno_cur_lt; /* cursor for bno btree, left side */ 732 xfs_btree_cur_t *bno_cur_lt; /* cursor for bno btree, left side */
739 xfs_btree_cur_t *cnt_cur; /* cursor for count btree */ 733 xfs_btree_cur_t *cnt_cur; /* cursor for count btree */
740#ifdef XFS_ALLOC_TRACE
741 static char fname[] = "xfs_alloc_ag_vextent_near";
742#endif
743 xfs_agblock_t gtbno; /* start bno of right side entry */ 734 xfs_agblock_t gtbno; /* start bno of right side entry */
744 xfs_agblock_t gtbnoa; /* aligned ... */ 735 xfs_agblock_t gtbnoa; /* aligned ... */
745 xfs_extlen_t gtdiff; /* difference to right side entry */ 736 xfs_extlen_t gtdiff; /* difference to right side entry */
@@ -1270,9 +1261,6 @@ xfs_alloc_ag_vextent_size(
1270 int error; /* error result */ 1261 int error; /* error result */
1271 xfs_agblock_t fbno; /* start of found freespace */ 1262 xfs_agblock_t fbno; /* start of found freespace */
1272 xfs_extlen_t flen; /* length of found freespace */ 1263 xfs_extlen_t flen; /* length of found freespace */
1273#ifdef XFS_ALLOC_TRACE
1274 static char fname[] = "xfs_alloc_ag_vextent_size";
1275#endif
1276 int i; /* temp status variable */ 1264 int i; /* temp status variable */
1277 xfs_agblock_t rbno; /* returned block number */ 1265 xfs_agblock_t rbno; /* returned block number */
1278 xfs_extlen_t rlen; /* length of returned extent */ 1266 xfs_extlen_t rlen; /* length of returned extent */
@@ -1427,9 +1415,6 @@ xfs_alloc_ag_vextent_small(
1427 int error; 1415 int error;
1428 xfs_agblock_t fbno; 1416 xfs_agblock_t fbno;
1429 xfs_extlen_t flen; 1417 xfs_extlen_t flen;
1430#ifdef XFS_ALLOC_TRACE
1431 static char fname[] = "xfs_alloc_ag_vextent_small";
1432#endif
1433 int i; 1418 int i;
1434 1419
1435 if ((error = xfs_alloc_decrement(ccur, 0, &i))) 1420 if ((error = xfs_alloc_decrement(ccur, 0, &i)))
@@ -1447,7 +1432,8 @@ xfs_alloc_ag_vextent_small(
1447 else if (args->minlen == 1 && args->alignment == 1 && !args->isfl && 1432 else if (args->minlen == 1 && args->alignment == 1 && !args->isfl &&
1448 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount) 1433 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
1449 > args->minleft)) { 1434 > args->minleft)) {
1450 if ((error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno))) 1435 error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
1436 if (error)
1451 goto error0; 1437 goto error0;
1452 if (fbno != NULLAGBLOCK) { 1438 if (fbno != NULLAGBLOCK) {
1453 if (args->userdata) { 1439 if (args->userdata) {
@@ -1515,9 +1501,6 @@ xfs_free_ag_extent(
 	xfs_btree_cur_t	*bno_cur;	/* cursor for by-block btree */
 	xfs_btree_cur_t	*cnt_cur;	/* cursor for by-size btree */
 	int		error;		/* error return value */
-#ifdef XFS_ALLOC_TRACE
-	static char	fname[] = "xfs_free_ag_extent";
-#endif
 	xfs_agblock_t	gtbno;		/* start of right neighbor block */
 	xfs_extlen_t	gtlen;		/* length of right neighbor block */
 	int		haveleft;	/* have a left neighbor block */
@@ -1923,7 +1906,8 @@ xfs_alloc_fix_freelist(
 	while (be32_to_cpu(agf->agf_flcount) > need) {
 		xfs_buf_t	*bp;
 
-		if ((error = xfs_alloc_get_freelist(tp, agbp, &bno)))
+		error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
+		if (error)
 			return error;
 		if ((error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1)))
 			return error;
@@ -1973,8 +1957,9 @@ xfs_alloc_fix_freelist(
 		 * Put each allocated block on the list.
 		 */
 		for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
-			if ((error = xfs_alloc_put_freelist(tp, agbp, agflbp,
-					bno)))
+			error = xfs_alloc_put_freelist(tp, agbp,
+					agflbp, bno, 0);
+			if (error)
 				return error;
 		}
 	}
@@ -1991,16 +1976,15 @@ int /* error */
 xfs_alloc_get_freelist(
 	xfs_trans_t	*tp,	/* transaction pointer */
 	xfs_buf_t	*agbp,	/* buffer containing the agf structure */
-	xfs_agblock_t	*bnop)	/* block address retrieved from freelist */
+	xfs_agblock_t	*bnop,	/* block address retrieved from freelist */
+	int		btreeblk) /* destination is a AGF btree */
 {
 	xfs_agf_t	*agf;	/* a.g. freespace structure */
 	xfs_agfl_t	*agfl;	/* a.g. freelist structure */
 	xfs_buf_t	*agflbp;/* buffer for a.g. freelist structure */
 	xfs_agblock_t	bno;	/* block number returned */
 	int		error;
-#ifdef XFS_ALLOC_TRACE
-	static char	fname[] = "xfs_alloc_get_freelist";
-#endif
+	int		logflags;
 	xfs_mount_t	*mp;	/* mount structure */
 	xfs_perag_t	*pag;	/* per allocation group data */
 
@@ -2032,8 +2016,16 @@ xfs_alloc_get_freelist(
 	be32_add(&agf->agf_flcount, -1);
 	xfs_trans_agflist_delta(tp, -1);
 	pag->pagf_flcount--;
-	TRACE_MODAGF(NULL, agf, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT);
-	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT);
+
+	logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
+	if (btreeblk) {
+		be32_add(&agf->agf_btreeblks, 1);
+		pag->pagf_btreeblks++;
+		logflags |= XFS_AGF_BTREEBLKS;
+	}
+
+	TRACE_MODAGF(NULL, agf, logflags);
+	xfs_alloc_log_agf(tp, agbp, logflags);
 	*bnop = bno;
 
 	/*
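The btreeblk accounting added above only logs the extra AGF field when it is actually modified, by accumulating a flag mask and issuing one trace/log call at the end. A small standalone sketch of that accumulate-then-log pattern (the flag names and the log_fields() helper are made up for illustration, not the XFS interfaces):

/* Sketch of the accumulate-then-log pattern used in the hunk above. */
#include <stdio.h>

#define LOG_FLFIRST   0x1
#define LOG_FLCOUNT   0x2
#define LOG_BTREEBLKS 0x4

static void log_fields(unsigned int flags)
{
	printf("logging field mask 0x%x\n", flags);
}

static void take_from_freelist(int for_btree)
{
	unsigned int logflags = LOG_FLFIRST | LOG_FLCOUNT;

	/* ... update the always-touched fields here ... */
	if (for_btree) {
		/* ... update the btree-block counter here ... */
		logflags |= LOG_BTREEBLKS;
	}
	/* one log call covers exactly the fields that changed */
	log_fields(logflags);
}

int main(void)
{
	take_from_freelist(0);
	take_from_freelist(1);
	return 0;
}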
@@ -2071,6 +2063,7 @@ xfs_alloc_log_agf(
 		offsetof(xfs_agf_t, agf_flcount),
 		offsetof(xfs_agf_t, agf_freeblks),
 		offsetof(xfs_agf_t, agf_longest),
+		offsetof(xfs_agf_t, agf_btreeblks),
 		sizeof(xfs_agf_t)
 	};
 
@@ -2106,15 +2099,14 @@ xfs_alloc_put_freelist(
 	xfs_trans_t	*tp,	/* transaction pointer */
 	xfs_buf_t	*agbp,	/* buffer for a.g. freelist header */
 	xfs_buf_t	*agflbp,/* buffer for a.g. free block array */
-	xfs_agblock_t	bno)	/* block being freed */
+	xfs_agblock_t	bno,	/* block being freed */
+	int		btreeblk) /* block came from a AGF btree */
 {
 	xfs_agf_t	*agf;	/* a.g. freespace structure */
 	xfs_agfl_t	*agfl;	/* a.g. free block array */
 	__be32		*blockp;/* pointer to array entry */
 	int		error;
-#ifdef XFS_ALLOC_TRACE
-	static char	fname[] = "xfs_alloc_put_freelist";
-#endif
+	int		logflags;
 	xfs_mount_t	*mp;	/* mount structure */
 	xfs_perag_t	*pag;	/* per allocation group data */
 
@@ -2132,11 +2124,22 @@ xfs_alloc_put_freelist(
 	be32_add(&agf->agf_flcount, 1);
 	xfs_trans_agflist_delta(tp, 1);
 	pag->pagf_flcount++;
+
+	logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
+	if (btreeblk) {
+		be32_add(&agf->agf_btreeblks, -1);
+		pag->pagf_btreeblks--;
+		logflags |= XFS_AGF_BTREEBLKS;
+	}
+
+	TRACE_MODAGF(NULL, agf, logflags);
+	xfs_alloc_log_agf(tp, agbp, logflags);
+
 	ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
 	blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)];
 	*blockp = cpu_to_be32(bno);
-	TRACE_MODAGF(NULL, agf, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
-	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
+	TRACE_MODAGF(NULL, agf, logflags);
+	xfs_alloc_log_agf(tp, agbp, logflags);
 	xfs_trans_log_buf(tp, agflbp,
 		(int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl),
 		(int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl +
@@ -2196,6 +2199,7 @@ xfs_alloc_read_agf(
 		pag = &mp->m_perag[agno];
 		if (!pag->pagf_init) {
 			pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
+			pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
 			pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
 			pag->pagf_longest = be32_to_cpu(agf->agf_longest);
 			pag->pagf_levels[XFS_BTNUM_BNOi] =
@@ -2235,9 +2239,6 @@ xfs_alloc_vextent(
 	xfs_agblock_t	agsize;	/* allocation group size */
 	int		error;
 	int		flags;	/* XFS_ALLOC_FLAG_... locking flags */
-#ifdef XFS_ALLOC_TRACE
-	static char	fname[] = "xfs_alloc_vextent";
-#endif
 	xfs_extlen_t	minleft;/* minimum left value, temp copy */
 	xfs_mount_t	*mp;	/* mount structure pointer */
 	xfs_agnumber_t	sagno;	/* starting allocation group number */
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index 5a4256120ccc..5aec15d0651e 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -136,7 +136,8 @@ int /* error */
 xfs_alloc_get_freelist(
 	struct xfs_trans *tp,	/* transaction pointer */
 	struct xfs_buf	*agbp,	/* buffer containing the agf structure */
-	xfs_agblock_t	*bnop);	/* block address retrieved from freelist */
+	xfs_agblock_t	*bnop,	/* block address retrieved from freelist */
+	int		btreeblk); /* destination is a AGF btree */
 
 /*
  * Log the given fields from the agf structure.
@@ -165,7 +166,8 @@ xfs_alloc_put_freelist(
 	struct xfs_trans *tp,	/* transaction pointer */
 	struct xfs_buf	*agbp,	/* buffer for a.g. freelist header */
 	struct xfs_buf	*agflbp,/* buffer for a.g. free block array */
-	xfs_agblock_t	bno);	/* block being freed */
+	xfs_agblock_t	bno,	/* block being freed */
+	int		btreeblk); /* owner was a AGF btree */
 
 /*
  * Read in the allocation group header (free/alloc section).
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 74cadf95d4e8..1603ce595853 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -226,8 +226,9 @@ xfs_alloc_delrec(
 		/*
 		 * Put this buffer/block on the ag's freelist.
 		 */
-		if ((error = xfs_alloc_put_freelist(cur->bc_tp,
-				cur->bc_private.a.agbp, NULL, bno)))
+		error = xfs_alloc_put_freelist(cur->bc_tp,
+				cur->bc_private.a.agbp, NULL, bno, 1);
+		if (error)
 			return error;
 		/*
 		 * Since blocks move to the free list without the
@@ -549,8 +550,9 @@ xfs_alloc_delrec(
 	/*
 	 * Free the deleting block by putting it on the freelist.
 	 */
-	if ((error = xfs_alloc_put_freelist(cur->bc_tp, cur->bc_private.a.agbp,
-			NULL, rbno)))
+	error = xfs_alloc_put_freelist(cur->bc_tp,
+			cur->bc_private.a.agbp, NULL, rbno, 1);
+	if (error)
 		return error;
 	/*
 	 * Since blocks move to the free list without the coordination
@@ -1320,8 +1322,9 @@ xfs_alloc_newroot(
 	/*
 	 * Get a buffer from the freelist blocks, for the new root.
 	 */
-	if ((error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
-			&nbno)))
+	error = xfs_alloc_get_freelist(cur->bc_tp,
+			cur->bc_private.a.agbp, &nbno, 1);
+	if (error)
 		return error;
 	/*
 	 * None available, we fail.
@@ -1604,8 +1607,9 @@ xfs_alloc_split(
 	 * Allocate the new block from the freelist.
 	 * If we can't do it, we're toast. Give up.
 	 */
-	if ((error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
-			&rbno)))
+	error = xfs_alloc_get_freelist(cur->bc_tp,
			cur->bc_private.a.agbp, &rbno, 1);
+	if (error)
 		return error;
 	if (rbno == NULLAGBLOCK) {
 		*stat = 0;
diff --git a/fs/xfs/xfs_bit.c b/fs/xfs/xfs_bit.c
index 1afe07f67e3b..fab0b6d5a41b 100644
--- a/fs/xfs/xfs_bit.c
+++ b/fs/xfs/xfs_bit.c
@@ -66,44 +66,6 @@ static const char xfs_highbit[256] = {
 #endif
 
 /*
- * Count of bits set in byte, 0..8.
- */
-static const char xfs_countbit[256] = {
-	0, 1, 1, 2, 1, 2, 2, 3,	/* 00 .. 07 */
-	1, 2, 2, 3, 2, 3, 3, 4,	/* 08 .. 0f */
-	1, 2, 2, 3, 2, 3, 3, 4,	/* 10 .. 17 */
-	2, 3, 3, 4, 3, 4, 4, 5,	/* 18 .. 1f */
-	1, 2, 2, 3, 2, 3, 3, 4,	/* 20 .. 27 */
-	2, 3, 3, 4, 3, 4, 4, 5,	/* 28 .. 2f */
-	2, 3, 3, 4, 3, 4, 4, 5,	/* 30 .. 37 */
-	3, 4, 4, 5, 4, 5, 5, 6,	/* 38 .. 3f */
-	1, 2, 2, 3, 2, 3, 3, 4,	/* 40 .. 47 */
-	2, 3, 3, 4, 3, 4, 4, 5,	/* 48 .. 4f */
-	2, 3, 3, 4, 3, 4, 4, 5,	/* 50 .. 57 */
-	3, 4, 4, 5, 4, 5, 5, 6,	/* 58 .. 5f */
-	2, 3, 3, 4, 3, 4, 4, 5,	/* 60 .. 67 */
-	3, 4, 4, 5, 4, 5, 5, 6,	/* 68 .. 6f */
-	3, 4, 4, 5, 4, 5, 5, 6,	/* 70 .. 77 */
-	4, 5, 5, 6, 5, 6, 6, 7,	/* 78 .. 7f */
-	1, 2, 2, 3, 2, 3, 3, 4,	/* 80 .. 87 */
-	2, 3, 3, 4, 3, 4, 4, 5,	/* 88 .. 8f */
-	2, 3, 3, 4, 3, 4, 4, 5,	/* 90 .. 97 */
-	3, 4, 4, 5, 4, 5, 5, 6,	/* 98 .. 9f */
-	2, 3, 3, 4, 3, 4, 4, 5,	/* a0 .. a7 */
-	3, 4, 4, 5, 4, 5, 5, 6,	/* a8 .. af */
-	3, 4, 4, 5, 4, 5, 5, 6,	/* b0 .. b7 */
-	4, 5, 5, 6, 5, 6, 6, 7,	/* b8 .. bf */
-	2, 3, 3, 4, 3, 4, 4, 5,	/* c0 .. c7 */
-	3, 4, 4, 5, 4, 5, 5, 6,	/* c8 .. cf */
-	3, 4, 4, 5, 4, 5, 5, 6,	/* d0 .. d7 */
-	4, 5, 5, 6, 5, 6, 6, 7,	/* d8 .. df */
-	3, 4, 4, 5, 4, 5, 5, 6,	/* e0 .. e7 */
-	4, 5, 5, 6, 5, 6, 6, 7,	/* e8 .. ef */
-	4, 5, 5, 6, 5, 6, 6, 7,	/* f0 .. f7 */
-	5, 6, 6, 7, 6, 7, 7, 8,	/* f8 .. ff */
-};
-
-/*
  * xfs_highbit32: get high bit set out of 32-bit argument, -1 if none set.
  */
 inline int
@@ -167,56 +129,21 @@ xfs_highbit64(
 
 
 /*
- * Count the number of bits set in the bitmap starting with bit
- * start_bit.  Size is the size of the bitmap in words.
- *
- * Do the counting by mapping a byte value to the number of set
- * bits for that value using the xfs_countbit array, i.e.
- * xfs_countbit[0] == 0, xfs_countbit[1] == 1, xfs_countbit[2] == 1,
- * xfs_countbit[3] == 2, etc.
+ * Return whether bitmap is empty.
+ * Size is number of words in the bitmap, which is padded to word boundary
+ * Returns 1 for empty, 0 for non-empty.
  */
 int
-xfs_count_bits(uint *map, uint size, uint start_bit)
+xfs_bitmap_empty(uint *map, uint size)
 {
-	register int	bits;
-	register unsigned char	*bytep;
-	register unsigned char	*end_map;
-	int		byte_bit;
-
-	bits = 0;
-	end_map = (char*)(map + size);
-	bytep = (char*)(map + (start_bit & ~0x7));
-	byte_bit = start_bit & 0x7;
-
-	/*
-	 * If the caller fell off the end of the map, return 0.
-	 */
-	if (bytep >= end_map) {
-		return (0);
-	}
-
-	/*
-	 * If start_bit is not byte aligned, then process the
-	 * first byte separately.
-	 */
-	if (byte_bit != 0) {
-		/*
-		 * Shift off the bits we don't want to look at,
-		 * before indexing into xfs_countbit.
-		 */
-		bits += xfs_countbit[(*bytep >> byte_bit)];
-		bytep++;
-	}
+	uint i;
+	uint ret = 0;
 
-	/*
-	 * Count the bits in each byte until the end of the bitmap.
-	 */
-	while (bytep < end_map) {
-		bits += xfs_countbit[*bytep];
-		bytep++;
+	for (i = 0; i < size; i++) {
+		ret |= map[i];
 	}
 
-	return (bits);
+	return (ret == 0);
 }
 
 /*
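The replacement routine above only needs to know whether any bit is set at all, so it ORs the words together instead of consulting a per-byte popcount table. A self-contained version of the same idea, usable outside the kernel for a quick check of the behaviour:

/* Standalone illustration of the bitmap-empty test introduced above:
 * OR every word together and compare the result against zero. */
#include <stdio.h>

typedef unsigned int uint;

static int bitmap_empty(const uint *map, uint size)
{
	uint i, ret = 0;

	for (i = 0; i < size; i++)
		ret |= map[i];
	return ret == 0;	/* 1 == empty, 0 == at least one bit set */
}

int main(void)
{
	uint a[4] = { 0, 0, 0, 0 };
	uint b[4] = { 0, 0, 0x10, 0 };

	printf("a empty? %d\n", bitmap_empty(a, 4));	/* prints 1 */
	printf("b empty? %d\n", bitmap_empty(b, 4));	/* prints 0 */
	return 0;
}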
diff --git a/fs/xfs/xfs_bit.h b/fs/xfs/xfs_bit.h
index 0bbe56817542..082641a9782c 100644
--- a/fs/xfs/xfs_bit.h
+++ b/fs/xfs/xfs_bit.h
@@ -55,8 +55,8 @@ extern int xfs_lowbit64(__uint64_t v);
 /* Get high bit set out of 64-bit argument, -1 if none set */
 extern int xfs_highbit64(__uint64_t);
 
-/* Count set bits in map starting with start_bit */
-extern int xfs_count_bits(uint *map, uint size, uint start_bit);
+/* Return whether bitmap is empty (1 == empty) */
+extern int xfs_bitmap_empty(uint *map, uint size);
 
 /* Count continuous one bits in map starting with start_bit */
 extern int xfs_contig_bits(uint *map, uint size, uint start_bit);
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index b1ea26e40aaf..94b5c5fe2681 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -52,6 +52,7 @@
 #include "xfs_quota.h"
 #include "xfs_trans_space.h"
 #include "xfs_buf_item.h"
+#include "xfs_filestream.h"
 
 
 #ifdef DEBUG
@@ -277,7 +278,7 @@ xfs_bmap_isaeof(
 STATIC void
 xfs_bmap_trace_addentry(
 	int		opcode,		/* operation */
-	char		*fname,		/* function name */
+	const char	*fname,		/* function name */
 	char		*desc,		/* operation description */
 	xfs_inode_t	*ip,		/* incore inode pointer */
 	xfs_extnum_t	idx,		/* index of entry(ies) */
@@ -291,7 +292,7 @@ xfs_bmap_trace_addentry(
  */
 STATIC void
 xfs_bmap_trace_delete(
-	char		*fname,		/* function name */
+	const char	*fname,		/* function name */
 	char		*desc,		/* operation description */
 	xfs_inode_t	*ip,		/* incore inode pointer */
 	xfs_extnum_t	idx,		/* index of entry(entries) deleted */
@@ -304,7 +305,7 @@ xfs_bmap_trace_delete(
  */
 STATIC void
 xfs_bmap_trace_insert(
-	char		*fname,		/* function name */
+	const char	*fname,		/* function name */
 	char		*desc,		/* operation description */
 	xfs_inode_t	*ip,		/* incore inode pointer */
 	xfs_extnum_t	idx,		/* index of entry(entries) inserted */
@@ -318,7 +319,7 @@ xfs_bmap_trace_insert(
  */
 STATIC void
 xfs_bmap_trace_post_update(
-	char		*fname,		/* function name */
+	const char	*fname,		/* function name */
 	char		*desc,		/* operation description */
 	xfs_inode_t	*ip,		/* incore inode pointer */
 	xfs_extnum_t	idx,		/* index of entry updated */
@@ -329,17 +330,25 @@ xfs_bmap_trace_post_update(
  */
 STATIC void
 xfs_bmap_trace_pre_update(
-	char		*fname,		/* function name */
+	const char	*fname,		/* function name */
 	char		*desc,		/* operation description */
 	xfs_inode_t	*ip,		/* incore inode pointer */
 	xfs_extnum_t	idx,		/* index of entry to be updated */
 	int		whichfork);	/* data or attr fork */
 
+#define	XFS_BMAP_TRACE_DELETE(d,ip,i,c,w)	\
+	xfs_bmap_trace_delete(__FUNCTION__,d,ip,i,c,w)
+#define	XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w)	\
+	xfs_bmap_trace_insert(__FUNCTION__,d,ip,i,c,r1,r2,w)
+#define	XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w)	\
+	xfs_bmap_trace_post_update(__FUNCTION__,d,ip,i,w)
+#define	XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w)	\
+	xfs_bmap_trace_pre_update(__FUNCTION__,d,ip,i,w)
 #else
-#define	xfs_bmap_trace_delete(f,d,ip,i,c,w)
-#define	xfs_bmap_trace_insert(f,d,ip,i,c,r1,r2,w)
-#define	xfs_bmap_trace_post_update(f,d,ip,i,w)
-#define	xfs_bmap_trace_pre_update(f,d,ip,i,w)
+#define	XFS_BMAP_TRACE_DELETE(d,ip,i,c,w)
+#define	XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w)
+#define	XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w)
+#define	XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w)
 #endif	/* XFS_BMAP_TRACE */
 
 /*
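The wrapper macros above capture the calling function's name via __FUNCTION__, which is why the per-function "static char fname[]" declarations disappear throughout the rest of this file. A minimal standalone sketch of that pattern (using standard C99 __func__ here; the names and trace_event() helper are invented for illustration):

/* Minimal sketch of a trace macro that captures the caller's name,
 * so callers no longer need a local "fname" string. */
#include <stdio.h>

static void trace_event(const char *fname, const char *desc, int idx)
{
	printf("%s: %s (idx %d)\n", fname, desc, idx);
}

#define TRACE_EVENT(desc, idx)	trace_event(__func__, (desc), (idx))

static void update_extent(int idx)
{
	TRACE_EVENT("pre-update", idx);
	/* ... modify the extent record here ... */
	TRACE_EVENT("post-update", idx);
}

int main(void)
{
	update_extent(3);
	return 0;
}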
@@ -531,9 +540,6 @@ xfs_bmap_add_extent(
 	xfs_filblks_t		da_new; /* new count del alloc blocks used */
 	xfs_filblks_t		da_old; /* old count del alloc blocks used */
 	int			error;	/* error return value */
-#ifdef XFS_BMAP_TRACE
-	static char		fname[] = "xfs_bmap_add_extent";
-#endif
 	xfs_ifork_t		*ifp;	/* inode fork ptr */
 	int			logflags; /* returned value */
 	xfs_extnum_t		nextents; /* number of extents in file now */
@@ -551,8 +557,8 @@ xfs_bmap_add_extent(
 	 * already extents in the list.
 	 */
 	if (nextents == 0) {
-		xfs_bmap_trace_insert(fname, "insert empty", ip, 0, 1, new,
-			NULL, whichfork);
+		XFS_BMAP_TRACE_INSERT("insert empty", ip, 0, 1, new, NULL,
+			whichfork);
 		xfs_iext_insert(ifp, 0, 1, new);
 		ASSERT(cur == NULL);
 		ifp->if_lastex = 0;
@@ -710,9 +716,6 @@ xfs_bmap_add_extent_delay_real(
 	int			diff;	/* temp value */
 	xfs_bmbt_rec_t		*ep;	/* extent entry for idx */
 	int			error;	/* error return value */
-#ifdef XFS_BMAP_TRACE
-	static char		fname[] = "xfs_bmap_add_extent_delay_real";
-#endif
 	int			i;	/* temp state */
 	xfs_ifork_t		*ifp;	/* inode fork pointer */
 	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
@@ -808,15 +811,14 @@ xfs_bmap_add_extent_delay_real(
 		 * Filling in all of a previously delayed allocation extent.
 		 * The left and right neighbors are both contiguous with new.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1,
+		XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC|RC", ip, idx - 1,
 			XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
 			LEFT.br_blockcount + PREV.br_blockcount +
 			RIGHT.br_blockcount);
-		xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1,
-			XFS_DATA_FORK);
-		xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2,
+		XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC|RC", ip, idx - 1,
 			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_DELETE("LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK);
 		xfs_iext_remove(ifp, idx, 2);
 		ip->i_df.if_lastex = idx - 1;
 		ip->i_d.di_nextents--;
@@ -855,15 +857,14 @@ xfs_bmap_add_extent_delay_real(
 		 * Filling in all of a previously delayed allocation extent.
 		 * The left neighbor is contiguous, the right is not.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1,
+		XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC", ip, idx - 1,
 			XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
 			LEFT.br_blockcount + PREV.br_blockcount);
-		xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1,
+		XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC", ip, idx - 1,
 			XFS_DATA_FORK);
 		ip->i_df.if_lastex = idx - 1;
-		xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_DELETE("LF|RF|LC", ip, idx, 1, XFS_DATA_FORK);
 		xfs_iext_remove(ifp, idx, 1);
 		if (cur == NULL)
 			rval = XFS_ILOG_DEXT;
@@ -892,16 +893,13 @@ xfs_bmap_add_extent_delay_real(
 		 * Filling in all of a previously delayed allocation extent.
 		 * The right neighbor is contiguous, the left is not.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LF|RF|RC", ip, idx,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|RC", ip, idx, XFS_DATA_FORK);
 		xfs_bmbt_set_startblock(ep, new->br_startblock);
 		xfs_bmbt_set_blockcount(ep,
 			PREV.br_blockcount + RIGHT.br_blockcount);
-		xfs_bmap_trace_post_update(fname, "LF|RF|RC", ip, idx,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("LF|RF|RC", ip, idx, XFS_DATA_FORK);
 		ip->i_df.if_lastex = idx;
-		xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_DELETE("LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK);
 		xfs_iext_remove(ifp, idx + 1, 1);
 		if (cur == NULL)
 			rval = XFS_ILOG_DEXT;
@@ -931,11 +929,9 @@ xfs_bmap_add_extent_delay_real(
 		 * Neither the left nor right neighbors are contiguous with
 		 * the new one.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LF|RF", ip, idx,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("LF|RF", ip, idx, XFS_DATA_FORK);
 		xfs_bmbt_set_startblock(ep, new->br_startblock);
-		xfs_bmap_trace_post_update(fname, "LF|RF", ip, idx,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("LF|RF", ip, idx, XFS_DATA_FORK);
 		ip->i_df.if_lastex = idx;
 		ip->i_d.di_nextents++;
 		if (cur == NULL)
@@ -963,17 +959,14 @@ xfs_bmap_add_extent_delay_real(
 		 * Filling in the first part of a previous delayed allocation.
 		 * The left neighbor is contiguous.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx - 1, XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
 			LEFT.br_blockcount + new->br_blockcount);
 		xfs_bmbt_set_startoff(ep,
 			PREV.br_startoff + new->br_blockcount);
-		xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx - 1,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx - 1, XFS_DATA_FORK);
 		temp = PREV.br_blockcount - new->br_blockcount;
-		xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(ep, temp);
 		ip->i_df.if_lastex = idx - 1;
 		if (cur == NULL)
@@ -995,8 +988,7 @@ xfs_bmap_add_extent_delay_real(
 		temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
 			STARTBLOCKVAL(PREV.br_startblock));
 		xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
-		xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK);
 		*dnew = temp;
 		/* DELTA: The boundary between two in-core extents moved. */
 		temp = LEFT.br_startoff;
@@ -1009,11 +1001,11 @@ xfs_bmap_add_extent_delay_real(
 		 * Filling in the first part of a previous delayed allocation.
 		 * The left neighbor is not contiguous.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LF", ip, idx, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("LF", ip, idx, XFS_DATA_FORK);
 		xfs_bmbt_set_startoff(ep, new_endoff);
 		temp = PREV.br_blockcount - new->br_blockcount;
 		xfs_bmbt_set_blockcount(ep, temp);
-		xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL,
+		XFS_BMAP_TRACE_INSERT("LF", ip, idx, 1, new, NULL,
 			XFS_DATA_FORK);
 		xfs_iext_insert(ifp, idx, 1, new);
 		ip->i_df.if_lastex = idx;
@@ -1046,8 +1038,7 @@ xfs_bmap_add_extent_delay_real(
 			(cur ? cur->bc_private.b.allocated : 0));
 		ep = xfs_iext_get_ext(ifp, idx + 1);
 		xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
-		xfs_bmap_trace_post_update(fname, "LF", ip, idx + 1,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx + 1, XFS_DATA_FORK);
 		*dnew = temp;
 		/* DELTA: One in-core extent is split in two. */
 		temp = PREV.br_startoff;
@@ -1060,17 +1051,14 @@ xfs_bmap_add_extent_delay_real(
 		 * The right neighbor is contiguous with the new allocation.
 		 */
 		temp = PREV.br_blockcount - new->br_blockcount;
-		xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx,
-			XFS_DATA_FORK);
-		xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx + 1, XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(ep, temp);
 		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
 			new->br_startoff, new->br_startblock,
 			new->br_blockcount + RIGHT.br_blockcount,
 			RIGHT.br_state);
-		xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx + 1, XFS_DATA_FORK);
 		ip->i_df.if_lastex = idx + 1;
 		if (cur == NULL)
 			rval = XFS_ILOG_DEXT;
@@ -1091,8 +1079,7 @@ xfs_bmap_add_extent_delay_real(
 		temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
 			STARTBLOCKVAL(PREV.br_startblock));
 		xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
-		xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK);
 		*dnew = temp;
 		/* DELTA: The boundary between two in-core extents moved. */
 		temp = PREV.br_startoff;
@@ -1106,10 +1093,10 @@ xfs_bmap_add_extent_delay_real(
 		 * The right neighbor is not contiguous.
 		 */
 		temp = PREV.br_blockcount - new->br_blockcount;
-		xfs_bmap_trace_pre_update(fname, "RF", ip, idx, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("RF", ip, idx, XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(ep, temp);
-		xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1,
-			new, NULL, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_INSERT("RF", ip, idx + 1, 1, new, NULL,
+			XFS_DATA_FORK);
 		xfs_iext_insert(ifp, idx + 1, 1, new);
 		ip->i_df.if_lastex = idx + 1;
 		ip->i_d.di_nextents++;
@@ -1141,7 +1128,7 @@ xfs_bmap_add_extent_delay_real(
 			(cur ? cur->bc_private.b.allocated : 0));
 		ep = xfs_iext_get_ext(ifp, idx);
 		xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
-		xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK);
 		*dnew = temp;
 		/* DELTA: One in-core extent is split in two. */
 		temp = PREV.br_startoff;
@@ -1155,7 +1142,7 @@ xfs_bmap_add_extent_delay_real(
 		 * This case is avoided almost all the time.
 		 */
 		temp = new->br_startoff - PREV.br_startoff;
-		xfs_bmap_trace_pre_update(fname, "0", ip, idx, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(ep, temp);
 		r[0] = *new;
 		r[1].br_state = PREV.br_state;
@@ -1163,7 +1150,7 @@ xfs_bmap_add_extent_delay_real(
 		r[1].br_startoff = new_endoff;
 		temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
 		r[1].br_blockcount = temp2;
-		xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1],
+		XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 2, &r[0], &r[1],
 			XFS_DATA_FORK);
 		xfs_iext_insert(ifp, idx + 1, 2, &r[0]);
 		ip->i_df.if_lastex = idx + 1;
@@ -1222,13 +1209,11 @@ xfs_bmap_add_extent_delay_real(
 		}
 		ep = xfs_iext_get_ext(ifp, idx);
 		xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
-		xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK);
-		xfs_bmap_trace_pre_update(fname, "0", ip, idx + 2,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx + 2, XFS_DATA_FORK);
 		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2),
 			NULLSTARTBLOCK((int)temp2));
-		xfs_bmap_trace_post_update(fname, "0", ip, idx + 2,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx + 2, XFS_DATA_FORK);
 		*dnew = temp + temp2;
 		/* DELTA: One in-core extent is split in three. */
 		temp = PREV.br_startoff;
@@ -1287,9 +1272,6 @@ xfs_bmap_add_extent_unwritten_real(
 	xfs_btree_cur_t		*cur;	/* btree cursor */
 	xfs_bmbt_rec_t		*ep;	/* extent entry for idx */
 	int			error;	/* error return value */
-#ifdef XFS_BMAP_TRACE
-	static char		fname[] = "xfs_bmap_add_extent_unwritten_real";
-#endif
 	int			i;	/* temp state */
 	xfs_ifork_t		*ifp;	/* inode fork pointer */
 	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
@@ -1390,15 +1372,14 @@ xfs_bmap_add_extent_unwritten_real(
 		 * Setting all of a previous oldext extent to newext.
 		 * The left and right neighbors are both contiguous with new.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1,
+		XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC|RC", ip, idx - 1,
 			XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
 			LEFT.br_blockcount + PREV.br_blockcount +
 			RIGHT.br_blockcount);
-		xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1,
-			XFS_DATA_FORK);
-		xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2,
+		XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC|RC", ip, idx - 1,
 			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_DELETE("LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK);
 		xfs_iext_remove(ifp, idx, 2);
 		ip->i_df.if_lastex = idx - 1;
 		ip->i_d.di_nextents -= 2;
@@ -1441,15 +1422,14 @@ xfs_bmap_add_extent_unwritten_real(
 		 * Setting all of a previous oldext extent to newext.
 		 * The left neighbor is contiguous, the right is not.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1,
+		XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC", ip, idx - 1,
 			XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
 			LEFT.br_blockcount + PREV.br_blockcount);
-		xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1,
+		XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC", ip, idx - 1,
 			XFS_DATA_FORK);
 		ip->i_df.if_lastex = idx - 1;
-		xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_DELETE("LF|RF|LC", ip, idx, 1, XFS_DATA_FORK);
 		xfs_iext_remove(ifp, idx, 1);
 		ip->i_d.di_nextents--;
 		if (cur == NULL)
@@ -1484,16 +1464,15 @@ xfs_bmap_add_extent_unwritten_real(
 		 * Setting all of a previous oldext extent to newext.
 		 * The right neighbor is contiguous, the left is not.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LF|RF|RC", ip, idx,
+		XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|RC", ip, idx,
 			XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(ep,
 			PREV.br_blockcount + RIGHT.br_blockcount);
 		xfs_bmbt_set_state(ep, newext);
-		xfs_bmap_trace_post_update(fname, "LF|RF|RC", ip, idx,
+		XFS_BMAP_TRACE_POST_UPDATE("LF|RF|RC", ip, idx,
 			XFS_DATA_FORK);
 		ip->i_df.if_lastex = idx;
-		xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_DELETE("LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK);
 		xfs_iext_remove(ifp, idx + 1, 1);
 		ip->i_d.di_nextents--;
 		if (cur == NULL)
@@ -1529,10 +1508,10 @@ xfs_bmap_add_extent_unwritten_real(
 		 * Neither the left nor right neighbors are contiguous with
 		 * the new one.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LF|RF", ip, idx,
+		XFS_BMAP_TRACE_PRE_UPDATE("LF|RF", ip, idx,
 			XFS_DATA_FORK);
 		xfs_bmbt_set_state(ep, newext);
-		xfs_bmap_trace_post_update(fname, "LF|RF", ip, idx,
+		XFS_BMAP_TRACE_POST_UPDATE("LF|RF", ip, idx,
 			XFS_DATA_FORK);
 		ip->i_df.if_lastex = idx;
 		if (cur == NULL)
@@ -1559,21 +1538,21 @@ xfs_bmap_add_extent_unwritten_real(
 		 * Setting the first part of a previous oldext extent to newext.
 		 * The left neighbor is contiguous.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1,
+		XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx - 1,
 			XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
 			LEFT.br_blockcount + new->br_blockcount);
 		xfs_bmbt_set_startoff(ep,
 			PREV.br_startoff + new->br_blockcount);
-		xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx - 1,
+		XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx - 1,
 			XFS_DATA_FORK);
-		xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx,
+		XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx,
 			XFS_DATA_FORK);
 		xfs_bmbt_set_startblock(ep,
 			new->br_startblock + new->br_blockcount);
 		xfs_bmbt_set_blockcount(ep,
 			PREV.br_blockcount - new->br_blockcount);
-		xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx,
+		XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx,
 			XFS_DATA_FORK);
 		ip->i_df.if_lastex = idx - 1;
 		if (cur == NULL)
@@ -1610,15 +1589,15 @@ xfs_bmap_add_extent_unwritten_real(
 		 * Setting the first part of a previous oldext extent to newext.
 		 * The left neighbor is not contiguous.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LF", ip, idx, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("LF", ip, idx, XFS_DATA_FORK);
 		ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
 		xfs_bmbt_set_startoff(ep, new_endoff);
 		xfs_bmbt_set_blockcount(ep,
 			PREV.br_blockcount - new->br_blockcount);
 		xfs_bmbt_set_startblock(ep,
 			new->br_startblock + new->br_blockcount);
-		xfs_bmap_trace_post_update(fname, "LF", ip, idx, XFS_DATA_FORK);
-		xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL,
+		XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_INSERT("LF", ip, idx, 1, new, NULL,
 			XFS_DATA_FORK);
 		xfs_iext_insert(ifp, idx, 1, new);
 		ip->i_df.if_lastex = idx;
@@ -1653,18 +1632,18 @@ xfs_bmap_add_extent_unwritten_real(
 		 * Setting the last part of a previous oldext extent to newext.
 		 * The right neighbor is contiguous with the new allocation.
 		 */
-		xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx,
+		XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx,
 			XFS_DATA_FORK);
-		xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1,
+		XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx + 1,
 			XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(ep,
 			PREV.br_blockcount - new->br_blockcount);
-		xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx,
+		XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx,
 			XFS_DATA_FORK);
 		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
 			new->br_startoff, new->br_startblock,
 			new->br_blockcount + RIGHT.br_blockcount, newext);
-		xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1,
+		XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx + 1,
 			XFS_DATA_FORK);
 		ip->i_df.if_lastex = idx + 1;
 		if (cur == NULL)
@@ -1700,12 +1679,12 @@ xfs_bmap_add_extent_unwritten_real(
 		 * Setting the last part of a previous oldext extent to newext.
 		 * The right neighbor is not contiguous.
 		 */
-		xfs_bmap_trace_pre_update(fname, "RF", ip, idx, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("RF", ip, idx, XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(ep,
 			PREV.br_blockcount - new->br_blockcount);
-		xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK);
-		xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1,
-			new, NULL, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_INSERT("RF", ip, idx + 1, 1, new, NULL,
+			XFS_DATA_FORK);
 		xfs_iext_insert(ifp, idx + 1, 1, new);
 		ip->i_df.if_lastex = idx + 1;
 		ip->i_d.di_nextents++;
@@ -1744,17 +1723,17 @@ xfs_bmap_add_extent_unwritten_real(
 		 * newext.  Contiguity is impossible here.
 		 * One extent becomes three extents.
 		 */
-		xfs_bmap_trace_pre_update(fname, "0", ip, idx, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(ep,
 			new->br_startoff - PREV.br_startoff);
-		xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK);
 		r[0] = *new;
 		r[1].br_startoff = new_endoff;
 		r[1].br_blockcount =
 			PREV.br_startoff + PREV.br_blockcount - new_endoff;
 		r[1].br_startblock = new->br_startblock + new->br_blockcount;
 		r[1].br_state = oldext;
-		xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1],
+		XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 2, &r[0], &r[1],
 			XFS_DATA_FORK);
 		xfs_iext_insert(ifp, idx + 1, 2, &r[0]);
 		ip->i_df.if_lastex = idx + 1;
@@ -1845,9 +1824,6 @@ xfs_bmap_add_extent_hole_delay(
 	int			rsvd)		/* OK to allocate reserved blocks */
 {
 	xfs_bmbt_rec_t		*ep;	/* extent record for idx */
-#ifdef XFS_BMAP_TRACE
-	static char		fname[] = "xfs_bmap_add_extent_hole_delay";
-#endif
 	xfs_ifork_t		*ifp;	/* inode fork pointer */
 	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
 	xfs_filblks_t		newlen=0;	/* new indirect size */
@@ -1919,7 +1895,7 @@ xfs_bmap_add_extent_hole_delay(
 		 */
 		temp = left.br_blockcount + new->br_blockcount +
 			right.br_blockcount;
-		xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1,
+		XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1,
 			XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
 		oldlen = STARTBLOCKVAL(left.br_startblock) +
@@ -1928,10 +1904,9 @@ xfs_bmap_add_extent_hole_delay(
 		newlen = xfs_bmap_worst_indlen(ip, temp);
 		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
 			NULLSTARTBLOCK((int)newlen));
-		xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1,
-			XFS_DATA_FORK);
-		xfs_bmap_trace_delete(fname, "LC|RC", ip, idx, 1,
+		XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1,
 			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, XFS_DATA_FORK);
 		xfs_iext_remove(ifp, idx, 1);
 		ip->i_df.if_lastex = idx - 1;
 		/* DELTA: Two in-core extents were replaced by one. */
@@ -1946,7 +1921,7 @@ xfs_bmap_add_extent_hole_delay(
 		 * Merge the new allocation with the left neighbor.
 		 */
 		temp = left.br_blockcount + new->br_blockcount;
-		xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1,
+		XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1,
 			XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
 		oldlen = STARTBLOCKVAL(left.br_startblock) +
@@ -1954,7 +1929,7 @@ xfs_bmap_add_extent_hole_delay(
 		newlen = xfs_bmap_worst_indlen(ip, temp);
 		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
 			NULLSTARTBLOCK((int)newlen));
-		xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1,
+		XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1,
 			XFS_DATA_FORK);
 		ip->i_df.if_lastex = idx - 1;
 		/* DELTA: One in-core extent grew into a hole. */
@@ -1968,14 +1943,14 @@ xfs_bmap_add_extent_hole_delay(
 		 * on the right.
 		 * Merge the new allocation with the right neighbor.
 		 */
-		xfs_bmap_trace_pre_update(fname, "RC", ip, idx, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, XFS_DATA_FORK);
 		temp = new->br_blockcount + right.br_blockcount;
 		oldlen = STARTBLOCKVAL(new->br_startblock) +
 			STARTBLOCKVAL(right.br_startblock);
 		newlen = xfs_bmap_worst_indlen(ip, temp);
 		xfs_bmbt_set_allf(ep, new->br_startoff,
 			NULLSTARTBLOCK((int)newlen), temp, right.br_state);
-		xfs_bmap_trace_post_update(fname, "RC", ip, idx, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, XFS_DATA_FORK);
 		ip->i_df.if_lastex = idx;
 		/* DELTA: One in-core extent grew into a hole. */
 		temp2 = temp;
@@ -1989,7 +1964,7 @@ xfs_bmap_add_extent_hole_delay(
 		 * Insert a new entry.
 		 */
 		oldlen = newlen = 0;
-		xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL,
+		XFS_BMAP_TRACE_INSERT("0", ip, idx, 1, new, NULL,
 			XFS_DATA_FORK);
 		xfs_iext_insert(ifp, idx, 1, new);
 		ip->i_df.if_lastex = idx;
@@ -2039,9 +2014,6 @@ xfs_bmap_add_extent_hole_real(
 {
 	xfs_bmbt_rec_t		*ep;	/* pointer to extent entry ins. point */
 	int			error;	/* error return value */
-#ifdef XFS_BMAP_TRACE
-	static char		fname[] = "xfs_bmap_add_extent_hole_real";
-#endif
 	int			i;	/* temp state */
 	xfs_ifork_t		*ifp;	/* inode fork pointer */
 	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
@@ -2118,15 +2090,14 @@ xfs_bmap_add_extent_hole_real(
 		 * left and on the right.
 		 * Merge all three into a single extent record.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1,
+		XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1,
 			whichfork);
 		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
 			left.br_blockcount + new->br_blockcount +
 			right.br_blockcount);
-		xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1,
+		XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1,
 			whichfork);
-		xfs_bmap_trace_delete(fname, "LC|RC", ip,
-			idx, 1, whichfork);
+		XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, whichfork);
 		xfs_iext_remove(ifp, idx, 1);
 		ifp->if_lastex = idx - 1;
 		XFS_IFORK_NEXT_SET(ip, whichfork,
@@ -2168,10 +2139,10 @@ xfs_bmap_add_extent_hole_real(
 		 * on the left.
 		 * Merge the new allocation with the left neighbor.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1, whichfork);
+		XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1, whichfork);
 		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
 			left.br_blockcount + new->br_blockcount);
-		xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, whichfork);
+		XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1, whichfork);
 		ifp->if_lastex = idx - 1;
 		if (cur == NULL) {
 			rval = XFS_ILOG_FEXT(whichfork);
@@ -2202,11 +2173,11 @@ xfs_bmap_add_extent_hole_real(
 		 * on the right.
 		 * Merge the new allocation with the right neighbor.
 		 */
-		xfs_bmap_trace_pre_update(fname, "RC", ip, idx, whichfork);
+		XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, whichfork);
 		xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock,
 			new->br_blockcount + right.br_blockcount,
 			right.br_state);
-		xfs_bmap_trace_post_update(fname, "RC", ip, idx, whichfork);
+		XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, whichfork);
 		ifp->if_lastex = idx;
 		if (cur == NULL) {
 			rval = XFS_ILOG_FEXT(whichfork);
@@ -2237,8 +2208,7 @@ xfs_bmap_add_extent_hole_real(
 		 * real allocation.
 		 * Insert a new entry.
 		 */
-		xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL,
-			whichfork);
+		XFS_BMAP_TRACE_INSERT("0", ip, idx, 1, new, NULL, whichfork);
 		xfs_iext_insert(ifp, idx, 1, new);
 		ifp->if_lastex = idx;
 		XFS_IFORK_NEXT_SET(ip, whichfork,
@@ -2605,12 +2575,10 @@ xfs_bmap_rtalloc(
 	xfs_extlen_t	prod = 0;	/* product factor for allocators */
 	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
 	xfs_extlen_t	align;		/* minimum allocation alignment */
-	xfs_rtblock_t	rtx;		/* realtime extent number */
 	xfs_rtblock_t	rtb;
 
 	mp = ap->ip->i_mount;
-	align = ap->ip->i_d.di_extsize ?
-		ap->ip->i_d.di_extsize : mp->m_sb.sb_rextsize;
+	align = xfs_get_extsz_hint(ap->ip);
 	prod = align / mp->m_sb.sb_rextsize;
 	error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
 			align, 1, ap->eof, 0,
@@ -2644,6 +2612,8 @@ xfs_bmap_rtalloc(
 	 * pick an extent that will space things out in the rt area.
 	 */
 	if (ap->eof && ap->off == 0) {
+		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
+
 		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
 		if (error)
 			return error;
@@ -2715,9 +2685,7 @@ xfs_bmap_btalloc(
2715 int error; 2685 int error;
2716 2686
2717 mp = ap->ip->i_mount; 2687 mp = ap->ip->i_mount;
2718 align = (ap->userdata && ap->ip->i_d.di_extsize && 2688 align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
2719 (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)) ?
2720 ap->ip->i_d.di_extsize : 0;
2721 if (unlikely(align)) { 2689 if (unlikely(align)) {
2722 error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp, 2690 error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
2723 align, 0, ap->eof, 0, ap->conv, 2691 align, 0, ap->eof, 0, ap->conv,
@@ -2727,9 +2695,15 @@ xfs_bmap_btalloc(
2727 } 2695 }
2728 nullfb = ap->firstblock == NULLFSBLOCK; 2696 nullfb = ap->firstblock == NULLFSBLOCK;
2729 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock); 2697 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
2730 if (nullfb) 2698 if (nullfb) {
2731 ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino); 2699 if (ap->userdata && xfs_inode_is_filestream(ap->ip)) {
2732 else 2700 ag = xfs_filestream_lookup_ag(ap->ip);
2701 ag = (ag != NULLAGNUMBER) ? ag : 0;
2702 ap->rval = XFS_AGB_TO_FSB(mp, ag, 0);
2703 } else {
2704 ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
2705 }
2706 } else
2733 ap->rval = ap->firstblock; 2707 ap->rval = ap->firstblock;
2734 2708
2735 xfs_bmap_adjacent(ap); 2709 xfs_bmap_adjacent(ap);
@@ -2753,13 +2727,22 @@ xfs_bmap_btalloc(
2753 args.firstblock = ap->firstblock; 2727 args.firstblock = ap->firstblock;
2754 blen = 0; 2728 blen = 0;
2755 if (nullfb) { 2729 if (nullfb) {
2756 args.type = XFS_ALLOCTYPE_START_BNO; 2730 if (ap->userdata && xfs_inode_is_filestream(ap->ip))
2731 args.type = XFS_ALLOCTYPE_NEAR_BNO;
2732 else
2733 args.type = XFS_ALLOCTYPE_START_BNO;
2757 args.total = ap->total; 2734 args.total = ap->total;
2735
2758 /* 2736 /*
2759 * Find the longest available space. 2737 * Search for an allocation group with a single extent
2760 * We're going to try for the whole allocation at once. 2738 * large enough for the request.
2739 *
2740 * If one isn't found, then adjust the minimum allocation
2741 * size to the largest space found.
2761 */ 2742 */
2762 startag = ag = XFS_FSB_TO_AGNO(mp, args.fsbno); 2743 startag = ag = XFS_FSB_TO_AGNO(mp, args.fsbno);
2744 if (startag == NULLAGNUMBER)
2745 startag = ag = 0;
2763 notinit = 0; 2746 notinit = 0;
2764 down_read(&mp->m_peraglock); 2747 down_read(&mp->m_peraglock);
2765 while (blen < ap->alen) { 2748 while (blen < ap->alen) {
@@ -2785,6 +2768,35 @@ xfs_bmap_btalloc(
2785 blen = longest; 2768 blen = longest;
2786 } else 2769 } else
2787 notinit = 1; 2770 notinit = 1;
2771
2772 if (xfs_inode_is_filestream(ap->ip)) {
2773 if (blen >= ap->alen)
2774 break;
2775
2776 if (ap->userdata) {
2777 /*
2778 * If startag is an invalid AG, we've
2779 * come here once before and
2780 * xfs_filestream_new_ag picked the
2781 * best currently available.
2782 *
2783 * Don't continue looping, since we
2784 * could loop forever.
2785 */
2786 if (startag == NULLAGNUMBER)
2787 break;
2788
2789 error = xfs_filestream_new_ag(ap, &ag);
2790 if (error) {
2791 up_read(&mp->m_peraglock);
2792 return error;
2793 }
2794
2795 /* loop again to set 'blen'*/
2796 startag = NULLAGNUMBER;
2797 continue;
2798 }
2799 }
2788 if (++ag == mp->m_sb.sb_agcount) 2800 if (++ag == mp->m_sb.sb_agcount)
2789 ag = 0; 2801 ag = 0;
2790 if (ag == startag) 2802 if (ag == startag)
@@ -2809,17 +2821,27 @@ xfs_bmap_btalloc(
2809 */ 2821 */
2810 else 2822 else
2811 args.minlen = ap->alen; 2823 args.minlen = ap->alen;
2824
2825 /*
2826 * set the failure fallback case to look in the selected
2827 * AG as the stream may have moved.
2828 */
2829 if (xfs_inode_is_filestream(ap->ip))
2830 ap->rval = args.fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
2812 } else if (ap->low) { 2831 } else if (ap->low) {
2813 args.type = XFS_ALLOCTYPE_START_BNO; 2832 if (xfs_inode_is_filestream(ap->ip))
2833 args.type = XFS_ALLOCTYPE_FIRST_AG;
2834 else
2835 args.type = XFS_ALLOCTYPE_START_BNO;
2814 args.total = args.minlen = ap->minlen; 2836 args.total = args.minlen = ap->minlen;
2815 } else { 2837 } else {
2816 args.type = XFS_ALLOCTYPE_NEAR_BNO; 2838 args.type = XFS_ALLOCTYPE_NEAR_BNO;
2817 args.total = ap->total; 2839 args.total = ap->total;
2818 args.minlen = ap->minlen; 2840 args.minlen = ap->minlen;
2819 } 2841 }
2820 if (unlikely(ap->userdata && ap->ip->i_d.di_extsize && 2842 /* apply extent size hints if obtained earlier */
2821 (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE))) { 2843 if (unlikely(align)) {
2822 args.prod = ap->ip->i_d.di_extsize; 2844 args.prod = align;
2823 if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod))) 2845 if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod)))
2824 args.mod = (xfs_extlen_t)(args.prod - args.mod); 2846 args.mod = (xfs_extlen_t)(args.prod - args.mod);
2825 } else if (mp->m_sb.sb_blocksize >= NBPP) { 2847 } else if (mp->m_sb.sb_blocksize >= NBPP) {
@@ -3051,9 +3073,6 @@ xfs_bmap_del_extent(
3051 xfs_bmbt_rec_t *ep; /* current extent entry pointer */ 3073 xfs_bmbt_rec_t *ep; /* current extent entry pointer */
3052 int error; /* error return value */ 3074 int error; /* error return value */
3053 int flags; /* inode logging flags */ 3075 int flags; /* inode logging flags */
3054#ifdef XFS_BMAP_TRACE
3055 static char fname[] = "xfs_bmap_del_extent";
3056#endif
3057 xfs_bmbt_irec_t got; /* current extent entry */ 3076 xfs_bmbt_irec_t got; /* current extent entry */
3058 xfs_fileoff_t got_endoff; /* first offset past got */ 3077 xfs_fileoff_t got_endoff; /* first offset past got */
3059 int i; /* temp state */ 3078 int i; /* temp state */
@@ -3147,7 +3166,7 @@ xfs_bmap_del_extent(
3147 /* 3166 /*
3148 * Matches the whole extent. Delete the entry. 3167 * Matches the whole extent. Delete the entry.
3149 */ 3168 */
3150 xfs_bmap_trace_delete(fname, "3", ip, idx, 1, whichfork); 3169 XFS_BMAP_TRACE_DELETE("3", ip, idx, 1, whichfork);
3151 xfs_iext_remove(ifp, idx, 1); 3170 xfs_iext_remove(ifp, idx, 1);
3152 ifp->if_lastex = idx; 3171 ifp->if_lastex = idx;
3153 if (delay) 3172 if (delay)
@@ -3168,7 +3187,7 @@ xfs_bmap_del_extent(
3168 /* 3187 /*
3169 * Deleting the first part of the extent. 3188 * Deleting the first part of the extent.
3170 */ 3189 */
3171 xfs_bmap_trace_pre_update(fname, "2", ip, idx, whichfork); 3190 XFS_BMAP_TRACE_PRE_UPDATE("2", ip, idx, whichfork);
3172 xfs_bmbt_set_startoff(ep, del_endoff); 3191 xfs_bmbt_set_startoff(ep, del_endoff);
3173 temp = got.br_blockcount - del->br_blockcount; 3192 temp = got.br_blockcount - del->br_blockcount;
3174 xfs_bmbt_set_blockcount(ep, temp); 3193 xfs_bmbt_set_blockcount(ep, temp);
@@ -3177,13 +3196,13 @@ xfs_bmap_del_extent(
3177 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 3196 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
3178 da_old); 3197 da_old);
3179 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); 3198 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
3180 xfs_bmap_trace_post_update(fname, "2", ip, idx, 3199 XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx,
3181 whichfork); 3200 whichfork);
3182 da_new = temp; 3201 da_new = temp;
3183 break; 3202 break;
3184 } 3203 }
3185 xfs_bmbt_set_startblock(ep, del_endblock); 3204 xfs_bmbt_set_startblock(ep, del_endblock);
3186 xfs_bmap_trace_post_update(fname, "2", ip, idx, whichfork); 3205 XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx, whichfork);
3187 if (!cur) { 3206 if (!cur) {
3188 flags |= XFS_ILOG_FEXT(whichfork); 3207 flags |= XFS_ILOG_FEXT(whichfork);
3189 break; 3208 break;
@@ -3199,19 +3218,19 @@ xfs_bmap_del_extent(
3199 * Deleting the last part of the extent. 3218 * Deleting the last part of the extent.
3200 */ 3219 */
3201 temp = got.br_blockcount - del->br_blockcount; 3220 temp = got.br_blockcount - del->br_blockcount;
3202 xfs_bmap_trace_pre_update(fname, "1", ip, idx, whichfork); 3221 XFS_BMAP_TRACE_PRE_UPDATE("1", ip, idx, whichfork);
3203 xfs_bmbt_set_blockcount(ep, temp); 3222 xfs_bmbt_set_blockcount(ep, temp);
3204 ifp->if_lastex = idx; 3223 ifp->if_lastex = idx;
3205 if (delay) { 3224 if (delay) {
3206 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 3225 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
3207 da_old); 3226 da_old);
3208 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); 3227 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
3209 xfs_bmap_trace_post_update(fname, "1", ip, idx, 3228 XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx,
3210 whichfork); 3229 whichfork);
3211 da_new = temp; 3230 da_new = temp;
3212 break; 3231 break;
3213 } 3232 }
3214 xfs_bmap_trace_post_update(fname, "1", ip, idx, whichfork); 3233 XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx, whichfork);
3215 if (!cur) { 3234 if (!cur) {
3216 flags |= XFS_ILOG_FEXT(whichfork); 3235 flags |= XFS_ILOG_FEXT(whichfork);
3217 break; 3236 break;
@@ -3228,7 +3247,7 @@ xfs_bmap_del_extent(
3228 * Deleting the middle of the extent. 3247 * Deleting the middle of the extent.
3229 */ 3248 */
3230 temp = del->br_startoff - got.br_startoff; 3249 temp = del->br_startoff - got.br_startoff;
3231 xfs_bmap_trace_pre_update(fname, "0", ip, idx, whichfork); 3250 XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, whichfork);
3232 xfs_bmbt_set_blockcount(ep, temp); 3251 xfs_bmbt_set_blockcount(ep, temp);
3233 new.br_startoff = del_endoff; 3252 new.br_startoff = del_endoff;
3234 temp2 = got_endoff - del_endoff; 3253 temp2 = got_endoff - del_endoff;
@@ -3315,8 +3334,8 @@ xfs_bmap_del_extent(
3315 } 3334 }
3316 } 3335 }
3317 } 3336 }
3318 xfs_bmap_trace_post_update(fname, "0", ip, idx, whichfork); 3337 XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, whichfork);
3319 xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 1, &new, NULL, 3338 XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 1, &new, NULL,
3320 whichfork); 3339 whichfork);
3321 xfs_iext_insert(ifp, idx + 1, 1, &new); 3340 xfs_iext_insert(ifp, idx + 1, 1, &new);
3322 ifp->if_lastex = idx + 1; 3341 ifp->if_lastex = idx + 1;
@@ -3556,9 +3575,6 @@ xfs_bmap_local_to_extents(
3556{ 3575{
3557 int error; /* error return value */ 3576 int error; /* error return value */
3558 int flags; /* logging flags returned */ 3577 int flags; /* logging flags returned */
3559#ifdef XFS_BMAP_TRACE
3560 static char fname[] = "xfs_bmap_local_to_extents";
3561#endif
3562 xfs_ifork_t *ifp; /* inode fork pointer */ 3578 xfs_ifork_t *ifp; /* inode fork pointer */
3563 3579
3564 /* 3580 /*
@@ -3613,7 +3629,7 @@ xfs_bmap_local_to_extents(
3613 xfs_iext_add(ifp, 0, 1); 3629 xfs_iext_add(ifp, 0, 1);
3614 ep = xfs_iext_get_ext(ifp, 0); 3630 ep = xfs_iext_get_ext(ifp, 0);
3615 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM); 3631 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
3616 xfs_bmap_trace_post_update(fname, "new", ip, 0, whichfork); 3632 XFS_BMAP_TRACE_POST_UPDATE("new", ip, 0, whichfork);
3617 XFS_IFORK_NEXT_SET(ip, whichfork, 1); 3633 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
3618 ip->i_d.di_nblocks = 1; 3634 ip->i_d.di_nblocks = 1;
3619 XFS_TRANS_MOD_DQUOT_BYINO(args.mp, tp, ip, 3635 XFS_TRANS_MOD_DQUOT_BYINO(args.mp, tp, ip,
@@ -3736,7 +3752,7 @@ ktrace_t *xfs_bmap_trace_buf;
3736STATIC void 3752STATIC void
3737xfs_bmap_trace_addentry( 3753xfs_bmap_trace_addentry(
3738 int opcode, /* operation */ 3754 int opcode, /* operation */
3739 char *fname, /* function name */ 3755 const char *fname, /* function name */
3740 char *desc, /* operation description */ 3756 char *desc, /* operation description */
3741 xfs_inode_t *ip, /* incore inode pointer */ 3757 xfs_inode_t *ip, /* incore inode pointer */
3742 xfs_extnum_t idx, /* index of entry(ies) */ 3758 xfs_extnum_t idx, /* index of entry(ies) */
@@ -3795,7 +3811,7 @@ xfs_bmap_trace_addentry(
3795 */ 3811 */
3796STATIC void 3812STATIC void
3797xfs_bmap_trace_delete( 3813xfs_bmap_trace_delete(
3798 char *fname, /* function name */ 3814 const char *fname, /* function name */
3799 char *desc, /* operation description */ 3815 char *desc, /* operation description */
3800 xfs_inode_t *ip, /* incore inode pointer */ 3816 xfs_inode_t *ip, /* incore inode pointer */
3801 xfs_extnum_t idx, /* index of entry(entries) deleted */ 3817 xfs_extnum_t idx, /* index of entry(entries) deleted */
@@ -3817,7 +3833,7 @@ xfs_bmap_trace_delete(
3817 */ 3833 */
3818STATIC void 3834STATIC void
3819xfs_bmap_trace_insert( 3835xfs_bmap_trace_insert(
3820 char *fname, /* function name */ 3836 const char *fname, /* function name */
3821 char *desc, /* operation description */ 3837 char *desc, /* operation description */
3822 xfs_inode_t *ip, /* incore inode pointer */ 3838 xfs_inode_t *ip, /* incore inode pointer */
3823 xfs_extnum_t idx, /* index of entry(entries) inserted */ 3839 xfs_extnum_t idx, /* index of entry(entries) inserted */
@@ -3846,7 +3862,7 @@ xfs_bmap_trace_insert(
3846 */ 3862 */
3847STATIC void 3863STATIC void
3848xfs_bmap_trace_post_update( 3864xfs_bmap_trace_post_update(
3849 char *fname, /* function name */ 3865 const char *fname, /* function name */
3850 char *desc, /* operation description */ 3866 char *desc, /* operation description */
3851 xfs_inode_t *ip, /* incore inode pointer */ 3867 xfs_inode_t *ip, /* incore inode pointer */
3852 xfs_extnum_t idx, /* index of entry updated */ 3868 xfs_extnum_t idx, /* index of entry updated */
@@ -3864,7 +3880,7 @@ xfs_bmap_trace_post_update(
3864 */ 3880 */
3865STATIC void 3881STATIC void
3866xfs_bmap_trace_pre_update( 3882xfs_bmap_trace_pre_update(
3867 char *fname, /* function name */ 3883 const char *fname, /* function name */
3868 char *desc, /* operation description */ 3884 char *desc, /* operation description */
3869 xfs_inode_t *ip, /* incore inode pointer */ 3885 xfs_inode_t *ip, /* incore inode pointer */
3870 xfs_extnum_t idx, /* index of entry to be updated */ 3886 xfs_extnum_t idx, /* index of entry to be updated */
@@ -4481,9 +4497,6 @@ xfs_bmap_read_extents(
4481 xfs_buf_t *bp; /* buffer for "block" */ 4497 xfs_buf_t *bp; /* buffer for "block" */
4482 int error; /* error return value */ 4498 int error; /* error return value */
4483 xfs_exntfmt_t exntf; /* XFS_EXTFMT_NOSTATE, if checking */ 4499 xfs_exntfmt_t exntf; /* XFS_EXTFMT_NOSTATE, if checking */
4484#ifdef XFS_BMAP_TRACE
4485 static char fname[] = "xfs_bmap_read_extents";
4486#endif
4487 xfs_extnum_t i, j; /* index into the extents list */ 4500 xfs_extnum_t i, j; /* index into the extents list */
4488 xfs_ifork_t *ifp; /* fork structure */ 4501 xfs_ifork_t *ifp; /* fork structure */
4489 int level; /* btree level, for checking */ 4502 int level; /* btree level, for checking */
@@ -4600,7 +4613,7 @@ xfs_bmap_read_extents(
4600 } 4613 }
4601 ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))); 4614 ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
4602 ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork)); 4615 ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
4603 xfs_bmap_trace_exlist(fname, ip, i, whichfork); 4616 XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
4604 return 0; 4617 return 0;
4605error0: 4618error0:
4606 xfs_trans_brelse(tp, bp); 4619 xfs_trans_brelse(tp, bp);
@@ -4613,7 +4626,7 @@ error0:
4613 */ 4626 */
4614void 4627void
4615xfs_bmap_trace_exlist( 4628xfs_bmap_trace_exlist(
4616 char *fname, /* function name */ 4629 const char *fname, /* function name */
4617 xfs_inode_t *ip, /* incore inode pointer */ 4630 xfs_inode_t *ip, /* incore inode pointer */
4618 xfs_extnum_t cnt, /* count of entries in the list */ 4631 xfs_extnum_t cnt, /* count of entries in the list */
4619 int whichfork) /* data or attr fork */ 4632 int whichfork) /* data or attr fork */
@@ -4628,7 +4641,7 @@ xfs_bmap_trace_exlist(
4628 for (idx = 0; idx < cnt; idx++) { 4641 for (idx = 0; idx < cnt; idx++) {
4629 ep = xfs_iext_get_ext(ifp, idx); 4642 ep = xfs_iext_get_ext(ifp, idx);
4630 xfs_bmbt_get_all(ep, &s); 4643 xfs_bmbt_get_all(ep, &s);
4631 xfs_bmap_trace_insert(fname, "exlist", ip, idx, 1, &s, NULL, 4644 XFS_BMAP_TRACE_INSERT("exlist", ip, idx, 1, &s, NULL,
4632 whichfork); 4645 whichfork);
4633 } 4646 }
4634} 4647}
@@ -4868,12 +4881,7 @@ xfs_bmapi(
4868 xfs_extlen_t extsz; 4881 xfs_extlen_t extsz;
4869 4882
4870 /* Figure out the extent size, adjust alen */ 4883 /* Figure out the extent size, adjust alen */
4871 if (rt) { 4884 extsz = xfs_get_extsz_hint(ip);
4872 if (!(extsz = ip->i_d.di_extsize))
4873 extsz = mp->m_sb.sb_rextsize;
4874 } else {
4875 extsz = ip->i_d.di_extsize;
4876 }
4877 if (extsz) { 4885 if (extsz) {
4878 error = xfs_bmap_extsize_align(mp, 4886 error = xfs_bmap_extsize_align(mp,
4879 &got, &prev, extsz, 4887 &got, &prev, extsz,
@@ -5219,10 +5227,10 @@ xfs_bmapi(
5219 * Else go on to the next record. 5227 * Else go on to the next record.
5220 */ 5228 */
5221 ep = xfs_iext_get_ext(ifp, ++lastx); 5229 ep = xfs_iext_get_ext(ifp, ++lastx);
5222 if (lastx >= nextents) { 5230 prev = got;
5231 if (lastx >= nextents)
5223 eof = 1; 5232 eof = 1;
5224 prev = got; 5233 else
5225 } else
5226 xfs_bmbt_get_all(ep, &got); 5234 xfs_bmbt_get_all(ep, &got);
5227 } 5235 }
5228 ifp->if_lastex = lastx; 5236 ifp->if_lastex = lastx;
@@ -5813,8 +5821,7 @@ xfs_getbmap(
5813 ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) 5821 ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
5814 return XFS_ERROR(EINVAL); 5822 return XFS_ERROR(EINVAL);
5815 if (whichfork == XFS_DATA_FORK) { 5823 if (whichfork == XFS_DATA_FORK) {
5816 if ((ip->i_d.di_extsize && (ip->i_d.di_flags & 5824 if (xfs_get_extsz_hint(ip) ||
5817 (XFS_DIFLAG_REALTIME|XFS_DIFLAG_EXTSIZE))) ||
5818 ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){ 5825 ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
5819 prealloced = 1; 5826 prealloced = 1;
5820 fixlen = XFS_MAXIOFFSET(mp); 5827 fixlen = XFS_MAXIOFFSET(mp);
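
The hunks above replace four open-coded extent size hint computations (xfs_bmap_rtalloc, xfs_bmap_btalloc, xfs_bmapi and xfs_getbmap) with calls to xfs_get_extsz_hint(). The helper's own body is not part of this file's diff, so the following is only a sketch of the logic implied by the deleted expressions: prefer the per-inode di_extsize hint when the extent size flag is set, fall back to the filesystem realtime extent size for realtime inodes, otherwise report no hint. All mock_* names are illustrative stand-ins, not the kernel's types.

#include <stdio.h>

/* Illustrative stand-ins for the inode fields used by the removed
 * call sites; not the real xfs_inode/xfs_mount layouts. */
#define MOCK_DIFLAG_REALTIME 0x01
#define MOCK_DIFLAG_EXTSIZE  0x02

struct mock_inode {
        unsigned int di_flags;    /* inode flags */
        unsigned int di_extsize;  /* per-inode extent size hint, in blocks */
        unsigned int sb_rextsize; /* fs realtime extent size, in blocks */
};

/*
 * Sketch of the consolidated hint lookup, reconstructed from the
 * expressions this patch deletes:
 *   rtalloc:  di_extsize ? di_extsize : sb_rextsize
 *   btalloc:  (di_extsize && (di_flags & EXTSIZE)) ? di_extsize : 0
 */
static unsigned int mock_get_extsz_hint(const struct mock_inode *ip)
{
        if ((ip->di_flags & MOCK_DIFLAG_EXTSIZE) && ip->di_extsize)
                return ip->di_extsize;
        if (ip->di_flags & MOCK_DIFLAG_REALTIME)
                return ip->sb_rextsize;
        return 0;
}

int main(void)
{
        struct mock_inode rt = { MOCK_DIFLAG_REALTIME, 0, 16 };
        struct mock_inode hinted = { MOCK_DIFLAG_EXTSIZE, 8, 16 };

        printf("rt inode hint:     %u\n", mock_get_extsz_hint(&rt));     /* 16 */
        printf("hinted inode hint: %u\n", mock_get_extsz_hint(&hinted)); /* 8 */
        return 0;
}
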
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 4f24c7e39b31..524b1c9d5246 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -144,12 +144,14 @@ extern ktrace_t *xfs_bmap_trace_buf;
144 */ 144 */
145void 145void
146xfs_bmap_trace_exlist( 146xfs_bmap_trace_exlist(
147 char *fname, /* function name */ 147 const char *fname, /* function name */
148 struct xfs_inode *ip, /* incore inode pointer */ 148 struct xfs_inode *ip, /* incore inode pointer */
149 xfs_extnum_t cnt, /* count of entries in list */ 149 xfs_extnum_t cnt, /* count of entries in list */
150 int whichfork); /* data or attr fork */ 150 int whichfork); /* data or attr fork */
151#define XFS_BMAP_TRACE_EXLIST(ip,c,w) \
152 xfs_bmap_trace_exlist(__FUNCTION__,ip,c,w)
151#else 153#else
152#define xfs_bmap_trace_exlist(f,ip,c,w) 154#define XFS_BMAP_TRACE_EXLIST(ip,c,w)
153#endif 155#endif
154 156
155/* 157/*
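
This header hunk shows the pattern behind the tracing cleanup in the hunks above: xfs_bmap_trace_exlist() keeps an explicit const char *fname argument, and the new uppercase wrapper supplies __FUNCTION__ at the call site, which is why the per-function `static char fname[]` arrays could be deleted from xfs_bmap.c. A minimal self-contained illustration of the same pattern, using invented demo_* names:

#include <stdio.h>

/* Trace sink: takes the caller's name explicitly. */
static void demo_trace(const char *fname, const char *desc, int idx)
{
        printf("%s: %s (idx=%d)\n", fname, desc, idx);
}

/*
 * Wrapper macro in the style of XFS_BMAP_TRACE_EXLIST: the compiler
 * expands __FUNCTION__ (the GCC spelling; C99 would use __func__) to
 * the name of the enclosing function, so callers need no static
 * name string of their own.
 */
#define DEMO_TRACE(desc, idx) demo_trace(__FUNCTION__, (desc), (idx))

static void some_operation(void)
{
        DEMO_TRACE("enter", 0);   /* prints "some_operation: enter (idx=0)" */
        DEMO_TRACE("exit", 1);
}

int main(void)
{
        some_operation();
        return 0;
}
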
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 0bf192fea3eb..89b891f51cfb 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -76,7 +76,7 @@ static char EXIT[] = "exit";
76 */ 76 */
77STATIC void 77STATIC void
78xfs_bmbt_trace_enter( 78xfs_bmbt_trace_enter(
79 char *func, 79 const char *func,
80 xfs_btree_cur_t *cur, 80 xfs_btree_cur_t *cur,
81 char *s, 81 char *s,
82 int type, 82 int type,
@@ -117,7 +117,7 @@ xfs_bmbt_trace_enter(
117 */ 117 */
118STATIC void 118STATIC void
119xfs_bmbt_trace_argbi( 119xfs_bmbt_trace_argbi(
120 char *func, 120 const char *func,
121 xfs_btree_cur_t *cur, 121 xfs_btree_cur_t *cur,
122 xfs_buf_t *b, 122 xfs_buf_t *b,
123 int i, 123 int i,
@@ -134,7 +134,7 @@ xfs_bmbt_trace_argbi(
134 */ 134 */
135STATIC void 135STATIC void
136xfs_bmbt_trace_argbii( 136xfs_bmbt_trace_argbii(
137 char *func, 137 const char *func,
138 xfs_btree_cur_t *cur, 138 xfs_btree_cur_t *cur,
139 xfs_buf_t *b, 139 xfs_buf_t *b,
140 int i0, 140 int i0,
@@ -153,7 +153,7 @@ xfs_bmbt_trace_argbii(
153 */ 153 */
154STATIC void 154STATIC void
155xfs_bmbt_trace_argfffi( 155xfs_bmbt_trace_argfffi(
156 char *func, 156 const char *func,
157 xfs_btree_cur_t *cur, 157 xfs_btree_cur_t *cur,
158 xfs_dfiloff_t o, 158 xfs_dfiloff_t o,
159 xfs_dfsbno_t b, 159 xfs_dfsbno_t b,
@@ -172,7 +172,7 @@ xfs_bmbt_trace_argfffi(
172 */ 172 */
173STATIC void 173STATIC void
174xfs_bmbt_trace_argi( 174xfs_bmbt_trace_argi(
175 char *func, 175 const char *func,
176 xfs_btree_cur_t *cur, 176 xfs_btree_cur_t *cur,
177 int i, 177 int i,
178 int line) 178 int line)
@@ -188,7 +188,7 @@ xfs_bmbt_trace_argi(
188 */ 188 */
189STATIC void 189STATIC void
190xfs_bmbt_trace_argifk( 190xfs_bmbt_trace_argifk(
191 char *func, 191 const char *func,
192 xfs_btree_cur_t *cur, 192 xfs_btree_cur_t *cur,
193 int i, 193 int i,
194 xfs_fsblock_t f, 194 xfs_fsblock_t f,
@@ -206,7 +206,7 @@ xfs_bmbt_trace_argifk(
206 */ 206 */
207STATIC void 207STATIC void
208xfs_bmbt_trace_argifr( 208xfs_bmbt_trace_argifr(
209 char *func, 209 const char *func,
210 xfs_btree_cur_t *cur, 210 xfs_btree_cur_t *cur,
211 int i, 211 int i,
212 xfs_fsblock_t f, 212 xfs_fsblock_t f,
@@ -235,7 +235,7 @@ xfs_bmbt_trace_argifr(
235 */ 235 */
236STATIC void 236STATIC void
237xfs_bmbt_trace_argik( 237xfs_bmbt_trace_argik(
238 char *func, 238 const char *func,
239 xfs_btree_cur_t *cur, 239 xfs_btree_cur_t *cur,
240 int i, 240 int i,
241 xfs_bmbt_key_t *k, 241 xfs_bmbt_key_t *k,
@@ -255,7 +255,7 @@ xfs_bmbt_trace_argik(
255 */ 255 */
256STATIC void 256STATIC void
257xfs_bmbt_trace_cursor( 257xfs_bmbt_trace_cursor(
258 char *func, 258 const char *func,
259 xfs_btree_cur_t *cur, 259 xfs_btree_cur_t *cur,
260 char *s, 260 char *s,
261 int line) 261 int line)
@@ -274,21 +274,21 @@ xfs_bmbt_trace_cursor(
274} 274}
275 275
276#define XFS_BMBT_TRACE_ARGBI(c,b,i) \ 276#define XFS_BMBT_TRACE_ARGBI(c,b,i) \
277 xfs_bmbt_trace_argbi(fname, c, b, i, __LINE__) 277 xfs_bmbt_trace_argbi(__FUNCTION__, c, b, i, __LINE__)
278#define XFS_BMBT_TRACE_ARGBII(c,b,i,j) \ 278#define XFS_BMBT_TRACE_ARGBII(c,b,i,j) \
279 xfs_bmbt_trace_argbii(fname, c, b, i, j, __LINE__) 279 xfs_bmbt_trace_argbii(__FUNCTION__, c, b, i, j, __LINE__)
280#define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j) \ 280#define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j) \
281 xfs_bmbt_trace_argfffi(fname, c, o, b, i, j, __LINE__) 281 xfs_bmbt_trace_argfffi(__FUNCTION__, c, o, b, i, j, __LINE__)
282#define XFS_BMBT_TRACE_ARGI(c,i) \ 282#define XFS_BMBT_TRACE_ARGI(c,i) \
283 xfs_bmbt_trace_argi(fname, c, i, __LINE__) 283 xfs_bmbt_trace_argi(__FUNCTION__, c, i, __LINE__)
284#define XFS_BMBT_TRACE_ARGIFK(c,i,f,s) \ 284#define XFS_BMBT_TRACE_ARGIFK(c,i,f,s) \
285 xfs_bmbt_trace_argifk(fname, c, i, f, s, __LINE__) 285 xfs_bmbt_trace_argifk(__FUNCTION__, c, i, f, s, __LINE__)
286#define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) \ 286#define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) \
287 xfs_bmbt_trace_argifr(fname, c, i, f, r, __LINE__) 287 xfs_bmbt_trace_argifr(__FUNCTION__, c, i, f, r, __LINE__)
288#define XFS_BMBT_TRACE_ARGIK(c,i,k) \ 288#define XFS_BMBT_TRACE_ARGIK(c,i,k) \
289 xfs_bmbt_trace_argik(fname, c, i, k, __LINE__) 289 xfs_bmbt_trace_argik(__FUNCTION__, c, i, k, __LINE__)
290#define XFS_BMBT_TRACE_CURSOR(c,s) \ 290#define XFS_BMBT_TRACE_CURSOR(c,s) \
291 xfs_bmbt_trace_cursor(fname, c, s, __LINE__) 291 xfs_bmbt_trace_cursor(__FUNCTION__, c, s, __LINE__)
292#else 292#else
293#define XFS_BMBT_TRACE_ARGBI(c,b,i) 293#define XFS_BMBT_TRACE_ARGBI(c,b,i)
294#define XFS_BMBT_TRACE_ARGBII(c,b,i,j) 294#define XFS_BMBT_TRACE_ARGBII(c,b,i,j)
@@ -318,9 +318,6 @@ xfs_bmbt_delrec(
318 xfs_fsblock_t bno; /* fs-relative block number */ 318 xfs_fsblock_t bno; /* fs-relative block number */
319 xfs_buf_t *bp; /* buffer for block */ 319 xfs_buf_t *bp; /* buffer for block */
320 int error; /* error return value */ 320 int error; /* error return value */
321#ifdef XFS_BMBT_TRACE
322 static char fname[] = "xfs_bmbt_delrec";
323#endif
324 int i; /* loop counter */ 321 int i; /* loop counter */
325 int j; /* temp state */ 322 int j; /* temp state */
326 xfs_bmbt_key_t key; /* bmap btree key */ 323 xfs_bmbt_key_t key; /* bmap btree key */
@@ -694,9 +691,6 @@ xfs_bmbt_insrec(
694 xfs_bmbt_block_t *block; /* bmap btree block */ 691 xfs_bmbt_block_t *block; /* bmap btree block */
695 xfs_buf_t *bp; /* buffer for block */ 692 xfs_buf_t *bp; /* buffer for block */
696 int error; /* error return value */ 693 int error; /* error return value */
697#ifdef XFS_BMBT_TRACE
698 static char fname[] = "xfs_bmbt_insrec";
699#endif
700 int i; /* loop index */ 694 int i; /* loop index */
701 xfs_bmbt_key_t key; /* bmap btree key */ 695 xfs_bmbt_key_t key; /* bmap btree key */
702 xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */ 696 xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */
@@ -881,9 +875,6 @@ xfs_bmbt_killroot(
881#ifdef DEBUG 875#ifdef DEBUG
882 int error; 876 int error;
883#endif 877#endif
884#ifdef XFS_BMBT_TRACE
885 static char fname[] = "xfs_bmbt_killroot";
886#endif
887 int i; 878 int i;
888 xfs_bmbt_key_t *kp; 879 xfs_bmbt_key_t *kp;
889 xfs_inode_t *ip; 880 xfs_inode_t *ip;
@@ -973,9 +964,6 @@ xfs_bmbt_log_keys(
973 int kfirst, 964 int kfirst,
974 int klast) 965 int klast)
975{ 966{
976#ifdef XFS_BMBT_TRACE
977 static char fname[] = "xfs_bmbt_log_keys";
978#endif
979 xfs_trans_t *tp; 967 xfs_trans_t *tp;
980 968
981 XFS_BMBT_TRACE_CURSOR(cur, ENTRY); 969 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
@@ -1012,9 +1000,6 @@ xfs_bmbt_log_ptrs(
1012 int pfirst, 1000 int pfirst,
1013 int plast) 1001 int plast)
1014{ 1002{
1015#ifdef XFS_BMBT_TRACE
1016 static char fname[] = "xfs_bmbt_log_ptrs";
1017#endif
1018 xfs_trans_t *tp; 1003 xfs_trans_t *tp;
1019 1004
1020 XFS_BMBT_TRACE_CURSOR(cur, ENTRY); 1005 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
@@ -1055,9 +1040,6 @@ xfs_bmbt_lookup(
1055 xfs_daddr_t d; 1040 xfs_daddr_t d;
1056 xfs_sfiloff_t diff; 1041 xfs_sfiloff_t diff;
1057 int error; /* error return value */ 1042 int error; /* error return value */
1058#ifdef XFS_BMBT_TRACE
1059 static char fname[] = "xfs_bmbt_lookup";
1060#endif
1061 xfs_fsblock_t fsbno=0; 1043 xfs_fsblock_t fsbno=0;
1062 int high; 1044 int high;
1063 int i; 1045 int i;
@@ -1195,9 +1177,6 @@ xfs_bmbt_lshift(
1195 int *stat) /* success/failure */ 1177 int *stat) /* success/failure */
1196{ 1178{
1197 int error; /* error return value */ 1179 int error; /* error return value */
1198#ifdef XFS_BMBT_TRACE
1199 static char fname[] = "xfs_bmbt_lshift";
1200#endif
1201#ifdef DEBUG 1180#ifdef DEBUG
1202 int i; /* loop counter */ 1181 int i; /* loop counter */
1203#endif 1182#endif
@@ -1331,9 +1310,6 @@ xfs_bmbt_rshift(
1331 int *stat) /* success/failure */ 1310 int *stat) /* success/failure */
1332{ 1311{
1333 int error; /* error return value */ 1312 int error; /* error return value */
1334#ifdef XFS_BMBT_TRACE
1335 static char fname[] = "xfs_bmbt_rshift";
1336#endif
1337 int i; /* loop counter */ 1313 int i; /* loop counter */
1338 xfs_bmbt_key_t key; /* bmap btree key */ 1314 xfs_bmbt_key_t key; /* bmap btree key */
1339 xfs_buf_t *lbp; /* left buffer pointer */ 1315 xfs_buf_t *lbp; /* left buffer pointer */
@@ -1492,9 +1468,6 @@ xfs_bmbt_split(
1492{ 1468{
1493 xfs_alloc_arg_t args; /* block allocation args */ 1469 xfs_alloc_arg_t args; /* block allocation args */
1494 int error; /* error return value */ 1470 int error; /* error return value */
1495#ifdef XFS_BMBT_TRACE
1496 static char fname[] = "xfs_bmbt_split";
1497#endif
1498 int i; /* loop counter */ 1471 int i; /* loop counter */
1499 xfs_fsblock_t lbno; /* left sibling block number */ 1472 xfs_fsblock_t lbno; /* left sibling block number */
1500 xfs_buf_t *lbp; /* left buffer pointer */ 1473 xfs_buf_t *lbp; /* left buffer pointer */
@@ -1641,9 +1614,6 @@ xfs_bmbt_updkey(
1641#ifdef DEBUG 1614#ifdef DEBUG
1642 int error; 1615 int error;
1643#endif 1616#endif
1644#ifdef XFS_BMBT_TRACE
1645 static char fname[] = "xfs_bmbt_updkey";
1646#endif
1647 xfs_bmbt_key_t *kp; 1617 xfs_bmbt_key_t *kp;
1648 int ptr; 1618 int ptr;
1649 1619
@@ -1712,9 +1682,6 @@ xfs_bmbt_decrement(
1712 xfs_bmbt_block_t *block; 1682 xfs_bmbt_block_t *block;
1713 xfs_buf_t *bp; 1683 xfs_buf_t *bp;
1714 int error; /* error return value */ 1684 int error; /* error return value */
1715#ifdef XFS_BMBT_TRACE
1716 static char fname[] = "xfs_bmbt_decrement";
1717#endif
1718 xfs_fsblock_t fsbno; 1685 xfs_fsblock_t fsbno;
1719 int lev; 1686 int lev;
1720 xfs_mount_t *mp; 1687 xfs_mount_t *mp;
@@ -1785,9 +1752,6 @@ xfs_bmbt_delete(
1785 int *stat) /* success/failure */ 1752 int *stat) /* success/failure */
1786{ 1753{
1787 int error; /* error return value */ 1754 int error; /* error return value */
1788#ifdef XFS_BMBT_TRACE
1789 static char fname[] = "xfs_bmbt_delete";
1790#endif
1791 int i; 1755 int i;
1792 int level; 1756 int level;
1793 1757
@@ -2000,9 +1964,6 @@ xfs_bmbt_increment(
2000 xfs_bmbt_block_t *block; 1964 xfs_bmbt_block_t *block;
2001 xfs_buf_t *bp; 1965 xfs_buf_t *bp;
2002 int error; /* error return value */ 1966 int error; /* error return value */
2003#ifdef XFS_BMBT_TRACE
2004 static char fname[] = "xfs_bmbt_increment";
2005#endif
2006 xfs_fsblock_t fsbno; 1967 xfs_fsblock_t fsbno;
2007 int lev; 1968 int lev;
2008 xfs_mount_t *mp; 1969 xfs_mount_t *mp;
@@ -2080,9 +2041,6 @@ xfs_bmbt_insert(
2080 int *stat) /* success/failure */ 2041 int *stat) /* success/failure */
2081{ 2042{
2082 int error; /* error return value */ 2043 int error; /* error return value */
2083#ifdef XFS_BMBT_TRACE
2084 static char fname[] = "xfs_bmbt_insert";
2085#endif
2086 int i; 2044 int i;
2087 int level; 2045 int level;
2088 xfs_fsblock_t nbno; 2046 xfs_fsblock_t nbno;
@@ -2142,9 +2100,6 @@ xfs_bmbt_log_block(
2142 int fields) 2100 int fields)
2143{ 2101{
2144 int first; 2102 int first;
2145#ifdef XFS_BMBT_TRACE
2146 static char fname[] = "xfs_bmbt_log_block";
2147#endif
2148 int last; 2103 int last;
2149 xfs_trans_t *tp; 2104 xfs_trans_t *tp;
2150 static const short offsets[] = { 2105 static const short offsets[] = {
@@ -2181,9 +2136,6 @@ xfs_bmbt_log_recs(
2181{ 2136{
2182 xfs_bmbt_block_t *block; 2137 xfs_bmbt_block_t *block;
2183 int first; 2138 int first;
2184#ifdef XFS_BMBT_TRACE
2185 static char fname[] = "xfs_bmbt_log_recs";
2186#endif
2187 int last; 2139 int last;
2188 xfs_bmbt_rec_t *rp; 2140 xfs_bmbt_rec_t *rp;
2189 xfs_trans_t *tp; 2141 xfs_trans_t *tp;
@@ -2245,9 +2197,6 @@ xfs_bmbt_newroot(
2245 xfs_bmbt_key_t *ckp; /* child key pointer */ 2197 xfs_bmbt_key_t *ckp; /* child key pointer */
2246 xfs_bmbt_ptr_t *cpp; /* child ptr pointer */ 2198 xfs_bmbt_ptr_t *cpp; /* child ptr pointer */
2247 int error; /* error return code */ 2199 int error; /* error return code */
2248#ifdef XFS_BMBT_TRACE
2249 static char fname[] = "xfs_bmbt_newroot";
2250#endif
2251#ifdef DEBUG 2200#ifdef DEBUG
2252 int i; /* loop counter */ 2201 int i; /* loop counter */
2253#endif 2202#endif
@@ -2630,9 +2579,6 @@ xfs_bmbt_update(
2630 xfs_bmbt_block_t *block; 2579 xfs_bmbt_block_t *block;
2631 xfs_buf_t *bp; 2580 xfs_buf_t *bp;
2632 int error; 2581 int error;
2633#ifdef XFS_BMBT_TRACE
2634 static char fname[] = "xfs_bmbt_update";
2635#endif
2636 xfs_bmbt_key_t key; 2582 xfs_bmbt_key_t key;
2637 int ptr; 2583 int ptr;
2638 xfs_bmbt_rec_t *rp; 2584 xfs_bmbt_rec_t *rp;
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h
index 4e27d55a1e73..6e40a0a198ff 100644
--- a/fs/xfs/xfs_btree.h
+++ b/fs/xfs/xfs_btree.h
@@ -444,30 +444,14 @@ xfs_btree_setbuf(
444/* 444/*
445 * Min and max functions for extlen, agblock, fileoff, and filblks types. 445 * Min and max functions for extlen, agblock, fileoff, and filblks types.
446 */ 446 */
447#define XFS_EXTLEN_MIN(a,b) \ 447#define XFS_EXTLEN_MIN(a,b) min_t(xfs_extlen_t, (a), (b))
448 ((xfs_extlen_t)(a) < (xfs_extlen_t)(b) ? \ 448#define XFS_EXTLEN_MAX(a,b) max_t(xfs_extlen_t, (a), (b))
449 (xfs_extlen_t)(a) : (xfs_extlen_t)(b)) 449#define XFS_AGBLOCK_MIN(a,b) min_t(xfs_agblock_t, (a), (b))
450#define XFS_EXTLEN_MAX(a,b) \ 450#define XFS_AGBLOCK_MAX(a,b) max_t(xfs_agblock_t, (a), (b))
451 ((xfs_extlen_t)(a) > (xfs_extlen_t)(b) ? \ 451#define XFS_FILEOFF_MIN(a,b) min_t(xfs_fileoff_t, (a), (b))
452 (xfs_extlen_t)(a) : (xfs_extlen_t)(b)) 452#define XFS_FILEOFF_MAX(a,b) max_t(xfs_fileoff_t, (a), (b))
453#define XFS_AGBLOCK_MIN(a,b) \ 453#define XFS_FILBLKS_MIN(a,b) min_t(xfs_filblks_t, (a), (b))
454 ((xfs_agblock_t)(a) < (xfs_agblock_t)(b) ? \ 454#define XFS_FILBLKS_MAX(a,b) max_t(xfs_filblks_t, (a), (b))
455 (xfs_agblock_t)(a) : (xfs_agblock_t)(b))
456#define XFS_AGBLOCK_MAX(a,b) \
457 ((xfs_agblock_t)(a) > (xfs_agblock_t)(b) ? \
458 (xfs_agblock_t)(a) : (xfs_agblock_t)(b))
459#define XFS_FILEOFF_MIN(a,b) \
460 ((xfs_fileoff_t)(a) < (xfs_fileoff_t)(b) ? \
461 (xfs_fileoff_t)(a) : (xfs_fileoff_t)(b))
462#define XFS_FILEOFF_MAX(a,b) \
463 ((xfs_fileoff_t)(a) > (xfs_fileoff_t)(b) ? \
464 (xfs_fileoff_t)(a) : (xfs_fileoff_t)(b))
465#define XFS_FILBLKS_MIN(a,b) \
466 ((xfs_filblks_t)(a) < (xfs_filblks_t)(b) ? \
467 (xfs_filblks_t)(a) : (xfs_filblks_t)(b))
468#define XFS_FILBLKS_MAX(a,b) \
469 ((xfs_filblks_t)(a) > (xfs_filblks_t)(b) ? \
470 (xfs_filblks_t)(a) : (xfs_filblks_t)(b))
471 455
472#define XFS_FSB_SANITY_CHECK(mp,fsb) \ 456#define XFS_FSB_SANITY_CHECK(mp,fsb) \
473 (XFS_FSB_TO_AGNO(mp, fsb) < mp->m_sb.sb_agcount && \ 457 (XFS_FSB_TO_AGNO(mp, fsb) < mp->m_sb.sb_agcount && \
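
The hunk above collapses each hand-rolled, casting min/max macro into the kernel's min_t()/max_t(), which take the comparison type as their first argument. A stand-alone sketch of the idea follows; unlike the real macros in <linux/kernel.h>, these simplified demo_* versions do not use temporaries, so they evaluate their arguments twice.

#include <stdio.h>

/* Simplified stand-ins for the kernel's min_t()/max_t(). */
#define demo_min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))
#define demo_max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

typedef unsigned int xfs_extlen_demo_t;   /* toy counterpart of xfs_extlen_t */

int main(void)
{
        xfs_extlen_demo_t a = 8, b = 5;

        /* Same effect as the old open-coded XFS_EXTLEN_MIN()/MAX(). */
        printf("min = %u, max = %u\n",
               demo_min_t(xfs_extlen_demo_t, a, b),
               demo_max_t(xfs_extlen_demo_t, a, b));
        return 0;
}
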
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 6c1bddc04e31..b0667cb27d66 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -580,8 +580,8 @@ xfs_buf_item_unlock(
580 * If the buf item isn't tracking any data, free it. 580 * If the buf item isn't tracking any data, free it.
581 * Otherwise, if XFS_BLI_HOLD is set clear it. 581 * Otherwise, if XFS_BLI_HOLD is set clear it.
582 */ 582 */
583 if (xfs_count_bits(bip->bli_format.blf_data_map, 583 if (xfs_bitmap_empty(bip->bli_format.blf_data_map,
584 bip->bli_format.blf_map_size, 0) == 0) { 584 bip->bli_format.blf_map_size)) {
585 xfs_buf_item_relse(bp); 585 xfs_buf_item_relse(bp);
586 } else if (hold) { 586 } else if (hold) {
587 bip->bli_flags &= ~XFS_BLI_HOLD; 587 bip->bli_flags &= ~XFS_BLI_HOLD;
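
The xfs_buf_item_unlock() hunk swaps a "count the set bits and compare with zero" test for a direct emptiness check on the logged-region bitmap. The new helper's body is not shown in this hunk; the sketch below, with invented demo_* names, only illustrates the obvious shape of such a check: scan the word array and stop at the first non-zero word.

#include <stdio.h>

typedef unsigned int uint;

/*
 * Sketch of an "is this bitmap entirely clear?" helper in the spirit
 * of the xfs_bitmap_empty() call above.  The real implementation may
 * differ in detail; the point is that an emptiness test need not
 * count every bit the way xfs_count_bits() did.
 */
static int demo_bitmap_empty(const uint *map, uint size)
{
        uint i;

        for (i = 0; i < size; i++) {
                if (map[i])
                        return 0;      /* at least one bit set */
        }
        return 1;                      /* no bits set */
}

int main(void)
{
        uint clean[4] = { 0, 0, 0,    0 };
        uint dirty[4] = { 0, 0, 0x10, 0 };

        printf("clean: %d\n", demo_bitmap_empty(clean, 4));  /* 1 */
        printf("dirty: %d\n", demo_bitmap_empty(dirty, 4));  /* 0 */
        return 0;
}
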
diff --git a/fs/xfs/xfs_clnt.h b/fs/xfs/xfs_clnt.h
index 5b7eb81453be..f89196cb08d2 100644
--- a/fs/xfs/xfs_clnt.h
+++ b/fs/xfs/xfs_clnt.h
@@ -99,5 +99,7 @@ struct xfs_mount_args {
99 */ 99 */
100#define XFSMNT2_COMPAT_IOSIZE 0x00000001 /* don't report large preferred 100#define XFSMNT2_COMPAT_IOSIZE 0x00000001 /* don't report large preferred
101 * I/O size in stat(2) */ 101 * I/O size in stat(2) */
102#define XFSMNT2_FILESTREAMS 0x00000002 /* enable the filestreams
103 * allocator */
102 104
103#endif /* __XFS_CLNT_H__ */ 105#endif /* __XFS_CLNT_H__ */
diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h
index b33826961c45..fefd0116bac9 100644
--- a/fs/xfs/xfs_dinode.h
+++ b/fs/xfs/xfs_dinode.h
@@ -257,6 +257,7 @@ typedef enum xfs_dinode_fmt
257#define XFS_DIFLAG_EXTSIZE_BIT 11 /* inode extent size allocator hint */ 257#define XFS_DIFLAG_EXTSIZE_BIT 11 /* inode extent size allocator hint */
258#define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */ 258#define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */
259#define XFS_DIFLAG_NODEFRAG_BIT 13 /* do not reorganize/defragment */ 259#define XFS_DIFLAG_NODEFRAG_BIT 13 /* do not reorganize/defragment */
260#define XFS_DIFLAG_FILESTREAM_BIT 14 /* use filestream allocator */
260#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT) 261#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT)
261#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT) 262#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT)
262#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT) 263#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT)
@@ -271,12 +272,13 @@ typedef enum xfs_dinode_fmt
271#define XFS_DIFLAG_EXTSIZE (1 << XFS_DIFLAG_EXTSIZE_BIT) 272#define XFS_DIFLAG_EXTSIZE (1 << XFS_DIFLAG_EXTSIZE_BIT)
272#define XFS_DIFLAG_EXTSZINHERIT (1 << XFS_DIFLAG_EXTSZINHERIT_BIT) 273#define XFS_DIFLAG_EXTSZINHERIT (1 << XFS_DIFLAG_EXTSZINHERIT_BIT)
273#define XFS_DIFLAG_NODEFRAG (1 << XFS_DIFLAG_NODEFRAG_BIT) 274#define XFS_DIFLAG_NODEFRAG (1 << XFS_DIFLAG_NODEFRAG_BIT)
275#define XFS_DIFLAG_FILESTREAM (1 << XFS_DIFLAG_FILESTREAM_BIT)
274 276
275#define XFS_DIFLAG_ANY \ 277#define XFS_DIFLAG_ANY \
276 (XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \ 278 (XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \
277 XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \ 279 XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \
278 XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \ 280 XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \
279 XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \ 281 XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \
280 XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG) 282 XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG | XFS_DIFLAG_FILESTREAM)
281 283
282#endif /* __XFS_DINODE_H__ */ 284#endif /* __XFS_DINODE_H__ */
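
The dinode hunk defines bit 14 and the XFS_DIFLAG_FILESTREAM mask that the allocator hunks earlier consult through xfs_inode_is_filestream(). That predicate is defined outside these hunks and may well test more than the on-disk flag (a mount-wide option or directory inheritance, for instance); the sketch below only demonstrates the flag test itself, against a mock inode-core struct.

#include <stdio.h>

/* Flag layout copied from the hunk above. */
#define XFS_DIFLAG_FILESTREAM_BIT 14
#define XFS_DIFLAG_FILESTREAM     (1 << XFS_DIFLAG_FILESTREAM_BIT)

/* Minimal stand-in for the inode core; only di_flags matters here. */
struct demo_icdinode {
        unsigned short di_flags;
};

/*
 * Sketch only: the real xfs_inode_is_filestream() lives elsewhere in
 * the series and is likely broader than this single-flag check.
 */
static int demo_inode_is_filestream(const struct demo_icdinode *d)
{
        return (d->di_flags & XFS_DIFLAG_FILESTREAM) != 0;
}

int main(void)
{
        struct demo_icdinode d = { XFS_DIFLAG_FILESTREAM };

        printf("filestream? %d\n", demo_inode_is_filestream(&d)); /* 1 */
        return 0;
}
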
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index 8e8e5279334a..29e091914df4 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -55,9 +55,9 @@ xfs_dir_mount(
55 XFS_MAX_BLOCKSIZE); 55 XFS_MAX_BLOCKSIZE);
56 mp->m_dirblksize = 1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog); 56 mp->m_dirblksize = 1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog);
57 mp->m_dirblkfsbs = 1 << mp->m_sb.sb_dirblklog; 57 mp->m_dirblkfsbs = 1 << mp->m_sb.sb_dirblklog;
58 mp->m_dirdatablk = XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_DATA_FIRSTDB(mp)); 58 mp->m_dirdatablk = xfs_dir2_db_to_da(mp, XFS_DIR2_DATA_FIRSTDB(mp));
59 mp->m_dirleafblk = XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_LEAF_FIRSTDB(mp)); 59 mp->m_dirleafblk = xfs_dir2_db_to_da(mp, XFS_DIR2_LEAF_FIRSTDB(mp));
60 mp->m_dirfreeblk = XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_FREE_FIRSTDB(mp)); 60 mp->m_dirfreeblk = xfs_dir2_db_to_da(mp, XFS_DIR2_FREE_FIRSTDB(mp));
61 mp->m_attr_node_ents = 61 mp->m_attr_node_ents =
62 (mp->m_sb.sb_blocksize - (uint)sizeof(xfs_da_node_hdr_t)) / 62 (mp->m_sb.sb_blocksize - (uint)sizeof(xfs_da_node_hdr_t)) /
63 (uint)sizeof(xfs_da_node_entry_t); 63 (uint)sizeof(xfs_da_node_entry_t);
@@ -554,7 +554,7 @@ xfs_dir2_grow_inode(
554 */ 554 */
555 if (mapp != &map) 555 if (mapp != &map)
556 kmem_free(mapp, sizeof(*mapp) * count); 556 kmem_free(mapp, sizeof(*mapp) * count);
557 *dbp = XFS_DIR2_DA_TO_DB(mp, (xfs_dablk_t)bno); 557 *dbp = xfs_dir2_da_to_db(mp, (xfs_dablk_t)bno);
558 /* 558 /*
559 * Update file's size if this is the data space and it grew. 559 * Update file's size if this is the data space and it grew.
560 */ 560 */
@@ -706,7 +706,7 @@ xfs_dir2_shrink_inode(
706 dp = args->dp; 706 dp = args->dp;
707 mp = dp->i_mount; 707 mp = dp->i_mount;
708 tp = args->trans; 708 tp = args->trans;
709 da = XFS_DIR2_DB_TO_DA(mp, db); 709 da = xfs_dir2_db_to_da(mp, db);
710 /* 710 /*
711 * Unmap the fsblock(s). 711 * Unmap the fsblock(s).
712 */ 712 */
@@ -742,7 +742,7 @@ xfs_dir2_shrink_inode(
742 /* 742 /*
743 * If the block isn't the last one in the directory, we're done. 743 * If the block isn't the last one in the directory, we're done.
744 */ 744 */
745 if (dp->i_d.di_size > XFS_DIR2_DB_OFF_TO_BYTE(mp, db + 1, 0)) 745 if (dp->i_d.di_size > xfs_dir2_db_off_to_byte(mp, db + 1, 0))
746 return 0; 746 return 0;
747 bno = da; 747 bno = da;
748 if ((error = xfs_bmap_last_before(tp, dp, &bno, XFS_DATA_FORK))) { 748 if ((error = xfs_bmap_last_before(tp, dp, &bno, XFS_DATA_FORK))) {
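
The xfs_dir_mount() hunk above records the geometry these conversions rely on: m_dirblkfsbs = 1 << sb_dirblklog, i.e. a directory block spans 2^sb_dirblklog filesystem blocks. Assuming xfs_dir2_db_to_da() is simply that scaling (its definition is not in this hunk), a self-contained sketch with demo_* names:

#include <stdio.h>

typedef unsigned int demo_dir2_db_t;  /* directory block number */
typedef unsigned int demo_dablk_t;    /* dir block offset in fs blocks */

struct demo_mount {
        unsigned char sb_dirblklog;   /* log2(fs blocks per dir block) */
};

/*
 * Assumed shape of the xfs_dir2_db_to_da() conversion, inferred from
 * m_dirblkfsbs == 1 << sb_dirblklog; the real inline is defined in
 * the directory headers, not shown here.
 */
static inline demo_dablk_t demo_dir2_db_to_da(const struct demo_mount *mp,
                                              demo_dir2_db_t db)
{
        return (demo_dablk_t)(db << mp->sb_dirblklog);
}

int main(void)
{
        struct demo_mount mp = { 2 };  /* 4 fs blocks per directory block */

        /* directory block 3 starts at fs block offset 12 */
        printf("da = %u\n", demo_dir2_db_to_da(&mp, 3));
        return 0;
}
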
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index 3accc1dcd6c9..e4df1aaae2a2 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -115,13 +115,13 @@ xfs_dir2_block_addname(
115 xfs_da_brelse(tp, bp); 115 xfs_da_brelse(tp, bp);
116 return XFS_ERROR(EFSCORRUPTED); 116 return XFS_ERROR(EFSCORRUPTED);
117 } 117 }
118 len = XFS_DIR2_DATA_ENTSIZE(args->namelen); 118 len = xfs_dir2_data_entsize(args->namelen);
119 /* 119 /*
120 * Set up pointers to parts of the block. 120 * Set up pointers to parts of the block.
121 */ 121 */
122 bf = block->hdr.bestfree; 122 bf = block->hdr.bestfree;
123 btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 123 btp = xfs_dir2_block_tail_p(mp, block);
124 blp = XFS_DIR2_BLOCK_LEAF_P(btp); 124 blp = xfs_dir2_block_leaf_p(btp);
125 /* 125 /*
126 * No stale entries? Need space for entry and new leaf. 126 * No stale entries? Need space for entry and new leaf.
127 */ 127 */
@@ -396,7 +396,7 @@ xfs_dir2_block_addname(
396 * Fill in the leaf entry. 396 * Fill in the leaf entry.
397 */ 397 */
398 blp[mid].hashval = cpu_to_be32(args->hashval); 398 blp[mid].hashval = cpu_to_be32(args->hashval);
399 blp[mid].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp, 399 blp[mid].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
400 (char *)dep - (char *)block)); 400 (char *)dep - (char *)block));
401 xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh); 401 xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh);
402 /* 402 /*
@@ -411,7 +411,7 @@ xfs_dir2_block_addname(
411 dep->inumber = cpu_to_be64(args->inumber); 411 dep->inumber = cpu_to_be64(args->inumber);
412 dep->namelen = args->namelen; 412 dep->namelen = args->namelen;
413 memcpy(dep->name, args->name, args->namelen); 413 memcpy(dep->name, args->name, args->namelen);
414 tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); 414 tagp = xfs_dir2_data_entry_tag_p(dep);
415 *tagp = cpu_to_be16((char *)dep - (char *)block); 415 *tagp = cpu_to_be16((char *)dep - (char *)block);
416 /* 416 /*
417 * Clean up the bestfree array and log the header, tail, and entry. 417 * Clean up the bestfree array and log the header, tail, and entry.
@@ -455,7 +455,7 @@ xfs_dir2_block_getdents(
455 /* 455 /*
456 * If the block number in the offset is out of range, we're done. 456 * If the block number in the offset is out of range, we're done.
457 */ 457 */
458 if (XFS_DIR2_DATAPTR_TO_DB(mp, uio->uio_offset) > mp->m_dirdatablk) { 458 if (xfs_dir2_dataptr_to_db(mp, uio->uio_offset) > mp->m_dirdatablk) {
459 *eofp = 1; 459 *eofp = 1;
460 return 0; 460 return 0;
461 } 461 }
@@ -471,15 +471,15 @@ xfs_dir2_block_getdents(
471 * Extract the byte offset we start at from the seek pointer. 471 * Extract the byte offset we start at from the seek pointer.
472 * We'll skip entries before this. 472 * We'll skip entries before this.
473 */ 473 */
474 wantoff = XFS_DIR2_DATAPTR_TO_OFF(mp, uio->uio_offset); 474 wantoff = xfs_dir2_dataptr_to_off(mp, uio->uio_offset);
475 block = bp->data; 475 block = bp->data;
476 xfs_dir2_data_check(dp, bp); 476 xfs_dir2_data_check(dp, bp);
477 /* 477 /*
478 * Set up values for the loop. 478 * Set up values for the loop.
479 */ 479 */
480 btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 480 btp = xfs_dir2_block_tail_p(mp, block);
481 ptr = (char *)block->u; 481 ptr = (char *)block->u;
482 endptr = (char *)XFS_DIR2_BLOCK_LEAF_P(btp); 482 endptr = (char *)xfs_dir2_block_leaf_p(btp);
483 p.dbp = dbp; 483 p.dbp = dbp;
484 p.put = put; 484 p.put = put;
485 p.uio = uio; 485 p.uio = uio;
@@ -502,7 +502,7 @@ xfs_dir2_block_getdents(
502 /* 502 /*
503 * Bump pointer for the next iteration. 503 * Bump pointer for the next iteration.
504 */ 504 */
505 ptr += XFS_DIR2_DATA_ENTSIZE(dep->namelen); 505 ptr += xfs_dir2_data_entsize(dep->namelen);
506 /* 506 /*
507 * The entry is before the desired starting point, skip it. 507 * The entry is before the desired starting point, skip it.
508 */ 508 */
@@ -513,7 +513,7 @@ xfs_dir2_block_getdents(
513 */ 513 */
514 p.namelen = dep->namelen; 514 p.namelen = dep->namelen;
515 515
516 p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 516 p.cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
517 ptr - (char *)block); 517 ptr - (char *)block);
518 p.ino = be64_to_cpu(dep->inumber); 518 p.ino = be64_to_cpu(dep->inumber);
519#if XFS_BIG_INUMS 519#if XFS_BIG_INUMS
@@ -531,7 +531,7 @@ xfs_dir2_block_getdents(
531 */ 531 */
532 if (!p.done) { 532 if (!p.done) {
533 uio->uio_offset = 533 uio->uio_offset =
534 XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 534 xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
535 (char *)dep - (char *)block); 535 (char *)dep - (char *)block);
536 xfs_da_brelse(tp, bp); 536 xfs_da_brelse(tp, bp);
537 return error; 537 return error;
@@ -545,7 +545,7 @@ xfs_dir2_block_getdents(
545 *eofp = 1; 545 *eofp = 1;
546 546
547 uio->uio_offset = 547 uio->uio_offset =
548 XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk + 1, 0); 548 xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0);
549 549
550 xfs_da_brelse(tp, bp); 550 xfs_da_brelse(tp, bp);
551 551
@@ -569,8 +569,8 @@ xfs_dir2_block_log_leaf(
569 569
570 mp = tp->t_mountp; 570 mp = tp->t_mountp;
571 block = bp->data; 571 block = bp->data;
572 btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 572 btp = xfs_dir2_block_tail_p(mp, block);
573 blp = XFS_DIR2_BLOCK_LEAF_P(btp); 573 blp = xfs_dir2_block_leaf_p(btp);
574 xfs_da_log_buf(tp, bp, (uint)((char *)&blp[first] - (char *)block), 574 xfs_da_log_buf(tp, bp, (uint)((char *)&blp[first] - (char *)block),
575 (uint)((char *)&blp[last + 1] - (char *)block - 1)); 575 (uint)((char *)&blp[last + 1] - (char *)block - 1));
576} 576}
@@ -589,7 +589,7 @@ xfs_dir2_block_log_tail(
589 589
590 mp = tp->t_mountp; 590 mp = tp->t_mountp;
591 block = bp->data; 591 block = bp->data;
592 btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 592 btp = xfs_dir2_block_tail_p(mp, block);
593 xfs_da_log_buf(tp, bp, (uint)((char *)btp - (char *)block), 593 xfs_da_log_buf(tp, bp, (uint)((char *)btp - (char *)block),
594 (uint)((char *)(btp + 1) - (char *)block - 1)); 594 (uint)((char *)(btp + 1) - (char *)block - 1));
595} 595}
@@ -623,13 +623,13 @@ xfs_dir2_block_lookup(
623 mp = dp->i_mount; 623 mp = dp->i_mount;
624 block = bp->data; 624 block = bp->data;
625 xfs_dir2_data_check(dp, bp); 625 xfs_dir2_data_check(dp, bp);
626 btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 626 btp = xfs_dir2_block_tail_p(mp, block);
627 blp = XFS_DIR2_BLOCK_LEAF_P(btp); 627 blp = xfs_dir2_block_leaf_p(btp);
628 /* 628 /*
629 * Get the offset from the leaf entry, to point to the data. 629 * Get the offset from the leaf entry, to point to the data.
630 */ 630 */
631 dep = (xfs_dir2_data_entry_t *) 631 dep = (xfs_dir2_data_entry_t *)
632 ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(blp[ent].address))); 632 ((char *)block + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address)));
633 /* 633 /*
634 * Fill in inode number, release the block. 634 * Fill in inode number, release the block.
635 */ 635 */
@@ -675,8 +675,8 @@ xfs_dir2_block_lookup_int(
675 ASSERT(bp != NULL); 675 ASSERT(bp != NULL);
676 block = bp->data; 676 block = bp->data;
677 xfs_dir2_data_check(dp, bp); 677 xfs_dir2_data_check(dp, bp);
678 btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 678 btp = xfs_dir2_block_tail_p(mp, block);
679 blp = XFS_DIR2_BLOCK_LEAF_P(btp); 679 blp = xfs_dir2_block_leaf_p(btp);
680 /* 680 /*
681 * Loop doing a binary search for our hash value. 681 * Loop doing a binary search for our hash value.
682 * Find our entry, ENOENT if it's not there. 682 * Find our entry, ENOENT if it's not there.
@@ -713,7 +713,7 @@ xfs_dir2_block_lookup_int(
713 * Get pointer to the entry from the leaf. 713 * Get pointer to the entry from the leaf.
714 */ 714 */
715 dep = (xfs_dir2_data_entry_t *) 715 dep = (xfs_dir2_data_entry_t *)
716 ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, addr)); 716 ((char *)block + xfs_dir2_dataptr_to_off(mp, addr));
717 /* 717 /*
718 * Compare, if it's right give back buffer & entry number. 718 * Compare, if it's right give back buffer & entry number.
719 */ 719 */
@@ -768,20 +768,20 @@ xfs_dir2_block_removename(
768 tp = args->trans; 768 tp = args->trans;
769 mp = dp->i_mount; 769 mp = dp->i_mount;
770 block = bp->data; 770 block = bp->data;
771 btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 771 btp = xfs_dir2_block_tail_p(mp, block);
772 blp = XFS_DIR2_BLOCK_LEAF_P(btp); 772 blp = xfs_dir2_block_leaf_p(btp);
773 /* 773 /*
774 * Point to the data entry using the leaf entry. 774 * Point to the data entry using the leaf entry.
775 */ 775 */
776 dep = (xfs_dir2_data_entry_t *) 776 dep = (xfs_dir2_data_entry_t *)
777 ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(blp[ent].address))); 777 ((char *)block + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address)));
778 /* 778 /*
779 * Mark the data entry's space free. 779 * Mark the data entry's space free.
780 */ 780 */
781 needlog = needscan = 0; 781 needlog = needscan = 0;
782 xfs_dir2_data_make_free(tp, bp, 782 xfs_dir2_data_make_free(tp, bp,
783 (xfs_dir2_data_aoff_t)((char *)dep - (char *)block), 783 (xfs_dir2_data_aoff_t)((char *)dep - (char *)block),
784 XFS_DIR2_DATA_ENTSIZE(dep->namelen), &needlog, &needscan); 784 xfs_dir2_data_entsize(dep->namelen), &needlog, &needscan);
785 /* 785 /*
786 * Fix up the block tail. 786 * Fix up the block tail.
787 */ 787 */
@@ -843,13 +843,13 @@ xfs_dir2_block_replace(
843 dp = args->dp; 843 dp = args->dp;
844 mp = dp->i_mount; 844 mp = dp->i_mount;
845 block = bp->data; 845 block = bp->data;
846 btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 846 btp = xfs_dir2_block_tail_p(mp, block);
847 blp = XFS_DIR2_BLOCK_LEAF_P(btp); 847 blp = xfs_dir2_block_leaf_p(btp);
848 /* 848 /*
849 * Point to the data entry we need to change. 849 * Point to the data entry we need to change.
850 */ 850 */
851 dep = (xfs_dir2_data_entry_t *) 851 dep = (xfs_dir2_data_entry_t *)
852 ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(blp[ent].address))); 852 ((char *)block + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address)));
853 ASSERT(be64_to_cpu(dep->inumber) != args->inumber); 853 ASSERT(be64_to_cpu(dep->inumber) != args->inumber);
854 /* 854 /*
855 * Change the inode number to the new value. 855 * Change the inode number to the new value.
@@ -912,7 +912,7 @@ xfs_dir2_leaf_to_block(
912 mp = dp->i_mount; 912 mp = dp->i_mount;
913 leaf = lbp->data; 913 leaf = lbp->data;
914 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); 914 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC);
915 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 915 ltp = xfs_dir2_leaf_tail_p(mp, leaf);
916 /* 916 /*
917 * If there are data blocks other than the first one, take this 917 * If there are data blocks other than the first one, take this
918 * opportunity to remove trailing empty data blocks that may have 918 * opportunity to remove trailing empty data blocks that may have
@@ -920,7 +920,7 @@ xfs_dir2_leaf_to_block(
920 * These will show up in the leaf bests table. 920 * These will show up in the leaf bests table.
921 */ 921 */
922 while (dp->i_d.di_size > mp->m_dirblksize) { 922 while (dp->i_d.di_size > mp->m_dirblksize) {
923 bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); 923 bestsp = xfs_dir2_leaf_bests_p(ltp);
924 if (be16_to_cpu(bestsp[be32_to_cpu(ltp->bestcount) - 1]) == 924 if (be16_to_cpu(bestsp[be32_to_cpu(ltp->bestcount) - 1]) ==
925 mp->m_dirblksize - (uint)sizeof(block->hdr)) { 925 mp->m_dirblksize - (uint)sizeof(block->hdr)) {
926 if ((error = 926 if ((error =
@@ -974,14 +974,14 @@ xfs_dir2_leaf_to_block(
974 /* 974 /*
975 * Initialize the block tail. 975 * Initialize the block tail.
976 */ 976 */
977 btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 977 btp = xfs_dir2_block_tail_p(mp, block);
978 btp->count = cpu_to_be32(be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale)); 978 btp->count = cpu_to_be32(be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale));
979 btp->stale = 0; 979 btp->stale = 0;
980 xfs_dir2_block_log_tail(tp, dbp); 980 xfs_dir2_block_log_tail(tp, dbp);
981 /* 981 /*
982 * Initialize the block leaf area. We compact out stale entries. 982 * Initialize the block leaf area. We compact out stale entries.
983 */ 983 */
984 lep = XFS_DIR2_BLOCK_LEAF_P(btp); 984 lep = xfs_dir2_block_leaf_p(btp);
985 for (from = to = 0; from < be16_to_cpu(leaf->hdr.count); from++) { 985 for (from = to = 0; from < be16_to_cpu(leaf->hdr.count); from++) {
986 if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR) 986 if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR)
987 continue; 987 continue;
@@ -1067,7 +1067,7 @@ xfs_dir2_sf_to_block(
1067 ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); 1067 ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
1068 ASSERT(dp->i_df.if_u1.if_data != NULL); 1068 ASSERT(dp->i_df.if_u1.if_data != NULL);
1069 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; 1069 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
1070 ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); 1070 ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
1071 /* 1071 /*
1072 * Copy the directory into the stack buffer. 1072 * Copy the directory into the stack buffer.
1073 * Then pitch the incore inode data so we can make extents. 1073 * Then pitch the incore inode data so we can make extents.
@@ -1119,10 +1119,10 @@ xfs_dir2_sf_to_block(
1119 /* 1119 /*
1120 * Fill in the tail. 1120 * Fill in the tail.
1121 */ 1121 */
1122 btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 1122 btp = xfs_dir2_block_tail_p(mp, block);
1123 btp->count = cpu_to_be32(sfp->hdr.count + 2); /* ., .. */ 1123 btp->count = cpu_to_be32(sfp->hdr.count + 2); /* ., .. */
1124 btp->stale = 0; 1124 btp->stale = 0;
1125 blp = XFS_DIR2_BLOCK_LEAF_P(btp); 1125 blp = xfs_dir2_block_leaf_p(btp);
1126 endoffset = (uint)((char *)blp - (char *)block); 1126 endoffset = (uint)((char *)blp - (char *)block);
1127 /* 1127 /*
1128 * Remove the freespace, we'll manage it. 1128 * Remove the freespace, we'll manage it.
@@ -1138,25 +1138,25 @@ xfs_dir2_sf_to_block(
1138 dep->inumber = cpu_to_be64(dp->i_ino); 1138 dep->inumber = cpu_to_be64(dp->i_ino);
1139 dep->namelen = 1; 1139 dep->namelen = 1;
1140 dep->name[0] = '.'; 1140 dep->name[0] = '.';
1141 tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); 1141 tagp = xfs_dir2_data_entry_tag_p(dep);
1142 *tagp = cpu_to_be16((char *)dep - (char *)block); 1142 *tagp = cpu_to_be16((char *)dep - (char *)block);
1143 xfs_dir2_data_log_entry(tp, bp, dep); 1143 xfs_dir2_data_log_entry(tp, bp, dep);
1144 blp[0].hashval = cpu_to_be32(xfs_dir_hash_dot); 1144 blp[0].hashval = cpu_to_be32(xfs_dir_hash_dot);
1145 blp[0].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp, 1145 blp[0].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
1146 (char *)dep - (char *)block)); 1146 (char *)dep - (char *)block));
1147 /* 1147 /*
1148 * Create entry for .. 1148 * Create entry for ..
1149 */ 1149 */
1150 dep = (xfs_dir2_data_entry_t *) 1150 dep = (xfs_dir2_data_entry_t *)
1151 ((char *)block + XFS_DIR2_DATA_DOTDOT_OFFSET); 1151 ((char *)block + XFS_DIR2_DATA_DOTDOT_OFFSET);
1152 dep->inumber = cpu_to_be64(XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent)); 1152 dep->inumber = cpu_to_be64(xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent));
1153 dep->namelen = 2; 1153 dep->namelen = 2;
1154 dep->name[0] = dep->name[1] = '.'; 1154 dep->name[0] = dep->name[1] = '.';
1155 tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); 1155 tagp = xfs_dir2_data_entry_tag_p(dep);
1156 *tagp = cpu_to_be16((char *)dep - (char *)block); 1156 *tagp = cpu_to_be16((char *)dep - (char *)block);
1157 xfs_dir2_data_log_entry(tp, bp, dep); 1157 xfs_dir2_data_log_entry(tp, bp, dep);
1158 blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot); 1158 blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot);
1159 blp[1].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp, 1159 blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
1160 (char *)dep - (char *)block)); 1160 (char *)dep - (char *)block));
1161 offset = XFS_DIR2_DATA_FIRST_OFFSET; 1161 offset = XFS_DIR2_DATA_FIRST_OFFSET;
1162 /* 1162 /*
@@ -1165,7 +1165,7 @@ xfs_dir2_sf_to_block(
1165 if ((i = 0) == sfp->hdr.count) 1165 if ((i = 0) == sfp->hdr.count)
1166 sfep = NULL; 1166 sfep = NULL;
1167 else 1167 else
1168 sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); 1168 sfep = xfs_dir2_sf_firstentry(sfp);
1169 /* 1169 /*
1170 * Need to preserve the existing offset values in the sf directory. 1170 * Need to preserve the existing offset values in the sf directory.
1171 * Insert holes (unused entries) where necessary. 1171 * Insert holes (unused entries) where necessary.
@@ -1177,7 +1177,7 @@ xfs_dir2_sf_to_block(
1177 if (sfep == NULL) 1177 if (sfep == NULL)
1178 newoffset = endoffset; 1178 newoffset = endoffset;
1179 else 1179 else
1180 newoffset = XFS_DIR2_SF_GET_OFFSET(sfep); 1180 newoffset = xfs_dir2_sf_get_offset(sfep);
1181 /* 1181 /*
1182 * There should be a hole here, make one. 1182 * There should be a hole here, make one.
1183 */ 1183 */
@@ -1186,7 +1186,7 @@ xfs_dir2_sf_to_block(
1186 ((char *)block + offset); 1186 ((char *)block + offset);
1187 dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); 1187 dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
1188 dup->length = cpu_to_be16(newoffset - offset); 1188 dup->length = cpu_to_be16(newoffset - offset);
1189 *XFS_DIR2_DATA_UNUSED_TAG_P(dup) = cpu_to_be16( 1189 *xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16(
1190 ((char *)dup - (char *)block)); 1190 ((char *)dup - (char *)block));
1191 xfs_dir2_data_log_unused(tp, bp, dup); 1191 xfs_dir2_data_log_unused(tp, bp, dup);
1192 (void)xfs_dir2_data_freeinsert((xfs_dir2_data_t *)block, 1192 (void)xfs_dir2_data_freeinsert((xfs_dir2_data_t *)block,
@@ -1198,22 +1198,22 @@ xfs_dir2_sf_to_block(
1198 * Copy a real entry. 1198 * Copy a real entry.
1199 */ 1199 */
1200 dep = (xfs_dir2_data_entry_t *)((char *)block + newoffset); 1200 dep = (xfs_dir2_data_entry_t *)((char *)block + newoffset);
1201 dep->inumber = cpu_to_be64(XFS_DIR2_SF_GET_INUMBER(sfp, 1201 dep->inumber = cpu_to_be64(xfs_dir2_sf_get_inumber(sfp,
1202 XFS_DIR2_SF_INUMBERP(sfep))); 1202 xfs_dir2_sf_inumberp(sfep)));
1203 dep->namelen = sfep->namelen; 1203 dep->namelen = sfep->namelen;
1204 memcpy(dep->name, sfep->name, dep->namelen); 1204 memcpy(dep->name, sfep->name, dep->namelen);
1205 tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); 1205 tagp = xfs_dir2_data_entry_tag_p(dep);
1206 *tagp = cpu_to_be16((char *)dep - (char *)block); 1206 *tagp = cpu_to_be16((char *)dep - (char *)block);
1207 xfs_dir2_data_log_entry(tp, bp, dep); 1207 xfs_dir2_data_log_entry(tp, bp, dep);
1208 blp[2 + i].hashval = cpu_to_be32(xfs_da_hashname( 1208 blp[2 + i].hashval = cpu_to_be32(xfs_da_hashname(
1209 (char *)sfep->name, sfep->namelen)); 1209 (char *)sfep->name, sfep->namelen));
1210 blp[2 + i].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp, 1210 blp[2 + i].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
1211 (char *)dep - (char *)block)); 1211 (char *)dep - (char *)block));
1212 offset = (int)((char *)(tagp + 1) - (char *)block); 1212 offset = (int)((char *)(tagp + 1) - (char *)block);
1213 if (++i == sfp->hdr.count) 1213 if (++i == sfp->hdr.count)
1214 sfep = NULL; 1214 sfep = NULL;
1215 else 1215 else
1216 sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep); 1216 sfep = xfs_dir2_sf_nextentry(sfp, sfep);
1217 } 1217 }
1218 /* Done with the temporary buffer */ 1218 /* Done with the temporary buffer */
1219 kmem_free(buf, buf_len); 1219 kmem_free(buf, buf_len);
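
The hunk above repeatedly stamps each new data entry with a trailing tag word via xfs_dir2_data_entry_tag_p() and sizes entries with xfs_dir2_data_entsize() (both bodies appear further down in the xfs_dir2_data.h hunk). The following is a minimal standalone sketch of that pattern, not the kernel code: the struct layout and the 8-byte alignment constant are simplifying assumptions.

/*
 * Toy version of the entry tag bookkeeping: each directory data entry is
 * padded to an 8-byte boundary and its last two bytes hold the entry's own
 * byte offset within the block, which is what the
 * *tagp = cpu_to_be16((char *)dep - (char *)block) lines above maintain.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stddef.h>

#define ALIGN8(x) (((x) + 7) & ~(size_t)7)

struct toy_entry {                     /* stands in for xfs_dir2_data_entry_t */
	uint64_t inumber;
	uint8_t  namelen;
	char     name[];               /* name bytes, then padding, then a 16-bit tag */
};

static size_t toy_entsize(int namelen)
{
	/* mirrors xfs_dir2_data_entsize(): header + name + tag, rounded to 8 bytes */
	return ALIGN8(offsetof(struct toy_entry, name) + namelen + sizeof(uint16_t));
}

static uint16_t *toy_entry_tag_p(struct toy_entry *dep)
{
	/* the tag word occupies the last two bytes of the padded entry */
	return (uint16_t *)((char *)dep + toy_entsize(dep->namelen) - sizeof(uint16_t));
}

int main(void)
{
	uint64_t buf[512] = { 0 };     /* 4096-byte toy directory block */
	char *block = (char *)buf;
	size_t offset = 64;            /* arbitrary 8-byte-aligned position */
	struct toy_entry *dep = (struct toy_entry *)(block + offset);

	dep->inumber = 128;
	dep->namelen = 5;
	memcpy(dep->name, "hello", 5);
	*toy_entry_tag_p(dep) = (uint16_t)((char *)dep - block);  /* cpu_to_be16() in XFS */

	printf("entsize=%zu tag=%u\n", toy_entsize(dep->namelen),
	       (unsigned)*toy_entry_tag_p(dep));
	return 0;
}

Storing the offset at the end of each entry is what lets the block be walked backwards from a tag as well as forwards from the header.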
diff --git a/fs/xfs/xfs_dir2_block.h b/fs/xfs/xfs_dir2_block.h
index 6722effd0b20..e7c2606161e9 100644
--- a/fs/xfs/xfs_dir2_block.h
+++ b/fs/xfs/xfs_dir2_block.h
@@ -60,7 +60,6 @@ typedef struct xfs_dir2_block {
60/* 60/*
61 * Pointer to the leaf header embedded in a data block (1-block format) 61 * Pointer to the leaf header embedded in a data block (1-block format)
62 */ 62 */
63#define XFS_DIR2_BLOCK_TAIL_P(mp,block) xfs_dir2_block_tail_p(mp,block)
64static inline xfs_dir2_block_tail_t * 63static inline xfs_dir2_block_tail_t *
65xfs_dir2_block_tail_p(struct xfs_mount *mp, xfs_dir2_block_t *block) 64xfs_dir2_block_tail_p(struct xfs_mount *mp, xfs_dir2_block_t *block)
66{ 65{
@@ -71,7 +70,6 @@ xfs_dir2_block_tail_p(struct xfs_mount *mp, xfs_dir2_block_t *block)
71/* 70/*
72 * Pointer to the leaf entries embedded in a data block (1-block format) 71 * Pointer to the leaf entries embedded in a data block (1-block format)
73 */ 72 */
74#define XFS_DIR2_BLOCK_LEAF_P(btp) xfs_dir2_block_leaf_p(btp)
75static inline struct xfs_dir2_leaf_entry * 73static inline struct xfs_dir2_leaf_entry *
76xfs_dir2_block_leaf_p(xfs_dir2_block_tail_t *btp) 74xfs_dir2_block_leaf_p(xfs_dir2_block_tail_t *btp)
77{ 75{
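
The two deleted macros above were thin wrappers around the inline helpers that remain. Their bodies are elided in this hunk, so the sketch below is an assumption about the layout they encode rather than a copy of it: the block tail occupies the last bytes of a one-block directory and the leaf-entry array ends where the tail begins.

/*
 * Toy single-block layout: tail at the end of the block, leaf entries
 * addressed by counting backwards from the tail by btp->count.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_leaf_entry { uint32_t hashval; uint32_t address; };
struct toy_block_tail { uint32_t count; uint32_t stale; };

static struct toy_block_tail *toy_block_tail_p(char *block, size_t blksize)
{
	return (struct toy_block_tail *)(block + blksize) - 1;
}

static struct toy_leaf_entry *toy_block_leaf_p(struct toy_block_tail *btp)
{
	return (struct toy_leaf_entry *)btp - btp->count;
}

int main(void)
{
	uint64_t buf[512] = { 0 };             /* 4096-byte toy block, aligned */
	char *block = (char *)buf;
	struct toy_block_tail *btp = toy_block_tail_p(block, sizeof(buf));

	btp->count = 2;                        /* "." and ".." to start with */
	btp->stale = 0;
	printf("leaf array starts at offset %td, tail at %td\n",
	       (char *)toy_block_leaf_p(btp) - block, (char *)btp - block);
	return 0;
}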
diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c
index c211c37ef67c..7ebe295bd6d3 100644
--- a/fs/xfs/xfs_dir2_data.c
+++ b/fs/xfs/xfs_dir2_data.c
@@ -72,8 +72,8 @@ xfs_dir2_data_check(
72 bf = d->hdr.bestfree; 72 bf = d->hdr.bestfree;
73 p = (char *)d->u; 73 p = (char *)d->u;
74 if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { 74 if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) {
75 btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); 75 btp = xfs_dir2_block_tail_p(mp, (xfs_dir2_block_t *)d);
76 lep = XFS_DIR2_BLOCK_LEAF_P(btp); 76 lep = xfs_dir2_block_leaf_p(btp);
77 endp = (char *)lep; 77 endp = (char *)lep;
78 } else 78 } else
79 endp = (char *)d + mp->m_dirblksize; 79 endp = (char *)d + mp->m_dirblksize;
@@ -107,7 +107,7 @@ xfs_dir2_data_check(
107 */ 107 */
108 if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { 108 if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
109 ASSERT(lastfree == 0); 109 ASSERT(lastfree == 0);
110 ASSERT(be16_to_cpu(*XFS_DIR2_DATA_UNUSED_TAG_P(dup)) == 110 ASSERT(be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) ==
111 (char *)dup - (char *)d); 111 (char *)dup - (char *)d);
112 dfp = xfs_dir2_data_freefind(d, dup); 112 dfp = xfs_dir2_data_freefind(d, dup);
113 if (dfp) { 113 if (dfp) {
@@ -131,12 +131,12 @@ xfs_dir2_data_check(
131 dep = (xfs_dir2_data_entry_t *)p; 131 dep = (xfs_dir2_data_entry_t *)p;
132 ASSERT(dep->namelen != 0); 132 ASSERT(dep->namelen != 0);
133 ASSERT(xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber)) == 0); 133 ASSERT(xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber)) == 0);
134 ASSERT(be16_to_cpu(*XFS_DIR2_DATA_ENTRY_TAG_P(dep)) == 134 ASSERT(be16_to_cpu(*xfs_dir2_data_entry_tag_p(dep)) ==
135 (char *)dep - (char *)d); 135 (char *)dep - (char *)d);
136 count++; 136 count++;
137 lastfree = 0; 137 lastfree = 0;
138 if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { 138 if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) {
139 addr = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 139 addr = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
140 (xfs_dir2_data_aoff_t) 140 (xfs_dir2_data_aoff_t)
141 ((char *)dep - (char *)d)); 141 ((char *)dep - (char *)d));
142 hash = xfs_da_hashname((char *)dep->name, dep->namelen); 142 hash = xfs_da_hashname((char *)dep->name, dep->namelen);
@@ -147,7 +147,7 @@ xfs_dir2_data_check(
147 } 147 }
148 ASSERT(i < be32_to_cpu(btp->count)); 148 ASSERT(i < be32_to_cpu(btp->count));
149 } 149 }
150 p += XFS_DIR2_DATA_ENTSIZE(dep->namelen); 150 p += xfs_dir2_data_entsize(dep->namelen);
151 } 151 }
152 /* 152 /*
153 * Need to have seen all the entries and all the bestfree slots. 153 * Need to have seen all the entries and all the bestfree slots.
@@ -346,8 +346,8 @@ xfs_dir2_data_freescan(
346 */ 346 */
347 p = (char *)d->u; 347 p = (char *)d->u;
348 if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { 348 if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) {
349 btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); 349 btp = xfs_dir2_block_tail_p(mp, (xfs_dir2_block_t *)d);
350 endp = (char *)XFS_DIR2_BLOCK_LEAF_P(btp); 350 endp = (char *)xfs_dir2_block_leaf_p(btp);
351 } else 351 } else
352 endp = (char *)d + mp->m_dirblksize; 352 endp = (char *)d + mp->m_dirblksize;
353 /* 353 /*
@@ -360,7 +360,7 @@ xfs_dir2_data_freescan(
360 */ 360 */
361 if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { 361 if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
362 ASSERT((char *)dup - (char *)d == 362 ASSERT((char *)dup - (char *)d ==
363 be16_to_cpu(*XFS_DIR2_DATA_UNUSED_TAG_P(dup))); 363 be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)));
364 xfs_dir2_data_freeinsert(d, dup, loghead); 364 xfs_dir2_data_freeinsert(d, dup, loghead);
365 p += be16_to_cpu(dup->length); 365 p += be16_to_cpu(dup->length);
366 } 366 }
@@ -370,8 +370,8 @@ xfs_dir2_data_freescan(
370 else { 370 else {
371 dep = (xfs_dir2_data_entry_t *)p; 371 dep = (xfs_dir2_data_entry_t *)p;
372 ASSERT((char *)dep - (char *)d == 372 ASSERT((char *)dep - (char *)d ==
373 be16_to_cpu(*XFS_DIR2_DATA_ENTRY_TAG_P(dep))); 373 be16_to_cpu(*xfs_dir2_data_entry_tag_p(dep)));
374 p += XFS_DIR2_DATA_ENTSIZE(dep->namelen); 374 p += xfs_dir2_data_entsize(dep->namelen);
375 } 375 }
376 } 376 }
377} 377}
@@ -402,7 +402,7 @@ xfs_dir2_data_init(
402 /* 402 /*
403 * Get the buffer set up for the block. 403 * Get the buffer set up for the block.
404 */ 404 */
405 error = xfs_da_get_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, blkno), -1, &bp, 405 error = xfs_da_get_buf(tp, dp, xfs_dir2_db_to_da(mp, blkno), -1, &bp,
406 XFS_DATA_FORK); 406 XFS_DATA_FORK);
407 if (error) { 407 if (error) {
408 return error; 408 return error;
@@ -427,7 +427,7 @@ xfs_dir2_data_init(
427 t=mp->m_dirblksize - (uint)sizeof(d->hdr); 427 t=mp->m_dirblksize - (uint)sizeof(d->hdr);
428 d->hdr.bestfree[0].length = cpu_to_be16(t); 428 d->hdr.bestfree[0].length = cpu_to_be16(t);
429 dup->length = cpu_to_be16(t); 429 dup->length = cpu_to_be16(t);
430 *XFS_DIR2_DATA_UNUSED_TAG_P(dup) = cpu_to_be16((char *)dup - (char *)d); 430 *xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16((char *)dup - (char *)d);
431 /* 431 /*
432 * Log it and return it. 432 * Log it and return it.
433 */ 433 */
@@ -452,7 +452,7 @@ xfs_dir2_data_log_entry(
452 ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || 452 ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
453 be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); 453 be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
454 xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)d), 454 xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)d),
455 (uint)((char *)(XFS_DIR2_DATA_ENTRY_TAG_P(dep) + 1) - 455 (uint)((char *)(xfs_dir2_data_entry_tag_p(dep) + 1) -
456 (char *)d - 1)); 456 (char *)d - 1));
457} 457}
458 458
@@ -497,8 +497,8 @@ xfs_dir2_data_log_unused(
497 * Log the end (tag) of the unused entry. 497 * Log the end (tag) of the unused entry.
498 */ 498 */
499 xfs_da_log_buf(tp, bp, 499 xfs_da_log_buf(tp, bp,
500 (uint)((char *)XFS_DIR2_DATA_UNUSED_TAG_P(dup) - (char *)d), 500 (uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)d),
501 (uint)((char *)XFS_DIR2_DATA_UNUSED_TAG_P(dup) - (char *)d + 501 (uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)d +
502 sizeof(xfs_dir2_data_off_t) - 1)); 502 sizeof(xfs_dir2_data_off_t) - 1));
503} 503}
504 504
@@ -535,8 +535,8 @@ xfs_dir2_data_make_free(
535 xfs_dir2_block_tail_t *btp; /* block tail */ 535 xfs_dir2_block_tail_t *btp; /* block tail */
536 536
537 ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); 537 ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
538 btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); 538 btp = xfs_dir2_block_tail_p(mp, (xfs_dir2_block_t *)d);
539 endptr = (char *)XFS_DIR2_BLOCK_LEAF_P(btp); 539 endptr = (char *)xfs_dir2_block_leaf_p(btp);
540 } 540 }
541 /* 541 /*
542 * If this isn't the start of the block, then back up to 542 * If this isn't the start of the block, then back up to
@@ -587,7 +587,7 @@ xfs_dir2_data_make_free(
587 * Fix up the new big freespace. 587 * Fix up the new big freespace.
588 */ 588 */
589 be16_add(&prevdup->length, len + be16_to_cpu(postdup->length)); 589 be16_add(&prevdup->length, len + be16_to_cpu(postdup->length));
590 *XFS_DIR2_DATA_UNUSED_TAG_P(prevdup) = 590 *xfs_dir2_data_unused_tag_p(prevdup) =
591 cpu_to_be16((char *)prevdup - (char *)d); 591 cpu_to_be16((char *)prevdup - (char *)d);
592 xfs_dir2_data_log_unused(tp, bp, prevdup); 592 xfs_dir2_data_log_unused(tp, bp, prevdup);
593 if (!needscan) { 593 if (!needscan) {
@@ -621,7 +621,7 @@ xfs_dir2_data_make_free(
621 else if (prevdup) { 621 else if (prevdup) {
622 dfp = xfs_dir2_data_freefind(d, prevdup); 622 dfp = xfs_dir2_data_freefind(d, prevdup);
623 be16_add(&prevdup->length, len); 623 be16_add(&prevdup->length, len);
624 *XFS_DIR2_DATA_UNUSED_TAG_P(prevdup) = 624 *xfs_dir2_data_unused_tag_p(prevdup) =
625 cpu_to_be16((char *)prevdup - (char *)d); 625 cpu_to_be16((char *)prevdup - (char *)d);
626 xfs_dir2_data_log_unused(tp, bp, prevdup); 626 xfs_dir2_data_log_unused(tp, bp, prevdup);
627 /* 627 /*
@@ -649,7 +649,7 @@ xfs_dir2_data_make_free(
649 newdup = (xfs_dir2_data_unused_t *)((char *)d + offset); 649 newdup = (xfs_dir2_data_unused_t *)((char *)d + offset);
650 newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); 650 newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
651 newdup->length = cpu_to_be16(len + be16_to_cpu(postdup->length)); 651 newdup->length = cpu_to_be16(len + be16_to_cpu(postdup->length));
652 *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = 652 *xfs_dir2_data_unused_tag_p(newdup) =
653 cpu_to_be16((char *)newdup - (char *)d); 653 cpu_to_be16((char *)newdup - (char *)d);
654 xfs_dir2_data_log_unused(tp, bp, newdup); 654 xfs_dir2_data_log_unused(tp, bp, newdup);
655 /* 655 /*
@@ -676,7 +676,7 @@ xfs_dir2_data_make_free(
676 newdup = (xfs_dir2_data_unused_t *)((char *)d + offset); 676 newdup = (xfs_dir2_data_unused_t *)((char *)d + offset);
677 newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); 677 newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
678 newdup->length = cpu_to_be16(len); 678 newdup->length = cpu_to_be16(len);
679 *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = 679 *xfs_dir2_data_unused_tag_p(newdup) =
680 cpu_to_be16((char *)newdup - (char *)d); 680 cpu_to_be16((char *)newdup - (char *)d);
681 xfs_dir2_data_log_unused(tp, bp, newdup); 681 xfs_dir2_data_log_unused(tp, bp, newdup);
682 (void)xfs_dir2_data_freeinsert(d, newdup, needlogp); 682 (void)xfs_dir2_data_freeinsert(d, newdup, needlogp);
@@ -712,7 +712,7 @@ xfs_dir2_data_use_free(
712 ASSERT(be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG); 712 ASSERT(be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG);
713 ASSERT(offset >= (char *)dup - (char *)d); 713 ASSERT(offset >= (char *)dup - (char *)d);
714 ASSERT(offset + len <= (char *)dup + be16_to_cpu(dup->length) - (char *)d); 714 ASSERT(offset + len <= (char *)dup + be16_to_cpu(dup->length) - (char *)d);
715 ASSERT((char *)dup - (char *)d == be16_to_cpu(*XFS_DIR2_DATA_UNUSED_TAG_P(dup))); 715 ASSERT((char *)dup - (char *)d == be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)));
716 /* 716 /*
717 * Look up the entry in the bestfree table. 717 * Look up the entry in the bestfree table.
718 */ 718 */
@@ -745,7 +745,7 @@ xfs_dir2_data_use_free(
745 newdup = (xfs_dir2_data_unused_t *)((char *)d + offset + len); 745 newdup = (xfs_dir2_data_unused_t *)((char *)d + offset + len);
746 newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); 746 newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
747 newdup->length = cpu_to_be16(oldlen - len); 747 newdup->length = cpu_to_be16(oldlen - len);
748 *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = 748 *xfs_dir2_data_unused_tag_p(newdup) =
749 cpu_to_be16((char *)newdup - (char *)d); 749 cpu_to_be16((char *)newdup - (char *)d);
750 xfs_dir2_data_log_unused(tp, bp, newdup); 750 xfs_dir2_data_log_unused(tp, bp, newdup);
751 /* 751 /*
@@ -772,7 +772,7 @@ xfs_dir2_data_use_free(
772 else if (matchback) { 772 else if (matchback) {
773 newdup = dup; 773 newdup = dup;
774 newdup->length = cpu_to_be16(((char *)d + offset) - (char *)newdup); 774 newdup->length = cpu_to_be16(((char *)d + offset) - (char *)newdup);
775 *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = 775 *xfs_dir2_data_unused_tag_p(newdup) =
776 cpu_to_be16((char *)newdup - (char *)d); 776 cpu_to_be16((char *)newdup - (char *)d);
777 xfs_dir2_data_log_unused(tp, bp, newdup); 777 xfs_dir2_data_log_unused(tp, bp, newdup);
778 /* 778 /*
@@ -799,13 +799,13 @@ xfs_dir2_data_use_free(
799 else { 799 else {
800 newdup = dup; 800 newdup = dup;
801 newdup->length = cpu_to_be16(((char *)d + offset) - (char *)newdup); 801 newdup->length = cpu_to_be16(((char *)d + offset) - (char *)newdup);
802 *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = 802 *xfs_dir2_data_unused_tag_p(newdup) =
803 cpu_to_be16((char *)newdup - (char *)d); 803 cpu_to_be16((char *)newdup - (char *)d);
804 xfs_dir2_data_log_unused(tp, bp, newdup); 804 xfs_dir2_data_log_unused(tp, bp, newdup);
805 newdup2 = (xfs_dir2_data_unused_t *)((char *)d + offset + len); 805 newdup2 = (xfs_dir2_data_unused_t *)((char *)d + offset + len);
806 newdup2->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); 806 newdup2->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
807 newdup2->length = cpu_to_be16(oldlen - len - be16_to_cpu(newdup->length)); 807 newdup2->length = cpu_to_be16(oldlen - len - be16_to_cpu(newdup->length));
808 *XFS_DIR2_DATA_UNUSED_TAG_P(newdup2) = 808 *xfs_dir2_data_unused_tag_p(newdup2) =
809 cpu_to_be16((char *)newdup2 - (char *)d); 809 cpu_to_be16((char *)newdup2 - (char *)d);
810 xfs_dir2_data_log_unused(tp, bp, newdup2); 810 xfs_dir2_data_log_unused(tp, bp, newdup2);
811 /* 811 /*
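
Most of the churn in xfs_dir2_data.c above is the same idiom applied to free ("unused") regions: a free region carries a free tag and a length up front, and its last two bytes record the region's own starting offset, kept current by *xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16(...). A minimal sketch follows; the field widths and the free-tag value are illustrative assumptions, not the on-disk format.

/*
 * Toy unused-entry bookkeeping: mark a range free and stamp its tag with
 * its own offset, then check the invariant the ASSERTs above rely on.
 */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define TOY_FREE_TAG 0xffff

struct toy_unused {
	uint16_t freetag;              /* TOY_FREE_TAG marks the region free */
	uint16_t length;               /* total length of the free region */
	/* ... free space ..., then a uint16_t tag at the very end */
};

static uint16_t *toy_unused_tag_p(struct toy_unused *dup)
{
	return (uint16_t *)((char *)dup + dup->length) - 1;
}

static void toy_make_free(char *block, uint16_t offset, uint16_t len)
{
	struct toy_unused *dup = (struct toy_unused *)(block + offset);

	dup->freetag = TOY_FREE_TAG;
	dup->length = len;
	*toy_unused_tag_p(dup) = offset;   /* cpu_to_be16() in the real code */
}

int main(void)
{
	uint64_t buf[512] = { 0 };         /* 4096-byte toy data block */
	char *block = (char *)buf;

	toy_make_free(block, 128, 256);
	struct toy_unused *dup = (struct toy_unused *)(block + 128);
	assert(*toy_unused_tag_p(dup) == (uint16_t)((char *)dup - block));
	printf("free region at offset %u, length %u\n", 128u, (unsigned)dup->length);
	return 0;
}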
diff --git a/fs/xfs/xfs_dir2_data.h b/fs/xfs/xfs_dir2_data.h
index c94c9099cfb1..b816e0252739 100644
--- a/fs/xfs/xfs_dir2_data.h
+++ b/fs/xfs/xfs_dir2_data.h
@@ -44,7 +44,7 @@ struct xfs_trans;
44#define XFS_DIR2_DATA_SPACE 0 44#define XFS_DIR2_DATA_SPACE 0
45#define XFS_DIR2_DATA_OFFSET (XFS_DIR2_DATA_SPACE * XFS_DIR2_SPACE_SIZE) 45#define XFS_DIR2_DATA_OFFSET (XFS_DIR2_DATA_SPACE * XFS_DIR2_SPACE_SIZE)
46#define XFS_DIR2_DATA_FIRSTDB(mp) \ 46#define XFS_DIR2_DATA_FIRSTDB(mp) \
47 XFS_DIR2_BYTE_TO_DB(mp, XFS_DIR2_DATA_OFFSET) 47 xfs_dir2_byte_to_db(mp, XFS_DIR2_DATA_OFFSET)
48 48
49/* 49/*
50 * Offsets of . and .. in data space (always block 0) 50 * Offsets of . and .. in data space (always block 0)
@@ -52,9 +52,9 @@ struct xfs_trans;
52#define XFS_DIR2_DATA_DOT_OFFSET \ 52#define XFS_DIR2_DATA_DOT_OFFSET \
53 ((xfs_dir2_data_aoff_t)sizeof(xfs_dir2_data_hdr_t)) 53 ((xfs_dir2_data_aoff_t)sizeof(xfs_dir2_data_hdr_t))
54#define XFS_DIR2_DATA_DOTDOT_OFFSET \ 54#define XFS_DIR2_DATA_DOTDOT_OFFSET \
55 (XFS_DIR2_DATA_DOT_OFFSET + XFS_DIR2_DATA_ENTSIZE(1)) 55 (XFS_DIR2_DATA_DOT_OFFSET + xfs_dir2_data_entsize(1))
56#define XFS_DIR2_DATA_FIRST_OFFSET \ 56#define XFS_DIR2_DATA_FIRST_OFFSET \
57 (XFS_DIR2_DATA_DOTDOT_OFFSET + XFS_DIR2_DATA_ENTSIZE(2)) 57 (XFS_DIR2_DATA_DOTDOT_OFFSET + xfs_dir2_data_entsize(2))
58 58
59/* 59/*
60 * Structures. 60 * Structures.
@@ -123,7 +123,6 @@ typedef struct xfs_dir2_data {
123/* 123/*
124 * Size of a data entry. 124 * Size of a data entry.
125 */ 125 */
126#define XFS_DIR2_DATA_ENTSIZE(n) xfs_dir2_data_entsize(n)
127static inline int xfs_dir2_data_entsize(int n) 126static inline int xfs_dir2_data_entsize(int n)
128{ 127{
129 return (int)roundup(offsetof(xfs_dir2_data_entry_t, name[0]) + (n) + \ 128 return (int)roundup(offsetof(xfs_dir2_data_entry_t, name[0]) + (n) + \
@@ -133,19 +132,16 @@ static inline int xfs_dir2_data_entsize(int n)
133/* 132/*
134 * Pointer to an entry's tag word. 133 * Pointer to an entry's tag word.
135 */ 134 */
136#define XFS_DIR2_DATA_ENTRY_TAG_P(dep) xfs_dir2_data_entry_tag_p(dep)
137static inline __be16 * 135static inline __be16 *
138xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep) 136xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep)
139{ 137{
140 return (__be16 *)((char *)dep + 138 return (__be16 *)((char *)dep +
141 XFS_DIR2_DATA_ENTSIZE(dep->namelen) - sizeof(__be16)); 139 xfs_dir2_data_entsize(dep->namelen) - sizeof(__be16));
142} 140}
143 141
144/* 142/*
145 * Pointer to a freespace's tag word. 143 * Pointer to a freespace's tag word.
146 */ 144 */
147#define XFS_DIR2_DATA_UNUSED_TAG_P(dup) \
148 xfs_dir2_data_unused_tag_p(dup)
149static inline __be16 * 145static inline __be16 *
150xfs_dir2_data_unused_tag_p(xfs_dir2_data_unused_t *dup) 146xfs_dir2_data_unused_tag_p(xfs_dir2_data_unused_t *dup)
151{ 147{
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index db14ea71459f..1b73c9ad646a 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -92,7 +92,7 @@ xfs_dir2_block_to_leaf(
92 if ((error = xfs_da_grow_inode(args, &blkno))) { 92 if ((error = xfs_da_grow_inode(args, &blkno))) {
93 return error; 93 return error;
94 } 94 }
95 ldb = XFS_DIR2_DA_TO_DB(mp, blkno); 95 ldb = xfs_dir2_da_to_db(mp, blkno);
96 ASSERT(ldb == XFS_DIR2_LEAF_FIRSTDB(mp)); 96 ASSERT(ldb == XFS_DIR2_LEAF_FIRSTDB(mp));
97 /* 97 /*
98 * Initialize the leaf block, get a buffer for it. 98 * Initialize the leaf block, get a buffer for it.
@@ -104,8 +104,8 @@ xfs_dir2_block_to_leaf(
104 leaf = lbp->data; 104 leaf = lbp->data;
105 block = dbp->data; 105 block = dbp->data;
106 xfs_dir2_data_check(dp, dbp); 106 xfs_dir2_data_check(dp, dbp);
107 btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 107 btp = xfs_dir2_block_tail_p(mp, block);
108 blp = XFS_DIR2_BLOCK_LEAF_P(btp); 108 blp = xfs_dir2_block_leaf_p(btp);
109 /* 109 /*
110 * Set the counts in the leaf header. 110 * Set the counts in the leaf header.
111 */ 111 */
@@ -137,9 +137,9 @@ xfs_dir2_block_to_leaf(
137 /* 137 /*
138 * Set up leaf tail and bests table. 138 * Set up leaf tail and bests table.
139 */ 139 */
140 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 140 ltp = xfs_dir2_leaf_tail_p(mp, leaf);
141 ltp->bestcount = cpu_to_be32(1); 141 ltp->bestcount = cpu_to_be32(1);
142 bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); 142 bestsp = xfs_dir2_leaf_bests_p(ltp);
143 bestsp[0] = block->hdr.bestfree[0].length; 143 bestsp[0] = block->hdr.bestfree[0].length;
144 /* 144 /*
145 * Log the data header and leaf bests table. 145 * Log the data header and leaf bests table.
@@ -209,9 +209,9 @@ xfs_dir2_leaf_addname(
209 */ 209 */
210 index = xfs_dir2_leaf_search_hash(args, lbp); 210 index = xfs_dir2_leaf_search_hash(args, lbp);
211 leaf = lbp->data; 211 leaf = lbp->data;
212 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 212 ltp = xfs_dir2_leaf_tail_p(mp, leaf);
213 bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); 213 bestsp = xfs_dir2_leaf_bests_p(ltp);
214 length = XFS_DIR2_DATA_ENTSIZE(args->namelen); 214 length = xfs_dir2_data_entsize(args->namelen);
215 /* 215 /*
216 * See if there are any entries with the same hash value 216 * See if there are any entries with the same hash value
217 * and space in their block for the new entry. 217 * and space in their block for the new entry.
@@ -223,7 +223,7 @@ xfs_dir2_leaf_addname(
223 index++, lep++) { 223 index++, lep++) {
224 if (be32_to_cpu(lep->address) == XFS_DIR2_NULL_DATAPTR) 224 if (be32_to_cpu(lep->address) == XFS_DIR2_NULL_DATAPTR)
225 continue; 225 continue;
226 i = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); 226 i = xfs_dir2_dataptr_to_db(mp, be32_to_cpu(lep->address));
227 ASSERT(i < be32_to_cpu(ltp->bestcount)); 227 ASSERT(i < be32_to_cpu(ltp->bestcount));
228 ASSERT(be16_to_cpu(bestsp[i]) != NULLDATAOFF); 228 ASSERT(be16_to_cpu(bestsp[i]) != NULLDATAOFF);
229 if (be16_to_cpu(bestsp[i]) >= length) { 229 if (be16_to_cpu(bestsp[i]) >= length) {
@@ -378,7 +378,7 @@ xfs_dir2_leaf_addname(
378 */ 378 */
379 else { 379 else {
380 if ((error = 380 if ((error =
381 xfs_da_read_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, use_block), 381 xfs_da_read_buf(tp, dp, xfs_dir2_db_to_da(mp, use_block),
382 -1, &dbp, XFS_DATA_FORK))) { 382 -1, &dbp, XFS_DATA_FORK))) {
383 xfs_da_brelse(tp, lbp); 383 xfs_da_brelse(tp, lbp);
384 return error; 384 return error;
@@ -407,7 +407,7 @@ xfs_dir2_leaf_addname(
407 dep->inumber = cpu_to_be64(args->inumber); 407 dep->inumber = cpu_to_be64(args->inumber);
408 dep->namelen = args->namelen; 408 dep->namelen = args->namelen;
409 memcpy(dep->name, args->name, dep->namelen); 409 memcpy(dep->name, args->name, dep->namelen);
410 tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); 410 tagp = xfs_dir2_data_entry_tag_p(dep);
411 *tagp = cpu_to_be16((char *)dep - (char *)data); 411 *tagp = cpu_to_be16((char *)dep - (char *)data);
412 /* 412 /*
413 * Need to scan fix up the bestfree table. 413 * Need to scan fix up the bestfree table.
@@ -529,7 +529,7 @@ xfs_dir2_leaf_addname(
529 * Fill in the new leaf entry. 529 * Fill in the new leaf entry.
530 */ 530 */
531 lep->hashval = cpu_to_be32(args->hashval); 531 lep->hashval = cpu_to_be32(args->hashval);
532 lep->address = cpu_to_be32(XFS_DIR2_DB_OFF_TO_DATAPTR(mp, use_block, 532 lep->address = cpu_to_be32(xfs_dir2_db_off_to_dataptr(mp, use_block,
533 be16_to_cpu(*tagp))); 533 be16_to_cpu(*tagp)));
534 /* 534 /*
535 * Log the leaf fields and give up the buffers. 535 * Log the leaf fields and give up the buffers.
@@ -567,13 +567,13 @@ xfs_dir2_leaf_check(
567 * Should factor in the size of the bests table as well. 567 * Should factor in the size of the bests table as well.
568 * We can deduce a value for that from di_size. 568 * We can deduce a value for that from di_size.
569 */ 569 */
570 ASSERT(be16_to_cpu(leaf->hdr.count) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); 570 ASSERT(be16_to_cpu(leaf->hdr.count) <= xfs_dir2_max_leaf_ents(mp));
571 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 571 ltp = xfs_dir2_leaf_tail_p(mp, leaf);
572 /* 572 /*
573 * Leaves and bests don't overlap. 573 * Leaves and bests don't overlap.
574 */ 574 */
575 ASSERT((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] <= 575 ASSERT((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] <=
576 (char *)XFS_DIR2_LEAF_BESTS_P(ltp)); 576 (char *)xfs_dir2_leaf_bests_p(ltp));
577 /* 577 /*
578 * Check hash value order, count stale entries. 578 * Check hash value order, count stale entries.
579 */ 579 */
@@ -815,12 +815,12 @@ xfs_dir2_leaf_getdents(
815 * Inside the loop we keep the main offset value as a byte offset 815 * Inside the loop we keep the main offset value as a byte offset
816 * in the directory file. 816 * in the directory file.
817 */ 817 */
818 curoff = XFS_DIR2_DATAPTR_TO_BYTE(mp, uio->uio_offset); 818 curoff = xfs_dir2_dataptr_to_byte(mp, uio->uio_offset);
819 /* 819 /*
820 * Force this conversion through db so we truncate the offset 820 * Force this conversion through db so we truncate the offset
821 * down to get the start of the data block. 821 * down to get the start of the data block.
822 */ 822 */
823 map_off = XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_BYTE_TO_DB(mp, curoff)); 823 map_off = xfs_dir2_db_to_da(mp, xfs_dir2_byte_to_db(mp, curoff));
824 /* 824 /*
825 * Loop over directory entries until we reach the end offset. 825 * Loop over directory entries until we reach the end offset.
826 * Get more blocks and readahead as necessary. 826 * Get more blocks and readahead as necessary.
@@ -870,7 +870,7 @@ xfs_dir2_leaf_getdents(
870 */ 870 */
871 if (1 + ra_want > map_blocks && 871 if (1 + ra_want > map_blocks &&
872 map_off < 872 map_off <
873 XFS_DIR2_BYTE_TO_DA(mp, XFS_DIR2_LEAF_OFFSET)) { 873 xfs_dir2_byte_to_da(mp, XFS_DIR2_LEAF_OFFSET)) {
874 /* 874 /*
875 * Get more bmaps, fill in after the ones 875 * Get more bmaps, fill in after the ones
876 * we already have in the table. 876 * we already have in the table.
@@ -878,7 +878,7 @@ xfs_dir2_leaf_getdents(
878 nmap = map_size - map_valid; 878 nmap = map_size - map_valid;
879 error = xfs_bmapi(tp, dp, 879 error = xfs_bmapi(tp, dp,
880 map_off, 880 map_off,
881 XFS_DIR2_BYTE_TO_DA(mp, 881 xfs_dir2_byte_to_da(mp,
882 XFS_DIR2_LEAF_OFFSET) - map_off, 882 XFS_DIR2_LEAF_OFFSET) - map_off,
883 XFS_BMAPI_METADATA, NULL, 0, 883 XFS_BMAPI_METADATA, NULL, 0,
884 &map[map_valid], &nmap, NULL, NULL); 884 &map[map_valid], &nmap, NULL, NULL);
@@ -903,7 +903,7 @@ xfs_dir2_leaf_getdents(
903 map[map_valid + nmap - 1].br_blockcount; 903 map[map_valid + nmap - 1].br_blockcount;
904 else 904 else
905 map_off = 905 map_off =
906 XFS_DIR2_BYTE_TO_DA(mp, 906 xfs_dir2_byte_to_da(mp,
907 XFS_DIR2_LEAF_OFFSET); 907 XFS_DIR2_LEAF_OFFSET);
908 /* 908 /*
909 * Look for holes in the mapping, and 909 * Look for holes in the mapping, and
@@ -931,14 +931,14 @@ xfs_dir2_leaf_getdents(
931 * No valid mappings, so no more data blocks. 931 * No valid mappings, so no more data blocks.
932 */ 932 */
933 if (!map_valid) { 933 if (!map_valid) {
934 curoff = XFS_DIR2_DA_TO_BYTE(mp, map_off); 934 curoff = xfs_dir2_da_to_byte(mp, map_off);
935 break; 935 break;
936 } 936 }
937 /* 937 /*
938 * Read the directory block starting at the first 938 * Read the directory block starting at the first
939 * mapping. 939 * mapping.
940 */ 940 */
941 curdb = XFS_DIR2_DA_TO_DB(mp, map->br_startoff); 941 curdb = xfs_dir2_da_to_db(mp, map->br_startoff);
942 error = xfs_da_read_buf(tp, dp, map->br_startoff, 942 error = xfs_da_read_buf(tp, dp, map->br_startoff,
943 map->br_blockcount >= mp->m_dirblkfsbs ? 943 map->br_blockcount >= mp->m_dirblkfsbs ?
944 XFS_FSB_TO_DADDR(mp, map->br_startblock) : 944 XFS_FSB_TO_DADDR(mp, map->br_startblock) :
@@ -1014,7 +1014,7 @@ xfs_dir2_leaf_getdents(
1014 /* 1014 /*
1015 * Having done a read, we need to set a new offset. 1015 * Having done a read, we need to set a new offset.
1016 */ 1016 */
1017 newoff = XFS_DIR2_DB_OFF_TO_BYTE(mp, curdb, 0); 1017 newoff = xfs_dir2_db_off_to_byte(mp, curdb, 0);
1018 /* 1018 /*
1019 * Start of the current block. 1019 * Start of the current block.
1020 */ 1020 */
@@ -1024,7 +1024,7 @@ xfs_dir2_leaf_getdents(
1024 * Make sure we're in the right block. 1024 * Make sure we're in the right block.
1025 */ 1025 */
1026 else if (curoff > newoff) 1026 else if (curoff > newoff)
1027 ASSERT(XFS_DIR2_BYTE_TO_DB(mp, curoff) == 1027 ASSERT(xfs_dir2_byte_to_db(mp, curoff) ==
1028 curdb); 1028 curdb);
1029 data = bp->data; 1029 data = bp->data;
1030 xfs_dir2_data_check(dp, bp); 1030 xfs_dir2_data_check(dp, bp);
@@ -1032,7 +1032,7 @@ xfs_dir2_leaf_getdents(
1032 * Find our position in the block. 1032 * Find our position in the block.
1033 */ 1033 */
1034 ptr = (char *)&data->u; 1034 ptr = (char *)&data->u;
1035 byteoff = XFS_DIR2_BYTE_TO_OFF(mp, curoff); 1035 byteoff = xfs_dir2_byte_to_off(mp, curoff);
1036 /* 1036 /*
1037 * Skip past the header. 1037 * Skip past the header.
1038 */ 1038 */
@@ -1054,15 +1054,15 @@ xfs_dir2_leaf_getdents(
1054 } 1054 }
1055 dep = (xfs_dir2_data_entry_t *)ptr; 1055 dep = (xfs_dir2_data_entry_t *)ptr;
1056 length = 1056 length =
1057 XFS_DIR2_DATA_ENTSIZE(dep->namelen); 1057 xfs_dir2_data_entsize(dep->namelen);
1058 ptr += length; 1058 ptr += length;
1059 } 1059 }
1060 /* 1060 /*
1061 * Now set our real offset. 1061 * Now set our real offset.
1062 */ 1062 */
1063 curoff = 1063 curoff =
1064 XFS_DIR2_DB_OFF_TO_BYTE(mp, 1064 xfs_dir2_db_off_to_byte(mp,
1065 XFS_DIR2_BYTE_TO_DB(mp, curoff), 1065 xfs_dir2_byte_to_db(mp, curoff),
1066 (char *)ptr - (char *)data); 1066 (char *)ptr - (char *)data);
1067 if (ptr >= (char *)data + mp->m_dirblksize) { 1067 if (ptr >= (char *)data + mp->m_dirblksize) {
1068 continue; 1068 continue;
@@ -1091,9 +1091,9 @@ xfs_dir2_leaf_getdents(
1091 1091
1092 p->namelen = dep->namelen; 1092 p->namelen = dep->namelen;
1093 1093
1094 length = XFS_DIR2_DATA_ENTSIZE(p->namelen); 1094 length = xfs_dir2_data_entsize(p->namelen);
1095 1095
1096 p->cook = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff + length); 1096 p->cook = xfs_dir2_byte_to_dataptr(mp, curoff + length);
1097 1097
1098 p->ino = be64_to_cpu(dep->inumber); 1098 p->ino = be64_to_cpu(dep->inumber);
1099#if XFS_BIG_INUMS 1099#if XFS_BIG_INUMS
@@ -1121,10 +1121,10 @@ xfs_dir2_leaf_getdents(
1121 * All done. Set output offset value to current offset. 1121 * All done. Set output offset value to current offset.
1122 */ 1122 */
1123 *eofp = eof; 1123 *eofp = eof;
1124 if (curoff > XFS_DIR2_DATAPTR_TO_BYTE(mp, XFS_DIR2_MAX_DATAPTR)) 1124 if (curoff > xfs_dir2_dataptr_to_byte(mp, XFS_DIR2_MAX_DATAPTR))
1125 uio->uio_offset = XFS_DIR2_MAX_DATAPTR; 1125 uio->uio_offset = XFS_DIR2_MAX_DATAPTR;
1126 else 1126 else
1127 uio->uio_offset = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff); 1127 uio->uio_offset = xfs_dir2_byte_to_dataptr(mp, curoff);
1128 kmem_free(map, map_size * sizeof(*map)); 1128 kmem_free(map, map_size * sizeof(*map));
1129 kmem_free(p, sizeof(*p)); 1129 kmem_free(p, sizeof(*p));
1130 if (bp) 1130 if (bp)
@@ -1159,7 +1159,7 @@ xfs_dir2_leaf_init(
1159 /* 1159 /*
1160 * Get the buffer for the block. 1160 * Get the buffer for the block.
1161 */ 1161 */
1162 error = xfs_da_get_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, bno), -1, &bp, 1162 error = xfs_da_get_buf(tp, dp, xfs_dir2_db_to_da(mp, bno), -1, &bp,
1163 XFS_DATA_FORK); 1163 XFS_DATA_FORK);
1164 if (error) { 1164 if (error) {
1165 return error; 1165 return error;
@@ -1181,7 +1181,7 @@ xfs_dir2_leaf_init(
1181 * the block. 1181 * the block.
1182 */ 1182 */
1183 if (magic == XFS_DIR2_LEAF1_MAGIC) { 1183 if (magic == XFS_DIR2_LEAF1_MAGIC) {
1184 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 1184 ltp = xfs_dir2_leaf_tail_p(mp, leaf);
1185 ltp->bestcount = 0; 1185 ltp->bestcount = 0;
1186 xfs_dir2_leaf_log_tail(tp, bp); 1186 xfs_dir2_leaf_log_tail(tp, bp);
1187 } 1187 }
@@ -1206,9 +1206,9 @@ xfs_dir2_leaf_log_bests(
1206 1206
1207 leaf = bp->data; 1207 leaf = bp->data;
1208 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); 1208 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC);
1209 ltp = XFS_DIR2_LEAF_TAIL_P(tp->t_mountp, leaf); 1209 ltp = xfs_dir2_leaf_tail_p(tp->t_mountp, leaf);
1210 firstb = XFS_DIR2_LEAF_BESTS_P(ltp) + first; 1210 firstb = xfs_dir2_leaf_bests_p(ltp) + first;
1211 lastb = XFS_DIR2_LEAF_BESTS_P(ltp) + last; 1211 lastb = xfs_dir2_leaf_bests_p(ltp) + last;
1212 xfs_da_log_buf(tp, bp, (uint)((char *)firstb - (char *)leaf), 1212 xfs_da_log_buf(tp, bp, (uint)((char *)firstb - (char *)leaf),
1213 (uint)((char *)lastb - (char *)leaf + sizeof(*lastb) - 1)); 1213 (uint)((char *)lastb - (char *)leaf + sizeof(*lastb) - 1));
1214} 1214}
@@ -1268,7 +1268,7 @@ xfs_dir2_leaf_log_tail(
1268 mp = tp->t_mountp; 1268 mp = tp->t_mountp;
1269 leaf = bp->data; 1269 leaf = bp->data;
1270 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); 1270 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC);
1271 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 1271 ltp = xfs_dir2_leaf_tail_p(mp, leaf);
1272 xfs_da_log_buf(tp, bp, (uint)((char *)ltp - (char *)leaf), 1272 xfs_da_log_buf(tp, bp, (uint)((char *)ltp - (char *)leaf),
1273 (uint)(mp->m_dirblksize - 1)); 1273 (uint)(mp->m_dirblksize - 1));
1274} 1274}
@@ -1312,7 +1312,7 @@ xfs_dir2_leaf_lookup(
1312 */ 1312 */
1313 dep = (xfs_dir2_data_entry_t *) 1313 dep = (xfs_dir2_data_entry_t *)
1314 ((char *)dbp->data + 1314 ((char *)dbp->data +
1315 XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, be32_to_cpu(lep->address))); 1315 xfs_dir2_dataptr_to_off(dp->i_mount, be32_to_cpu(lep->address)));
1316 /* 1316 /*
1317 * Return the found inode number. 1317 * Return the found inode number.
1318 */ 1318 */
@@ -1381,7 +1381,7 @@ xfs_dir2_leaf_lookup_int(
1381 /* 1381 /*
1382 * Get the new data block number. 1382 * Get the new data block number.
1383 */ 1383 */
1384 newdb = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); 1384 newdb = xfs_dir2_dataptr_to_db(mp, be32_to_cpu(lep->address));
1385 /* 1385 /*
1386 * If it's not the same as the old data block number, 1386 * If it's not the same as the old data block number,
1387 * need to pitch the old one and read the new one. 1387 * need to pitch the old one and read the new one.
@@ -1391,7 +1391,7 @@ xfs_dir2_leaf_lookup_int(
1391 xfs_da_brelse(tp, dbp); 1391 xfs_da_brelse(tp, dbp);
1392 if ((error = 1392 if ((error =
1393 xfs_da_read_buf(tp, dp, 1393 xfs_da_read_buf(tp, dp,
1394 XFS_DIR2_DB_TO_DA(mp, newdb), -1, &dbp, 1394 xfs_dir2_db_to_da(mp, newdb), -1, &dbp,
1395 XFS_DATA_FORK))) { 1395 XFS_DATA_FORK))) {
1396 xfs_da_brelse(tp, lbp); 1396 xfs_da_brelse(tp, lbp);
1397 return error; 1397 return error;
@@ -1404,7 +1404,7 @@ xfs_dir2_leaf_lookup_int(
1404 */ 1404 */
1405 dep = (xfs_dir2_data_entry_t *) 1405 dep = (xfs_dir2_data_entry_t *)
1406 ((char *)dbp->data + 1406 ((char *)dbp->data +
1407 XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address))); 1407 xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address)));
1408 /* 1408 /*
1409 * If it matches then return it. 1409 * If it matches then return it.
1410 */ 1410 */
@@ -1469,20 +1469,20 @@ xfs_dir2_leaf_removename(
1469 * Point to the leaf entry, use that to point to the data entry. 1469 * Point to the leaf entry, use that to point to the data entry.
1470 */ 1470 */
1471 lep = &leaf->ents[index]; 1471 lep = &leaf->ents[index];
1472 db = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); 1472 db = xfs_dir2_dataptr_to_db(mp, be32_to_cpu(lep->address));
1473 dep = (xfs_dir2_data_entry_t *) 1473 dep = (xfs_dir2_data_entry_t *)
1474 ((char *)data + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address))); 1474 ((char *)data + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address)));
1475 needscan = needlog = 0; 1475 needscan = needlog = 0;
1476 oldbest = be16_to_cpu(data->hdr.bestfree[0].length); 1476 oldbest = be16_to_cpu(data->hdr.bestfree[0].length);
1477 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 1477 ltp = xfs_dir2_leaf_tail_p(mp, leaf);
1478 bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); 1478 bestsp = xfs_dir2_leaf_bests_p(ltp);
1479 ASSERT(be16_to_cpu(bestsp[db]) == oldbest); 1479 ASSERT(be16_to_cpu(bestsp[db]) == oldbest);
1480 /* 1480 /*
1481 * Mark the former data entry unused. 1481 * Mark the former data entry unused.
1482 */ 1482 */
1483 xfs_dir2_data_make_free(tp, dbp, 1483 xfs_dir2_data_make_free(tp, dbp,
1484 (xfs_dir2_data_aoff_t)((char *)dep - (char *)data), 1484 (xfs_dir2_data_aoff_t)((char *)dep - (char *)data),
1485 XFS_DIR2_DATA_ENTSIZE(dep->namelen), &needlog, &needscan); 1485 xfs_dir2_data_entsize(dep->namelen), &needlog, &needscan);
1486 /* 1486 /*
1487 * We just mark the leaf entry stale by putting a null in it. 1487 * We just mark the leaf entry stale by putting a null in it.
1488 */ 1488 */
@@ -1602,7 +1602,7 @@ xfs_dir2_leaf_replace(
1602 */ 1602 */
1603 dep = (xfs_dir2_data_entry_t *) 1603 dep = (xfs_dir2_data_entry_t *)
1604 ((char *)dbp->data + 1604 ((char *)dbp->data +
1605 XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, be32_to_cpu(lep->address))); 1605 xfs_dir2_dataptr_to_off(dp->i_mount, be32_to_cpu(lep->address)));
1606 ASSERT(args->inumber != be64_to_cpu(dep->inumber)); 1606 ASSERT(args->inumber != be64_to_cpu(dep->inumber));
1607 /* 1607 /*
1608 * Put the new inode number in, log it. 1608 * Put the new inode number in, log it.
@@ -1698,7 +1698,7 @@ xfs_dir2_leaf_trim_data(
1698 /* 1698 /*
1699 * Read the offending data block. We need its buffer. 1699 * Read the offending data block. We need its buffer.
1700 */ 1700 */
1701 if ((error = xfs_da_read_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, db), -1, &dbp, 1701 if ((error = xfs_da_read_buf(tp, dp, xfs_dir2_db_to_da(mp, db), -1, &dbp,
1702 XFS_DATA_FORK))) { 1702 XFS_DATA_FORK))) {
1703 return error; 1703 return error;
1704 } 1704 }
@@ -1712,7 +1712,7 @@ xfs_dir2_leaf_trim_data(
1712 */ 1712 */
1713 1713
1714 leaf = lbp->data; 1714 leaf = lbp->data;
1715 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 1715 ltp = xfs_dir2_leaf_tail_p(mp, leaf);
1716 ASSERT(be16_to_cpu(data->hdr.bestfree[0].length) == 1716 ASSERT(be16_to_cpu(data->hdr.bestfree[0].length) ==
1717 mp->m_dirblksize - (uint)sizeof(data->hdr)); 1717 mp->m_dirblksize - (uint)sizeof(data->hdr));
1718 ASSERT(db == be32_to_cpu(ltp->bestcount) - 1); 1718 ASSERT(db == be32_to_cpu(ltp->bestcount) - 1);
@@ -1727,7 +1727,7 @@ xfs_dir2_leaf_trim_data(
1727 /* 1727 /*
1728 * Eliminate the last bests entry from the table. 1728 * Eliminate the last bests entry from the table.
1729 */ 1729 */
1730 bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); 1730 bestsp = xfs_dir2_leaf_bests_p(ltp);
1731 be32_add(&ltp->bestcount, -1); 1731 be32_add(&ltp->bestcount, -1);
1732 memmove(&bestsp[1], &bestsp[0], be32_to_cpu(ltp->bestcount) * sizeof(*bestsp)); 1732 memmove(&bestsp[1], &bestsp[0], be32_to_cpu(ltp->bestcount) * sizeof(*bestsp));
1733 xfs_dir2_leaf_log_tail(tp, lbp); 1733 xfs_dir2_leaf_log_tail(tp, lbp);
@@ -1838,12 +1838,12 @@ xfs_dir2_node_to_leaf(
1838 /* 1838 /*
1839 * Set up the leaf tail from the freespace block. 1839 * Set up the leaf tail from the freespace block.
1840 */ 1840 */
1841 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 1841 ltp = xfs_dir2_leaf_tail_p(mp, leaf);
1842 ltp->bestcount = free->hdr.nvalid; 1842 ltp->bestcount = free->hdr.nvalid;
1843 /* 1843 /*
1844 * Set up the leaf bests table. 1844 * Set up the leaf bests table.
1845 */ 1845 */
1846 memcpy(XFS_DIR2_LEAF_BESTS_P(ltp), free->bests, 1846 memcpy(xfs_dir2_leaf_bests_p(ltp), free->bests,
1847 be32_to_cpu(ltp->bestcount) * sizeof(leaf->bests[0])); 1847 be32_to_cpu(ltp->bestcount) * sizeof(leaf->bests[0]));
1848 xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); 1848 xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1);
1849 xfs_dir2_leaf_log_tail(tp, lbp); 1849 xfs_dir2_leaf_log_tail(tp, lbp);
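
The leaf code above keeps reaching for xfs_dir2_leaf_tail_p() and xfs_dir2_leaf_bests_p(). Their bodies are elided in this patch, so the sketch below is a reading of how they are used rather than the kernel implementation: the tail (holding bestcount) sits in the last bytes of the leaf block, and the "bests" array of per-data-block free-space values ends right where the tail begins, addressed by counting backwards from it.

/*
 * Toy leaf tail and bests table; widths are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_leaf_tail { uint32_t bestcount; };

static struct toy_leaf_tail *toy_leaf_tail_p(char *leaf, size_t blksize)
{
	return (struct toy_leaf_tail *)(leaf + blksize) - 1;
}

static uint16_t *toy_leaf_bests_p(struct toy_leaf_tail *ltp)
{
	return (uint16_t *)ltp - ltp->bestcount;
}

int main(void)
{
	uint64_t buf[512] = { 0 };             /* 4096-byte toy leaf block */
	char *leaf = (char *)buf;
	struct toy_leaf_tail *ltp = toy_leaf_tail_p(leaf, sizeof(buf));

	ltp->bestcount = 3;                    /* three data blocks tracked */
	uint16_t *bestsp = toy_leaf_bests_p(ltp);
	bestsp[0] = 512;                       /* longest free space in data block 0 */

	printf("bests[] at offset %td, tail at offset %td\n",
	       (char *)bestsp - leaf, (char *)ltp - leaf);
	return 0;
}

Because the bests array grows downward from the tail, adding or dropping a tracked data block means shifting the array relative to the tail, which is what the memmove() calls in the leaf code above do.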
diff --git a/fs/xfs/xfs_dir2_leaf.h b/fs/xfs/xfs_dir2_leaf.h
index f57ca1162412..70c97f3f815e 100644
--- a/fs/xfs/xfs_dir2_leaf.h
+++ b/fs/xfs/xfs_dir2_leaf.h
@@ -32,7 +32,7 @@ struct xfs_trans;
32#define XFS_DIR2_LEAF_SPACE 1 32#define XFS_DIR2_LEAF_SPACE 1
33#define XFS_DIR2_LEAF_OFFSET (XFS_DIR2_LEAF_SPACE * XFS_DIR2_SPACE_SIZE) 33#define XFS_DIR2_LEAF_OFFSET (XFS_DIR2_LEAF_SPACE * XFS_DIR2_SPACE_SIZE)
34#define XFS_DIR2_LEAF_FIRSTDB(mp) \ 34#define XFS_DIR2_LEAF_FIRSTDB(mp) \
35 XFS_DIR2_BYTE_TO_DB(mp, XFS_DIR2_LEAF_OFFSET) 35 xfs_dir2_byte_to_db(mp, XFS_DIR2_LEAF_OFFSET)
36 36
37/* 37/*
38 * Offset in data space of a data entry. 38 * Offset in data space of a data entry.
@@ -82,7 +82,6 @@ typedef struct xfs_dir2_leaf {
82 * DB blocks here are logical directory block numbers, not filesystem blocks. 82 * DB blocks here are logical directory block numbers, not filesystem blocks.
83 */ 83 */
84 84
85#define XFS_DIR2_MAX_LEAF_ENTS(mp) xfs_dir2_max_leaf_ents(mp)
86static inline int xfs_dir2_max_leaf_ents(struct xfs_mount *mp) 85static inline int xfs_dir2_max_leaf_ents(struct xfs_mount *mp)
87{ 86{
88 return (int)(((mp)->m_dirblksize - (uint)sizeof(xfs_dir2_leaf_hdr_t)) / 87 return (int)(((mp)->m_dirblksize - (uint)sizeof(xfs_dir2_leaf_hdr_t)) /
@@ -92,7 +91,6 @@ static inline int xfs_dir2_max_leaf_ents(struct xfs_mount *mp)
92/* 91/*
93 * Get address of the bestcount field in the single-leaf block. 92 * Get address of the bestcount field in the single-leaf block.
94 */ 93 */
95#define XFS_DIR2_LEAF_TAIL_P(mp,lp) xfs_dir2_leaf_tail_p(mp, lp)
96static inline xfs_dir2_leaf_tail_t * 94static inline xfs_dir2_leaf_tail_t *
97xfs_dir2_leaf_tail_p(struct xfs_mount *mp, xfs_dir2_leaf_t *lp) 95xfs_dir2_leaf_tail_p(struct xfs_mount *mp, xfs_dir2_leaf_t *lp)
98{ 96{
@@ -104,7 +102,6 @@ xfs_dir2_leaf_tail_p(struct xfs_mount *mp, xfs_dir2_leaf_t *lp)
104/* 102/*
105 * Get address of the bests array in the single-leaf block. 103 * Get address of the bests array in the single-leaf block.
106 */ 104 */
107#define XFS_DIR2_LEAF_BESTS_P(ltp) xfs_dir2_leaf_bests_p(ltp)
108static inline __be16 * 105static inline __be16 *
109xfs_dir2_leaf_bests_p(xfs_dir2_leaf_tail_t *ltp) 106xfs_dir2_leaf_bests_p(xfs_dir2_leaf_tail_t *ltp)
110{ 107{
@@ -114,7 +111,6 @@ xfs_dir2_leaf_bests_p(xfs_dir2_leaf_tail_t *ltp)
114/* 111/*
115 * Convert dataptr to byte in file space 112 * Convert dataptr to byte in file space
116 */ 113 */
117#define XFS_DIR2_DATAPTR_TO_BYTE(mp,dp) xfs_dir2_dataptr_to_byte(mp, dp)
118static inline xfs_dir2_off_t 114static inline xfs_dir2_off_t
119xfs_dir2_dataptr_to_byte(struct xfs_mount *mp, xfs_dir2_dataptr_t dp) 115xfs_dir2_dataptr_to_byte(struct xfs_mount *mp, xfs_dir2_dataptr_t dp)
120{ 116{
@@ -124,7 +120,6 @@ xfs_dir2_dataptr_to_byte(struct xfs_mount *mp, xfs_dir2_dataptr_t dp)
124/* 120/*
125 * Convert byte in file space to dataptr. It had better be aligned. 121 * Convert byte in file space to dataptr. It had better be aligned.
126 */ 122 */
127#define XFS_DIR2_BYTE_TO_DATAPTR(mp,by) xfs_dir2_byte_to_dataptr(mp,by)
128static inline xfs_dir2_dataptr_t 123static inline xfs_dir2_dataptr_t
129xfs_dir2_byte_to_dataptr(struct xfs_mount *mp, xfs_dir2_off_t by) 124xfs_dir2_byte_to_dataptr(struct xfs_mount *mp, xfs_dir2_off_t by)
130{ 125{
@@ -134,7 +129,6 @@ xfs_dir2_byte_to_dataptr(struct xfs_mount *mp, xfs_dir2_off_t by)
134/* 129/*
135 * Convert byte in space to (DB) block 130 * Convert byte in space to (DB) block
136 */ 131 */
137#define XFS_DIR2_BYTE_TO_DB(mp,by) xfs_dir2_byte_to_db(mp, by)
138static inline xfs_dir2_db_t 132static inline xfs_dir2_db_t
139xfs_dir2_byte_to_db(struct xfs_mount *mp, xfs_dir2_off_t by) 133xfs_dir2_byte_to_db(struct xfs_mount *mp, xfs_dir2_off_t by)
140{ 134{
@@ -145,17 +139,15 @@ xfs_dir2_byte_to_db(struct xfs_mount *mp, xfs_dir2_off_t by)
145/* 139/*
146 * Convert dataptr to a block number 140 * Convert dataptr to a block number
147 */ 141 */
148#define XFS_DIR2_DATAPTR_TO_DB(mp,dp) xfs_dir2_dataptr_to_db(mp, dp)
149static inline xfs_dir2_db_t 142static inline xfs_dir2_db_t
150xfs_dir2_dataptr_to_db(struct xfs_mount *mp, xfs_dir2_dataptr_t dp) 143xfs_dir2_dataptr_to_db(struct xfs_mount *mp, xfs_dir2_dataptr_t dp)
151{ 144{
152 return XFS_DIR2_BYTE_TO_DB(mp, XFS_DIR2_DATAPTR_TO_BYTE(mp, dp)); 145 return xfs_dir2_byte_to_db(mp, xfs_dir2_dataptr_to_byte(mp, dp));
153} 146}
154 147
155/* 148/*
156 * Convert byte in space to offset in a block 149 * Convert byte in space to offset in a block
157 */ 150 */
158#define XFS_DIR2_BYTE_TO_OFF(mp,by) xfs_dir2_byte_to_off(mp, by)
159static inline xfs_dir2_data_aoff_t 151static inline xfs_dir2_data_aoff_t
160xfs_dir2_byte_to_off(struct xfs_mount *mp, xfs_dir2_off_t by) 152xfs_dir2_byte_to_off(struct xfs_mount *mp, xfs_dir2_off_t by)
161{ 153{
@@ -166,18 +158,15 @@ xfs_dir2_byte_to_off(struct xfs_mount *mp, xfs_dir2_off_t by)
166/* 158/*
167 * Convert dataptr to a byte offset in a block 159 * Convert dataptr to a byte offset in a block
168 */ 160 */
169#define XFS_DIR2_DATAPTR_TO_OFF(mp,dp) xfs_dir2_dataptr_to_off(mp, dp)
170static inline xfs_dir2_data_aoff_t 161static inline xfs_dir2_data_aoff_t
171xfs_dir2_dataptr_to_off(struct xfs_mount *mp, xfs_dir2_dataptr_t dp) 162xfs_dir2_dataptr_to_off(struct xfs_mount *mp, xfs_dir2_dataptr_t dp)
172{ 163{
173 return XFS_DIR2_BYTE_TO_OFF(mp, XFS_DIR2_DATAPTR_TO_BYTE(mp, dp)); 164 return xfs_dir2_byte_to_off(mp, xfs_dir2_dataptr_to_byte(mp, dp));
174} 165}
175 166
176/* 167/*
177 * Convert block and offset to byte in space 168 * Convert block and offset to byte in space
178 */ 169 */
179#define XFS_DIR2_DB_OFF_TO_BYTE(mp,db,o) \
180 xfs_dir2_db_off_to_byte(mp, db, o)
181static inline xfs_dir2_off_t 170static inline xfs_dir2_off_t
182xfs_dir2_db_off_to_byte(struct xfs_mount *mp, xfs_dir2_db_t db, 171xfs_dir2_db_off_to_byte(struct xfs_mount *mp, xfs_dir2_db_t db,
183 xfs_dir2_data_aoff_t o) 172 xfs_dir2_data_aoff_t o)
@@ -189,7 +178,6 @@ xfs_dir2_db_off_to_byte(struct xfs_mount *mp, xfs_dir2_db_t db,
189/* 178/*
190 * Convert block (DB) to block (dablk) 179 * Convert block (DB) to block (dablk)
191 */ 180 */
192#define XFS_DIR2_DB_TO_DA(mp,db) xfs_dir2_db_to_da(mp, db)
193static inline xfs_dablk_t 181static inline xfs_dablk_t
194xfs_dir2_db_to_da(struct xfs_mount *mp, xfs_dir2_db_t db) 182xfs_dir2_db_to_da(struct xfs_mount *mp, xfs_dir2_db_t db)
195{ 183{
@@ -199,29 +187,25 @@ xfs_dir2_db_to_da(struct xfs_mount *mp, xfs_dir2_db_t db)
199/* 187/*
200 * Convert byte in space to (DA) block 188 * Convert byte in space to (DA) block
201 */ 189 */
202#define XFS_DIR2_BYTE_TO_DA(mp,by) xfs_dir2_byte_to_da(mp, by)
203static inline xfs_dablk_t 190static inline xfs_dablk_t
204xfs_dir2_byte_to_da(struct xfs_mount *mp, xfs_dir2_off_t by) 191xfs_dir2_byte_to_da(struct xfs_mount *mp, xfs_dir2_off_t by)
205{ 192{
206 return XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_BYTE_TO_DB(mp, by)); 193 return xfs_dir2_db_to_da(mp, xfs_dir2_byte_to_db(mp, by));
207} 194}
208 195
209/* 196/*
210 * Convert block and offset to dataptr 197 * Convert block and offset to dataptr
211 */ 198 */
212#define XFS_DIR2_DB_OFF_TO_DATAPTR(mp,db,o) \
213 xfs_dir2_db_off_to_dataptr(mp, db, o)
214static inline xfs_dir2_dataptr_t 199static inline xfs_dir2_dataptr_t
215xfs_dir2_db_off_to_dataptr(struct xfs_mount *mp, xfs_dir2_db_t db, 200xfs_dir2_db_off_to_dataptr(struct xfs_mount *mp, xfs_dir2_db_t db,
216 xfs_dir2_data_aoff_t o) 201 xfs_dir2_data_aoff_t o)
217{ 202{
218 return XFS_DIR2_BYTE_TO_DATAPTR(mp, XFS_DIR2_DB_OFF_TO_BYTE(mp, db, o)); 203 return xfs_dir2_byte_to_dataptr(mp, xfs_dir2_db_off_to_byte(mp, db, o));
219} 204}
220 205
221/* 206/*
222 * Convert block (dablk) to block (DB) 207 * Convert block (dablk) to block (DB)
223 */ 208 */
224#define XFS_DIR2_DA_TO_DB(mp,da) xfs_dir2_da_to_db(mp, da)
225static inline xfs_dir2_db_t 209static inline xfs_dir2_db_t
226xfs_dir2_da_to_db(struct xfs_mount *mp, xfs_dablk_t da) 210xfs_dir2_da_to_db(struct xfs_mount *mp, xfs_dablk_t da)
227{ 211{
@@ -231,11 +215,10 @@ xfs_dir2_da_to_db(struct xfs_mount *mp, xfs_dablk_t da)
231/* 215/*
232 * Convert block (dablk) to byte offset in space 216 * Convert block (dablk) to byte offset in space
233 */ 217 */
234#define XFS_DIR2_DA_TO_BYTE(mp,da) xfs_dir2_da_to_byte(mp, da)
235static inline xfs_dir2_off_t 218static inline xfs_dir2_off_t
236xfs_dir2_da_to_byte(struct xfs_mount *mp, xfs_dablk_t da) 219xfs_dir2_da_to_byte(struct xfs_mount *mp, xfs_dablk_t da)
237{ 220{
238 return XFS_DIR2_DB_OFF_TO_BYTE(mp, XFS_DIR2_DA_TO_DB(mp, da), 0); 221 return xfs_dir2_db_off_to_byte(mp, xfs_dir2_da_to_db(mp, da), 0);
239} 222}
240 223
241/* 224/*
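
The remainder of xfs_dir2_leaf.h above is the family of address conversions whose macro spellings are removed. Only the composite helpers show their bodies here (for example xfs_dir2_dataptr_to_db() chaining byte_to_db and dataptr_to_byte), so the sketch below is an assumed model of the primitives: a 64-bit byte offset in directory space splits into a directory block number in the high bits and an in-block offset in the low bits, and a 32-bit "dataptr" compresses the byte offset by the 8-byte entry alignment. The shift counts are illustrative parameters, not the on-disk geometry.

/*
 * Toy dataptr/byte/block conversions and how the composites chain them.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_DIRBLK_LOG 12              /* 4096-byte directory blocks (assumed) */
#define TOY_ALIGN_LOG   3              /* entries aligned to 8 bytes (assumed) */

static uint32_t toy_byte_to_db(uint64_t by)  { return (uint32_t)(by >> TOY_DIRBLK_LOG); }
static uint32_t toy_byte_to_off(uint64_t by) { return (uint32_t)(by & ((1u << TOY_DIRBLK_LOG) - 1)); }
static uint64_t toy_db_off_to_byte(uint32_t db, uint32_t off)
{
	return ((uint64_t)db << TOY_DIRBLK_LOG) + off;
}
static uint32_t toy_byte_to_dataptr(uint64_t by) { return (uint32_t)(by >> TOY_ALIGN_LOG); }
static uint64_t toy_dataptr_to_byte(uint32_t dp) { return (uint64_t)dp << TOY_ALIGN_LOG; }

int main(void)
{
	uint64_t by = toy_db_off_to_byte(7, 264);          /* block 7, offset 264 */
	uint32_t dp = toy_byte_to_dataptr(by);

	/*
	 * The composites above chain these the same way, e.g.
	 * dataptr_to_db(dp) == byte_to_db(dataptr_to_byte(dp)).
	 */
	printf("db=%u off=%u dataptr=%u roundtrip=%llu\n",
	       toy_byte_to_db(by), toy_byte_to_off(by), dp,
	       (unsigned long long)toy_dataptr_to_byte(dp));
	return 0;
}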
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index d083c3819934..91c61d9632c8 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -136,14 +136,14 @@ xfs_dir2_leaf_to_node(
136 /* 136 /*
137 * Get the buffer for the new freespace block. 137 * Get the buffer for the new freespace block.
138 */ 138 */
139 if ((error = xfs_da_get_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, fdb), -1, &fbp, 139 if ((error = xfs_da_get_buf(tp, dp, xfs_dir2_db_to_da(mp, fdb), -1, &fbp,
140 XFS_DATA_FORK))) { 140 XFS_DATA_FORK))) {
141 return error; 141 return error;
142 } 142 }
143 ASSERT(fbp != NULL); 143 ASSERT(fbp != NULL);
144 free = fbp->data; 144 free = fbp->data;
145 leaf = lbp->data; 145 leaf = lbp->data;
146 ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 146 ltp = xfs_dir2_leaf_tail_p(mp, leaf);
147 /* 147 /*
148 * Initialize the freespace block header. 148 * Initialize the freespace block header.
149 */ 149 */
@@ -155,7 +155,7 @@ xfs_dir2_leaf_to_node(
155 * Copy freespace entries from the leaf block to the new block. 155 * Copy freespace entries from the leaf block to the new block.
156 * Count active entries. 156 * Count active entries.
157 */ 157 */
158 for (i = n = 0, from = XFS_DIR2_LEAF_BESTS_P(ltp), to = free->bests; 158 for (i = n = 0, from = xfs_dir2_leaf_bests_p(ltp), to = free->bests;
159 i < be32_to_cpu(ltp->bestcount); i++, from++, to++) { 159 i < be32_to_cpu(ltp->bestcount); i++, from++, to++) {
160 if ((off = be16_to_cpu(*from)) != NULLDATAOFF) 160 if ((off = be16_to_cpu(*from)) != NULLDATAOFF)
161 n++; 161 n++;
@@ -215,7 +215,7 @@ xfs_dir2_leafn_add(
215 * a compact. 215 * a compact.
216 */ 216 */
217 217
218 if (be16_to_cpu(leaf->hdr.count) == XFS_DIR2_MAX_LEAF_ENTS(mp)) { 218 if (be16_to_cpu(leaf->hdr.count) == xfs_dir2_max_leaf_ents(mp)) {
219 if (!leaf->hdr.stale) 219 if (!leaf->hdr.stale)
220 return XFS_ERROR(ENOSPC); 220 return XFS_ERROR(ENOSPC);
221 compact = be16_to_cpu(leaf->hdr.stale) > 1; 221 compact = be16_to_cpu(leaf->hdr.stale) > 1;
@@ -327,7 +327,7 @@ xfs_dir2_leafn_add(
327 * Insert the new entry, log everything. 327 * Insert the new entry, log everything.
328 */ 328 */
329 lep->hashval = cpu_to_be32(args->hashval); 329 lep->hashval = cpu_to_be32(args->hashval);
330 lep->address = cpu_to_be32(XFS_DIR2_DB_OFF_TO_DATAPTR(mp, 330 lep->address = cpu_to_be32(xfs_dir2_db_off_to_dataptr(mp,
331 args->blkno, args->index)); 331 args->blkno, args->index));
332 xfs_dir2_leaf_log_header(tp, bp); 332 xfs_dir2_leaf_log_header(tp, bp);
333 xfs_dir2_leaf_log_ents(tp, bp, lfloglow, lfloghigh); 333 xfs_dir2_leaf_log_ents(tp, bp, lfloglow, lfloghigh);
@@ -352,7 +352,7 @@ xfs_dir2_leafn_check(
352 leaf = bp->data; 352 leaf = bp->data;
353 mp = dp->i_mount; 353 mp = dp->i_mount;
354 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); 354 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
355 ASSERT(be16_to_cpu(leaf->hdr.count) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); 355 ASSERT(be16_to_cpu(leaf->hdr.count) <= xfs_dir2_max_leaf_ents(mp));
356 for (i = stale = 0; i < be16_to_cpu(leaf->hdr.count); i++) { 356 for (i = stale = 0; i < be16_to_cpu(leaf->hdr.count); i++) {
357 if (i + 1 < be16_to_cpu(leaf->hdr.count)) { 357 if (i + 1 < be16_to_cpu(leaf->hdr.count)) {
358 ASSERT(be32_to_cpu(leaf->ents[i].hashval) <= 358 ASSERT(be32_to_cpu(leaf->ents[i].hashval) <=
@@ -440,7 +440,7 @@ xfs_dir2_leafn_lookup_int(
440 if (args->addname) { 440 if (args->addname) {
441 curfdb = curbp ? state->extrablk.blkno : -1; 441 curfdb = curbp ? state->extrablk.blkno : -1;
442 curdb = -1; 442 curdb = -1;
443 length = XFS_DIR2_DATA_ENTSIZE(args->namelen); 443 length = xfs_dir2_data_entsize(args->namelen);
444 if ((free = (curbp ? curbp->data : NULL))) 444 if ((free = (curbp ? curbp->data : NULL)))
445 ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); 445 ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
446 } 446 }
@@ -465,7 +465,7 @@ xfs_dir2_leafn_lookup_int(
465 /* 465 /*
466 * Pull the data block number from the entry. 466 * Pull the data block number from the entry.
467 */ 467 */
468 newdb = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); 468 newdb = xfs_dir2_dataptr_to_db(mp, be32_to_cpu(lep->address));
469 /* 469 /*
470 * For addname, we're looking for a place to put the new entry. 470 * For addname, we're looking for a place to put the new entry.
471 * We want to use a data block with an entry of equal 471 * We want to use a data block with an entry of equal
@@ -482,7 +482,7 @@ xfs_dir2_leafn_lookup_int(
482 * Convert the data block to the free block 482 * Convert the data block to the free block
483 * holding its freespace information. 483 * holding its freespace information.
484 */ 484 */
485 newfdb = XFS_DIR2_DB_TO_FDB(mp, newdb); 485 newfdb = xfs_dir2_db_to_fdb(mp, newdb);
486 /* 486 /*
487 * If it's not the one we have in hand, 487 * If it's not the one we have in hand,
488 * read it in. 488 * read it in.
@@ -497,7 +497,7 @@ xfs_dir2_leafn_lookup_int(
497 * Read the free block. 497 * Read the free block.
498 */ 498 */
499 if ((error = xfs_da_read_buf(tp, dp, 499 if ((error = xfs_da_read_buf(tp, dp,
500 XFS_DIR2_DB_TO_DA(mp, 500 xfs_dir2_db_to_da(mp,
501 newfdb), 501 newfdb),
502 -1, &curbp, 502 -1, &curbp,
503 XFS_DATA_FORK))) { 503 XFS_DATA_FORK))) {
@@ -517,7 +517,7 @@ xfs_dir2_leafn_lookup_int(
517 /* 517 /*
518 * Get the index for our entry. 518 * Get the index for our entry.
519 */ 519 */
520 fi = XFS_DIR2_DB_TO_FDINDEX(mp, curdb); 520 fi = xfs_dir2_db_to_fdindex(mp, curdb);
521 /* 521 /*
522 * If it has room, return it. 522 * If it has room, return it.
523 */ 523 */
@@ -561,7 +561,7 @@ xfs_dir2_leafn_lookup_int(
561 */ 561 */
562 if ((error = 562 if ((error =
563 xfs_da_read_buf(tp, dp, 563 xfs_da_read_buf(tp, dp,
564 XFS_DIR2_DB_TO_DA(mp, newdb), -1, 564 xfs_dir2_db_to_da(mp, newdb), -1,
565 &curbp, XFS_DATA_FORK))) { 565 &curbp, XFS_DATA_FORK))) {
566 return error; 566 return error;
567 } 567 }
@@ -573,7 +573,7 @@ xfs_dir2_leafn_lookup_int(
573 */ 573 */
574 dep = (xfs_dir2_data_entry_t *) 574 dep = (xfs_dir2_data_entry_t *)
575 ((char *)curbp->data + 575 ((char *)curbp->data +
576 XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address))); 576 xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address)));
577 /* 577 /*
578 * Compare the entry, return it if it matches. 578 * Compare the entry, return it if it matches.
579 */ 579 */
@@ -876,9 +876,9 @@ xfs_dir2_leafn_remove(
876 /* 876 /*
877 * Extract the data block and offset from the entry. 877 * Extract the data block and offset from the entry.
878 */ 878 */
879 db = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); 879 db = xfs_dir2_dataptr_to_db(mp, be32_to_cpu(lep->address));
880 ASSERT(dblk->blkno == db); 880 ASSERT(dblk->blkno == db);
881 off = XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address)); 881 off = xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address));
882 ASSERT(dblk->index == off); 882 ASSERT(dblk->index == off);
883 /* 883 /*
884 * Kill the leaf entry by marking it stale. 884 * Kill the leaf entry by marking it stale.
@@ -898,7 +898,7 @@ xfs_dir2_leafn_remove(
898 longest = be16_to_cpu(data->hdr.bestfree[0].length); 898 longest = be16_to_cpu(data->hdr.bestfree[0].length);
899 needlog = needscan = 0; 899 needlog = needscan = 0;
900 xfs_dir2_data_make_free(tp, dbp, off, 900 xfs_dir2_data_make_free(tp, dbp, off,
901 XFS_DIR2_DATA_ENTSIZE(dep->namelen), &needlog, &needscan); 901 xfs_dir2_data_entsize(dep->namelen), &needlog, &needscan);
902 /* 902 /*
903 * Rescan the data block freespaces for bestfree. 903 * Rescan the data block freespaces for bestfree.
904 * Log the data block header if needed. 904 * Log the data block header if needed.
@@ -924,8 +924,8 @@ xfs_dir2_leafn_remove(
924 * Convert the data block number to a free block, 924 * Convert the data block number to a free block,
925 * read in the free block. 925 * read in the free block.
926 */ 926 */
927 fdb = XFS_DIR2_DB_TO_FDB(mp, db); 927 fdb = xfs_dir2_db_to_fdb(mp, db);
928 if ((error = xfs_da_read_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, fdb), 928 if ((error = xfs_da_read_buf(tp, dp, xfs_dir2_db_to_da(mp, fdb),
929 -1, &fbp, XFS_DATA_FORK))) { 929 -1, &fbp, XFS_DATA_FORK))) {
930 return error; 930 return error;
931 } 931 }
@@ -937,7 +937,7 @@ xfs_dir2_leafn_remove(
937 /* 937 /*
938 * Calculate which entry we need to fix. 938 * Calculate which entry we need to fix.
939 */ 939 */
940 findex = XFS_DIR2_DB_TO_FDINDEX(mp, db); 940 findex = xfs_dir2_db_to_fdindex(mp, db);
941 longest = be16_to_cpu(data->hdr.bestfree[0].length); 941 longest = be16_to_cpu(data->hdr.bestfree[0].length);
942 /* 942 /*
943 * If the data block is now empty we can get rid of it 943 * If the data block is now empty we can get rid of it
@@ -1073,7 +1073,7 @@ xfs_dir2_leafn_split(
1073 /* 1073 /*
1074 * Initialize the new leaf block. 1074 * Initialize the new leaf block.
1075 */ 1075 */
1076 error = xfs_dir2_leaf_init(args, XFS_DIR2_DA_TO_DB(mp, blkno), 1076 error = xfs_dir2_leaf_init(args, xfs_dir2_da_to_db(mp, blkno),
1077 &newblk->bp, XFS_DIR2_LEAFN_MAGIC); 1077 &newblk->bp, XFS_DIR2_LEAFN_MAGIC);
1078 if (error) { 1078 if (error) {
1079 return error; 1079 return error;
@@ -1385,7 +1385,7 @@ xfs_dir2_node_addname_int(
1385 dp = args->dp; 1385 dp = args->dp;
1386 mp = dp->i_mount; 1386 mp = dp->i_mount;
1387 tp = args->trans; 1387 tp = args->trans;
1388 length = XFS_DIR2_DATA_ENTSIZE(args->namelen); 1388 length = xfs_dir2_data_entsize(args->namelen);
1389 /* 1389 /*
1390 * If we came in with a freespace block that means that lookup 1390 * If we came in with a freespace block that means that lookup
1391 * found an entry with our hash value. This is the freespace 1391 * found an entry with our hash value. This is the freespace
@@ -1438,7 +1438,7 @@ xfs_dir2_node_addname_int(
1438 1438
1439 if ((error = xfs_bmap_last_offset(tp, dp, &fo, XFS_DATA_FORK))) 1439 if ((error = xfs_bmap_last_offset(tp, dp, &fo, XFS_DATA_FORK)))
1440 return error; 1440 return error;
1441 lastfbno = XFS_DIR2_DA_TO_DB(mp, (xfs_dablk_t)fo); 1441 lastfbno = xfs_dir2_da_to_db(mp, (xfs_dablk_t)fo);
1442 fbno = ifbno; 1442 fbno = ifbno;
1443 } 1443 }
1444 /* 1444 /*
@@ -1474,7 +1474,7 @@ xfs_dir2_node_addname_int(
1474 * to avoid it. 1474 * to avoid it.
1475 */ 1475 */
1476 if ((error = xfs_da_read_buf(tp, dp, 1476 if ((error = xfs_da_read_buf(tp, dp,
1477 XFS_DIR2_DB_TO_DA(mp, fbno), -2, &fbp, 1477 xfs_dir2_db_to_da(mp, fbno), -2, &fbp,
1478 XFS_DATA_FORK))) { 1478 XFS_DATA_FORK))) {
1479 return error; 1479 return error;
1480 } 1480 }
@@ -1550,9 +1550,9 @@ xfs_dir2_node_addname_int(
1550 * Get the freespace block corresponding to the data block 1550 * Get the freespace block corresponding to the data block
1551 * that was just allocated. 1551 * that was just allocated.
1552 */ 1552 */
1553 fbno = XFS_DIR2_DB_TO_FDB(mp, dbno); 1553 fbno = xfs_dir2_db_to_fdb(mp, dbno);
1554 if (unlikely(error = xfs_da_read_buf(tp, dp, 1554 if (unlikely(error = xfs_da_read_buf(tp, dp,
1555 XFS_DIR2_DB_TO_DA(mp, fbno), -2, &fbp, 1555 xfs_dir2_db_to_da(mp, fbno), -2, &fbp,
1556 XFS_DATA_FORK))) { 1556 XFS_DATA_FORK))) {
1557 xfs_da_buf_done(dbp); 1557 xfs_da_buf_done(dbp);
1558 return error; 1558 return error;
@@ -1567,14 +1567,14 @@ xfs_dir2_node_addname_int(
1567 return error; 1567 return error;
1568 } 1568 }
1569 1569
1570 if (unlikely(XFS_DIR2_DB_TO_FDB(mp, dbno) != fbno)) { 1570 if (unlikely(xfs_dir2_db_to_fdb(mp, dbno) != fbno)) {
1571 cmn_err(CE_ALERT, 1571 cmn_err(CE_ALERT,
1572 "xfs_dir2_node_addname_int: dir ino " 1572 "xfs_dir2_node_addname_int: dir ino "
1573 "%llu needed freesp block %lld for\n" 1573 "%llu needed freesp block %lld for\n"
1574 " data block %lld, got %lld\n" 1574 " data block %lld, got %lld\n"
1575 " ifbno %llu lastfbno %d\n", 1575 " ifbno %llu lastfbno %d\n",
1576 (unsigned long long)dp->i_ino, 1576 (unsigned long long)dp->i_ino,
1577 (long long)XFS_DIR2_DB_TO_FDB(mp, dbno), 1577 (long long)xfs_dir2_db_to_fdb(mp, dbno),
1578 (long long)dbno, (long long)fbno, 1578 (long long)dbno, (long long)fbno,
1579 (unsigned long long)ifbno, lastfbno); 1579 (unsigned long long)ifbno, lastfbno);
1580 if (fblk) { 1580 if (fblk) {
@@ -1598,7 +1598,7 @@ xfs_dir2_node_addname_int(
1598 * Get a buffer for the new block. 1598 * Get a buffer for the new block.
1599 */ 1599 */
1600 if ((error = xfs_da_get_buf(tp, dp, 1600 if ((error = xfs_da_get_buf(tp, dp,
1601 XFS_DIR2_DB_TO_DA(mp, fbno), 1601 xfs_dir2_db_to_da(mp, fbno),
1602 -1, &fbp, XFS_DATA_FORK))) { 1602 -1, &fbp, XFS_DATA_FORK))) {
1603 return error; 1603 return error;
1604 } 1604 }
@@ -1623,7 +1623,7 @@ xfs_dir2_node_addname_int(
1623 /* 1623 /*
1624 * Set the freespace block index from the data block number. 1624 * Set the freespace block index from the data block number.
1625 */ 1625 */
1626 findex = XFS_DIR2_DB_TO_FDINDEX(mp, dbno); 1626 findex = xfs_dir2_db_to_fdindex(mp, dbno);
1627 /* 1627 /*
1628 * If it's after the end of the current entries in the 1628 * If it's after the end of the current entries in the
1629 * freespace block, extend that table. 1629 * freespace block, extend that table.
@@ -1669,7 +1669,7 @@ xfs_dir2_node_addname_int(
1669 * Read the data block in. 1669 * Read the data block in.
1670 */ 1670 */
1671 if (unlikely( 1671 if (unlikely(
1672 error = xfs_da_read_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, dbno), 1672 error = xfs_da_read_buf(tp, dp, xfs_dir2_db_to_da(mp, dbno),
1673 -1, &dbp, XFS_DATA_FORK))) { 1673 -1, &dbp, XFS_DATA_FORK))) {
1674 if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL) 1674 if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL)
1675 xfs_da_buf_done(fbp); 1675 xfs_da_buf_done(fbp);
@@ -1698,7 +1698,7 @@ xfs_dir2_node_addname_int(
1698 dep->inumber = cpu_to_be64(args->inumber); 1698 dep->inumber = cpu_to_be64(args->inumber);
1699 dep->namelen = args->namelen; 1699 dep->namelen = args->namelen;
1700 memcpy(dep->name, args->name, dep->namelen); 1700 memcpy(dep->name, args->name, dep->namelen);
1701 tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); 1701 tagp = xfs_dir2_data_entry_tag_p(dep);
1702 *tagp = cpu_to_be16((char *)dep - (char *)data); 1702 *tagp = cpu_to_be16((char *)dep - (char *)data);
1703 xfs_dir2_data_log_entry(tp, dbp, dep); 1703 xfs_dir2_data_log_entry(tp, dbp, dep);
1704 /* 1704 /*
@@ -1904,7 +1904,7 @@ xfs_dir2_node_replace(
1904 ASSERT(be32_to_cpu(data->hdr.magic) == XFS_DIR2_DATA_MAGIC); 1904 ASSERT(be32_to_cpu(data->hdr.magic) == XFS_DIR2_DATA_MAGIC);
1905 dep = (xfs_dir2_data_entry_t *) 1905 dep = (xfs_dir2_data_entry_t *)
1906 ((char *)data + 1906 ((char *)data +
1907 XFS_DIR2_DATAPTR_TO_OFF(state->mp, be32_to_cpu(lep->address))); 1907 xfs_dir2_dataptr_to_off(state->mp, be32_to_cpu(lep->address)));
1908 ASSERT(inum != be64_to_cpu(dep->inumber)); 1908 ASSERT(inum != be64_to_cpu(dep->inumber));
1909 /* 1909 /*
1910 * Fill in the new inode number and log the entry. 1910 * Fill in the new inode number and log the entry.
@@ -1980,7 +1980,7 @@ xfs_dir2_node_trim_free(
1980 * Blow the block away. 1980 * Blow the block away.
1981 */ 1981 */
1982 if ((error = 1982 if ((error =
1983 xfs_dir2_shrink_inode(args, XFS_DIR2_DA_TO_DB(mp, (xfs_dablk_t)fo), 1983 xfs_dir2_shrink_inode(args, xfs_dir2_da_to_db(mp, (xfs_dablk_t)fo),
1984 bp))) { 1984 bp))) {
1985 /* 1985 /*
1986 * Can't fail with ENOSPC since that only happens with no 1986 * Can't fail with ENOSPC since that only happens with no
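
The xfs_dir2_node.c hunks above are all instances of the same mechanical change that runs through this series: the shouting function-like macros (XFS_DIR2_DB_TO_FDB, XFS_DIR2_DATAPTR_TO_OFF and friends) are dropped and callers invoke the lowercase static inline helpers directly. As a rough stand-alone illustration of why that direction is preferable, here is a small user-space sketch (hypothetical names, not XFS code) showing the repeated-argument-evaluation hazard a function-like macro carries and a static inline avoids, in addition to the type checking the inline form gets for free.

#include <stdio.h>

static int calls;

/* Helper with a visible side effect so evaluations can be counted. */
static int next_db(void)
{
	calls++;
	return 7;
}

/* Function-like macro: the argument text is pasted in twice, so any
 * side effect in the argument runs twice. */
#define DOUBLE_MACRO(db)	((db) + (db))

/* static inline equivalent: the argument is evaluated exactly once and
 * is type checked, which is the style the dir2 code is converted to. */
static inline int double_inline(int db)
{
	return db + db;
}

int main(void)
{
	int v;

	calls = 0;
	v = DOUBLE_MACRO(next_db());
	printf("macro : value %d, next_db() evaluated %d time(s)\n", v, calls);

	calls = 0;
	v = double_inline(next_db());
	printf("inline: value %d, next_db() evaluated %d time(s)\n", v, calls);
	return 0;
}
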
diff --git a/fs/xfs/xfs_dir2_node.h b/fs/xfs/xfs_dir2_node.h
index c7c870ee7857..dde72db3d695 100644
--- a/fs/xfs/xfs_dir2_node.h
+++ b/fs/xfs/xfs_dir2_node.h
@@ -36,7 +36,7 @@ struct xfs_trans;
36#define XFS_DIR2_FREE_SPACE 2 36#define XFS_DIR2_FREE_SPACE 2
37#define XFS_DIR2_FREE_OFFSET (XFS_DIR2_FREE_SPACE * XFS_DIR2_SPACE_SIZE) 37#define XFS_DIR2_FREE_OFFSET (XFS_DIR2_FREE_SPACE * XFS_DIR2_SPACE_SIZE)
38#define XFS_DIR2_FREE_FIRSTDB(mp) \ 38#define XFS_DIR2_FREE_FIRSTDB(mp) \
39 XFS_DIR2_BYTE_TO_DB(mp, XFS_DIR2_FREE_OFFSET) 39 xfs_dir2_byte_to_db(mp, XFS_DIR2_FREE_OFFSET)
40 40
41#define XFS_DIR2_FREE_MAGIC 0x58443246 /* XD2F */ 41#define XFS_DIR2_FREE_MAGIC 0x58443246 /* XD2F */
42 42
@@ -60,7 +60,6 @@ typedef struct xfs_dir2_free {
60/* 60/*
61 * Convert data space db to the corresponding free db. 61 * Convert data space db to the corresponding free db.
62 */ 62 */
63#define XFS_DIR2_DB_TO_FDB(mp,db) xfs_dir2_db_to_fdb(mp, db)
64static inline xfs_dir2_db_t 63static inline xfs_dir2_db_t
65xfs_dir2_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db) 64xfs_dir2_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db)
66{ 65{
@@ -70,7 +69,6 @@ xfs_dir2_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db)
70/* 69/*
71 * Convert data space db to the corresponding index in a free db. 70 * Convert data space db to the corresponding index in a free db.
72 */ 71 */
73#define XFS_DIR2_DB_TO_FDINDEX(mp,db) xfs_dir2_db_to_fdindex(mp, db)
74static inline int 72static inline int
75xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db) 73xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db)
76{ 74{
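
The two helpers this header keeps, xfs_dir2_db_to_fdb() and xfs_dir2_db_to_fdindex(), are always used as a pair by the callers above (fdb with findex, newfdb with fi), which suggests the familiar divide/modulo split: one selects the freespace block that describes a given data block, the other selects that data block's slot inside it. A toy sketch of that shape, using made-up constants in place of the real geometry the mount structure supplies:

#include <stdio.h>

/*
 * Made-up geometry, for illustration only; the real helpers derive the
 * equivalent values from the mount's directory block size.
 */
#define FREE_FIRSTDB		128	/* first block number in the freespace area */
#define BESTS_PER_FREEBLK	60	/* data blocks described by one free block */

/* Which freespace block carries the "best free" record for data block db. */
static inline int db_to_fdb(int db)
{
	return FREE_FIRSTDB + db / BESTS_PER_FREEBLK;
}

/* Index of data block db's record within that freespace block. */
static inline int db_to_fdindex(int db)
{
	return db % BESTS_PER_FREEBLK;
}

int main(void)
{
	int db;

	for (db = 0; db < 130; db += 61)
		printf("data block %3d -> free block %3d, slot %2d\n",
		       db, db_to_fdb(db), db_to_fdindex(db));
	return 0;
}
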
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index 0cd77b17bf92..38fc4f22b76d 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -89,8 +89,8 @@ xfs_dir2_block_sfsize(
89 mp = dp->i_mount; 89 mp = dp->i_mount;
90 90
91 count = i8count = namelen = 0; 91 count = i8count = namelen = 0;
92 btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 92 btp = xfs_dir2_block_tail_p(mp, block);
93 blp = XFS_DIR2_BLOCK_LEAF_P(btp); 93 blp = xfs_dir2_block_leaf_p(btp);
94 94
95 /* 95 /*
96 * Iterate over the block's data entries by using the leaf pointers. 96 * Iterate over the block's data entries by using the leaf pointers.
@@ -102,7 +102,7 @@ xfs_dir2_block_sfsize(
102 * Calculate the pointer to the entry at hand. 102 * Calculate the pointer to the entry at hand.
103 */ 103 */
104 dep = (xfs_dir2_data_entry_t *) 104 dep = (xfs_dir2_data_entry_t *)
105 ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, addr)); 105 ((char *)block + xfs_dir2_dataptr_to_off(mp, addr));
106 /* 106 /*
107 * Detect . and .., so we can special-case them. 107 * Detect . and .., so we can special-case them.
108 * . is not included in sf directories. 108 * . is not included in sf directories.
@@ -124,7 +124,7 @@ xfs_dir2_block_sfsize(
124 /* 124 /*
125 * Calculate the new size, see if we should give up yet. 125 * Calculate the new size, see if we should give up yet.
126 */ 126 */
127 size = XFS_DIR2_SF_HDR_SIZE(i8count) + /* header */ 127 size = xfs_dir2_sf_hdr_size(i8count) + /* header */
128 count + /* namelen */ 128 count + /* namelen */
129 count * (uint)sizeof(xfs_dir2_sf_off_t) + /* offset */ 129 count * (uint)sizeof(xfs_dir2_sf_off_t) + /* offset */
130 namelen + /* name */ 130 namelen + /* name */
@@ -139,7 +139,7 @@ xfs_dir2_block_sfsize(
139 */ 139 */
140 sfhp->count = count; 140 sfhp->count = count;
141 sfhp->i8count = i8count; 141 sfhp->i8count = i8count;
142 XFS_DIR2_SF_PUT_INUMBER((xfs_dir2_sf_t *)sfhp, &parent, &sfhp->parent); 142 xfs_dir2_sf_put_inumber((xfs_dir2_sf_t *)sfhp, &parent, &sfhp->parent);
143 return size; 143 return size;
144} 144}
145 145
@@ -199,15 +199,15 @@ xfs_dir2_block_to_sf(
 199 * Copy the header into the newly allocated local space. 199 * Copy the header into the newly allocated local space.
200 */ 200 */
201 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; 201 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
202 memcpy(sfp, sfhp, XFS_DIR2_SF_HDR_SIZE(sfhp->i8count)); 202 memcpy(sfp, sfhp, xfs_dir2_sf_hdr_size(sfhp->i8count));
203 dp->i_d.di_size = size; 203 dp->i_d.di_size = size;
204 /* 204 /*
205 * Set up to loop over the block's entries. 205 * Set up to loop over the block's entries.
206 */ 206 */
207 btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 207 btp = xfs_dir2_block_tail_p(mp, block);
208 ptr = (char *)block->u; 208 ptr = (char *)block->u;
209 endptr = (char *)XFS_DIR2_BLOCK_LEAF_P(btp); 209 endptr = (char *)xfs_dir2_block_leaf_p(btp);
210 sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); 210 sfep = xfs_dir2_sf_firstentry(sfp);
211 /* 211 /*
212 * Loop over the active and unused entries. 212 * Loop over the active and unused entries.
213 * Stop when we reach the leaf/tail portion of the block. 213 * Stop when we reach the leaf/tail portion of the block.
@@ -233,22 +233,22 @@ xfs_dir2_block_to_sf(
233 else if (dep->namelen == 2 && 233 else if (dep->namelen == 2 &&
234 dep->name[0] == '.' && dep->name[1] == '.') 234 dep->name[0] == '.' && dep->name[1] == '.')
235 ASSERT(be64_to_cpu(dep->inumber) == 235 ASSERT(be64_to_cpu(dep->inumber) ==
236 XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent)); 236 xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent));
237 /* 237 /*
238 * Normal entry, copy it into shortform. 238 * Normal entry, copy it into shortform.
239 */ 239 */
240 else { 240 else {
241 sfep->namelen = dep->namelen; 241 sfep->namelen = dep->namelen;
242 XFS_DIR2_SF_PUT_OFFSET(sfep, 242 xfs_dir2_sf_put_offset(sfep,
243 (xfs_dir2_data_aoff_t) 243 (xfs_dir2_data_aoff_t)
244 ((char *)dep - (char *)block)); 244 ((char *)dep - (char *)block));
245 memcpy(sfep->name, dep->name, dep->namelen); 245 memcpy(sfep->name, dep->name, dep->namelen);
246 temp = be64_to_cpu(dep->inumber); 246 temp = be64_to_cpu(dep->inumber);
247 XFS_DIR2_SF_PUT_INUMBER(sfp, &temp, 247 xfs_dir2_sf_put_inumber(sfp, &temp,
248 XFS_DIR2_SF_INUMBERP(sfep)); 248 xfs_dir2_sf_inumberp(sfep));
249 sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep); 249 sfep = xfs_dir2_sf_nextentry(sfp, sfep);
250 } 250 }
251 ptr += XFS_DIR2_DATA_ENTSIZE(dep->namelen); 251 ptr += xfs_dir2_data_entsize(dep->namelen);
252 } 252 }
253 ASSERT((char *)sfep - (char *)sfp == size); 253 ASSERT((char *)sfep - (char *)sfp == size);
254 xfs_dir2_sf_check(args); 254 xfs_dir2_sf_check(args);
@@ -294,11 +294,11 @@ xfs_dir2_sf_addname(
294 ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); 294 ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
295 ASSERT(dp->i_df.if_u1.if_data != NULL); 295 ASSERT(dp->i_df.if_u1.if_data != NULL);
296 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; 296 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
297 ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); 297 ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
298 /* 298 /*
299 * Compute entry (and change in) size. 299 * Compute entry (and change in) size.
300 */ 300 */
301 add_entsize = XFS_DIR2_SF_ENTSIZE_BYNAME(sfp, args->namelen); 301 add_entsize = xfs_dir2_sf_entsize_byname(sfp, args->namelen);
302 incr_isize = add_entsize; 302 incr_isize = add_entsize;
303 objchange = 0; 303 objchange = 0;
304#if XFS_BIG_INUMS 304#if XFS_BIG_INUMS
@@ -392,7 +392,7 @@ xfs_dir2_sf_addname_easy(
392 /* 392 /*
393 * Grow the in-inode space. 393 * Grow the in-inode space.
394 */ 394 */
395 xfs_idata_realloc(dp, XFS_DIR2_SF_ENTSIZE_BYNAME(sfp, args->namelen), 395 xfs_idata_realloc(dp, xfs_dir2_sf_entsize_byname(sfp, args->namelen),
396 XFS_DATA_FORK); 396 XFS_DATA_FORK);
397 /* 397 /*
398 * Need to set up again due to realloc of the inode data. 398 * Need to set up again due to realloc of the inode data.
@@ -403,10 +403,10 @@ xfs_dir2_sf_addname_easy(
403 * Fill in the new entry. 403 * Fill in the new entry.
404 */ 404 */
405 sfep->namelen = args->namelen; 405 sfep->namelen = args->namelen;
406 XFS_DIR2_SF_PUT_OFFSET(sfep, offset); 406 xfs_dir2_sf_put_offset(sfep, offset);
407 memcpy(sfep->name, args->name, sfep->namelen); 407 memcpy(sfep->name, args->name, sfep->namelen);
408 XFS_DIR2_SF_PUT_INUMBER(sfp, &args->inumber, 408 xfs_dir2_sf_put_inumber(sfp, &args->inumber,
409 XFS_DIR2_SF_INUMBERP(sfep)); 409 xfs_dir2_sf_inumberp(sfep));
410 /* 410 /*
411 * Update the header and inode. 411 * Update the header and inode.
412 */ 412 */
@@ -463,14 +463,14 @@ xfs_dir2_sf_addname_hard(
463 * If it's going to end up at the end then oldsfep will point there. 463 * If it's going to end up at the end then oldsfep will point there.
464 */ 464 */
465 for (offset = XFS_DIR2_DATA_FIRST_OFFSET, 465 for (offset = XFS_DIR2_DATA_FIRST_OFFSET,
466 oldsfep = XFS_DIR2_SF_FIRSTENTRY(oldsfp), 466 oldsfep = xfs_dir2_sf_firstentry(oldsfp),
467 add_datasize = XFS_DIR2_DATA_ENTSIZE(args->namelen), 467 add_datasize = xfs_dir2_data_entsize(args->namelen),
468 eof = (char *)oldsfep == &buf[old_isize]; 468 eof = (char *)oldsfep == &buf[old_isize];
469 !eof; 469 !eof;
470 offset = new_offset + XFS_DIR2_DATA_ENTSIZE(oldsfep->namelen), 470 offset = new_offset + xfs_dir2_data_entsize(oldsfep->namelen),
471 oldsfep = XFS_DIR2_SF_NEXTENTRY(oldsfp, oldsfep), 471 oldsfep = xfs_dir2_sf_nextentry(oldsfp, oldsfep),
472 eof = (char *)oldsfep == &buf[old_isize]) { 472 eof = (char *)oldsfep == &buf[old_isize]) {
473 new_offset = XFS_DIR2_SF_GET_OFFSET(oldsfep); 473 new_offset = xfs_dir2_sf_get_offset(oldsfep);
474 if (offset + add_datasize <= new_offset) 474 if (offset + add_datasize <= new_offset)
475 break; 475 break;
476 } 476 }
@@ -495,10 +495,10 @@ xfs_dir2_sf_addname_hard(
495 * Fill in the new entry, and update the header counts. 495 * Fill in the new entry, and update the header counts.
496 */ 496 */
497 sfep->namelen = args->namelen; 497 sfep->namelen = args->namelen;
498 XFS_DIR2_SF_PUT_OFFSET(sfep, offset); 498 xfs_dir2_sf_put_offset(sfep, offset);
499 memcpy(sfep->name, args->name, sfep->namelen); 499 memcpy(sfep->name, args->name, sfep->namelen);
500 XFS_DIR2_SF_PUT_INUMBER(sfp, &args->inumber, 500 xfs_dir2_sf_put_inumber(sfp, &args->inumber,
501 XFS_DIR2_SF_INUMBERP(sfep)); 501 xfs_dir2_sf_inumberp(sfep));
502 sfp->hdr.count++; 502 sfp->hdr.count++;
503#if XFS_BIG_INUMS 503#if XFS_BIG_INUMS
504 if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && !objchange) 504 if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && !objchange)
@@ -508,7 +508,7 @@ xfs_dir2_sf_addname_hard(
508 * If there's more left to copy, do that. 508 * If there's more left to copy, do that.
509 */ 509 */
510 if (!eof) { 510 if (!eof) {
511 sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep); 511 sfep = xfs_dir2_sf_nextentry(sfp, sfep);
512 memcpy(sfep, oldsfep, old_isize - nbytes); 512 memcpy(sfep, oldsfep, old_isize - nbytes);
513 } 513 }
514 kmem_free(buf, old_isize); 514 kmem_free(buf, old_isize);
@@ -544,9 +544,9 @@ xfs_dir2_sf_addname_pick(
544 mp = dp->i_mount; 544 mp = dp->i_mount;
545 545
546 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; 546 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
547 size = XFS_DIR2_DATA_ENTSIZE(args->namelen); 547 size = xfs_dir2_data_entsize(args->namelen);
548 offset = XFS_DIR2_DATA_FIRST_OFFSET; 548 offset = XFS_DIR2_DATA_FIRST_OFFSET;
549 sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); 549 sfep = xfs_dir2_sf_firstentry(sfp);
550 holefit = 0; 550 holefit = 0;
551 /* 551 /*
552 * Loop over sf entries. 552 * Loop over sf entries.
@@ -555,10 +555,10 @@ xfs_dir2_sf_addname_pick(
555 */ 555 */
556 for (i = 0; i < sfp->hdr.count; i++) { 556 for (i = 0; i < sfp->hdr.count; i++) {
557 if (!holefit) 557 if (!holefit)
558 holefit = offset + size <= XFS_DIR2_SF_GET_OFFSET(sfep); 558 holefit = offset + size <= xfs_dir2_sf_get_offset(sfep);
559 offset = XFS_DIR2_SF_GET_OFFSET(sfep) + 559 offset = xfs_dir2_sf_get_offset(sfep) +
560 XFS_DIR2_DATA_ENTSIZE(sfep->namelen); 560 xfs_dir2_data_entsize(sfep->namelen);
561 sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep); 561 sfep = xfs_dir2_sf_nextentry(sfp, sfep);
562 } 562 }
563 /* 563 /*
564 * Calculate data bytes used excluding the new entry, if this 564 * Calculate data bytes used excluding the new entry, if this
@@ -617,18 +617,18 @@ xfs_dir2_sf_check(
617 617
618 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; 618 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
619 offset = XFS_DIR2_DATA_FIRST_OFFSET; 619 offset = XFS_DIR2_DATA_FIRST_OFFSET;
620 ino = XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent); 620 ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent);
621 i8count = ino > XFS_DIR2_MAX_SHORT_INUM; 621 i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
622 622
623 for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); 623 for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp);
624 i < sfp->hdr.count; 624 i < sfp->hdr.count;
625 i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) { 625 i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
626 ASSERT(XFS_DIR2_SF_GET_OFFSET(sfep) >= offset); 626 ASSERT(xfs_dir2_sf_get_offset(sfep) >= offset);
627 ino = XFS_DIR2_SF_GET_INUMBER(sfp, XFS_DIR2_SF_INUMBERP(sfep)); 627 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
628 i8count += ino > XFS_DIR2_MAX_SHORT_INUM; 628 i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
629 offset = 629 offset =
630 XFS_DIR2_SF_GET_OFFSET(sfep) + 630 xfs_dir2_sf_get_offset(sfep) +
631 XFS_DIR2_DATA_ENTSIZE(sfep->namelen); 631 xfs_dir2_data_entsize(sfep->namelen);
632 } 632 }
633 ASSERT(i8count == sfp->hdr.i8count); 633 ASSERT(i8count == sfp->hdr.i8count);
634 ASSERT(XFS_BIG_INUMS || i8count == 0); 634 ASSERT(XFS_BIG_INUMS || i8count == 0);
@@ -671,7 +671,7 @@ xfs_dir2_sf_create(
671 ASSERT(dp->i_df.if_flags & XFS_IFINLINE); 671 ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
672 ASSERT(dp->i_df.if_bytes == 0); 672 ASSERT(dp->i_df.if_bytes == 0);
673 i8count = pino > XFS_DIR2_MAX_SHORT_INUM; 673 i8count = pino > XFS_DIR2_MAX_SHORT_INUM;
674 size = XFS_DIR2_SF_HDR_SIZE(i8count); 674 size = xfs_dir2_sf_hdr_size(i8count);
675 /* 675 /*
676 * Make a buffer for the data. 676 * Make a buffer for the data.
677 */ 677 */
@@ -684,7 +684,7 @@ xfs_dir2_sf_create(
684 /* 684 /*
685 * Now can put in the inode number, since i8count is set. 685 * Now can put in the inode number, since i8count is set.
686 */ 686 */
687 XFS_DIR2_SF_PUT_INUMBER(sfp, &pino, &sfp->hdr.parent); 687 xfs_dir2_sf_put_inumber(sfp, &pino, &sfp->hdr.parent);
688 sfp->hdr.count = 0; 688 sfp->hdr.count = 0;
689 dp->i_d.di_size = size; 689 dp->i_d.di_size = size;
690 xfs_dir2_sf_check(args); 690 xfs_dir2_sf_check(args);
@@ -727,12 +727,12 @@ xfs_dir2_sf_getdents(
727 727
728 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; 728 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
729 729
730 ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); 730 ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
731 731
732 /* 732 /*
733 * If the block number in the offset is out of range, we're done. 733 * If the block number in the offset is out of range, we're done.
734 */ 734 */
735 if (XFS_DIR2_DATAPTR_TO_DB(mp, dir_offset) > mp->m_dirdatablk) { 735 if (xfs_dir2_dataptr_to_db(mp, dir_offset) > mp->m_dirdatablk) {
736 *eofp = 1; 736 *eofp = 1;
737 return 0; 737 return 0;
738 } 738 }
@@ -747,9 +747,9 @@ xfs_dir2_sf_getdents(
747 * Put . entry unless we're starting past it. 747 * Put . entry unless we're starting past it.
748 */ 748 */
749 if (dir_offset <= 749 if (dir_offset <=
750 XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 750 xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
751 XFS_DIR2_DATA_DOT_OFFSET)) { 751 XFS_DIR2_DATA_DOT_OFFSET)) {
752 p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, 0, 752 p.cook = xfs_dir2_db_off_to_dataptr(mp, 0,
753 XFS_DIR2_DATA_DOTDOT_OFFSET); 753 XFS_DIR2_DATA_DOTDOT_OFFSET);
754 p.ino = dp->i_ino; 754 p.ino = dp->i_ino;
755#if XFS_BIG_INUMS 755#if XFS_BIG_INUMS
@@ -762,7 +762,7 @@ xfs_dir2_sf_getdents(
762 762
763 if (!p.done) { 763 if (!p.done) {
764 uio->uio_offset = 764 uio->uio_offset =
765 XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 765 xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
766 XFS_DIR2_DATA_DOT_OFFSET); 766 XFS_DIR2_DATA_DOT_OFFSET);
767 return error; 767 return error;
768 } 768 }
@@ -772,11 +772,11 @@ xfs_dir2_sf_getdents(
772 * Put .. entry unless we're starting past it. 772 * Put .. entry unless we're starting past it.
773 */ 773 */
774 if (dir_offset <= 774 if (dir_offset <=
775 XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 775 xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
776 XFS_DIR2_DATA_DOTDOT_OFFSET)) { 776 XFS_DIR2_DATA_DOTDOT_OFFSET)) {
777 p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 777 p.cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
778 XFS_DIR2_DATA_FIRST_OFFSET); 778 XFS_DIR2_DATA_FIRST_OFFSET);
779 p.ino = XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent); 779 p.ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent);
780#if XFS_BIG_INUMS 780#if XFS_BIG_INUMS
781 p.ino += mp->m_inoadd; 781 p.ino += mp->m_inoadd;
782#endif 782#endif
@@ -787,7 +787,7 @@ xfs_dir2_sf_getdents(
787 787
788 if (!p.done) { 788 if (!p.done) {
789 uio->uio_offset = 789 uio->uio_offset =
790 XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 790 xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
791 XFS_DIR2_DATA_DOTDOT_OFFSET); 791 XFS_DIR2_DATA_DOTDOT_OFFSET);
792 return error; 792 return error;
793 } 793 }
@@ -796,23 +796,23 @@ xfs_dir2_sf_getdents(
796 /* 796 /*
797 * Loop while there are more entries and put'ing works. 797 * Loop while there are more entries and put'ing works.
798 */ 798 */
799 for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); 799 for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp);
800 i < sfp->hdr.count; 800 i < sfp->hdr.count;
801 i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) { 801 i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
802 802
803 off = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 803 off = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
804 XFS_DIR2_SF_GET_OFFSET(sfep)); 804 xfs_dir2_sf_get_offset(sfep));
805 805
806 if (dir_offset > off) 806 if (dir_offset > off)
807 continue; 807 continue;
808 808
809 p.namelen = sfep->namelen; 809 p.namelen = sfep->namelen;
810 810
811 p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 811 p.cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
812 XFS_DIR2_SF_GET_OFFSET(sfep) + 812 xfs_dir2_sf_get_offset(sfep) +
813 XFS_DIR2_DATA_ENTSIZE(p.namelen)); 813 xfs_dir2_data_entsize(p.namelen));
814 814
815 p.ino = XFS_DIR2_SF_GET_INUMBER(sfp, XFS_DIR2_SF_INUMBERP(sfep)); 815 p.ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
816#if XFS_BIG_INUMS 816#if XFS_BIG_INUMS
817 p.ino += mp->m_inoadd; 817 p.ino += mp->m_inoadd;
818#endif 818#endif
@@ -832,7 +832,7 @@ xfs_dir2_sf_getdents(
832 *eofp = 1; 832 *eofp = 1;
833 833
834 uio->uio_offset = 834 uio->uio_offset =
835 XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk + 1, 0); 835 xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0);
836 836
837 return 0; 837 return 0;
838} 838}
@@ -865,7 +865,7 @@ xfs_dir2_sf_lookup(
865 ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); 865 ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
866 ASSERT(dp->i_df.if_u1.if_data != NULL); 866 ASSERT(dp->i_df.if_u1.if_data != NULL);
867 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; 867 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
868 ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); 868 ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
869 /* 869 /*
870 * Special case for . 870 * Special case for .
871 */ 871 */
@@ -878,21 +878,21 @@ xfs_dir2_sf_lookup(
878 */ 878 */
879 if (args->namelen == 2 && 879 if (args->namelen == 2 &&
880 args->name[0] == '.' && args->name[1] == '.') { 880 args->name[0] == '.' && args->name[1] == '.') {
881 args->inumber = XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent); 881 args->inumber = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent);
882 return XFS_ERROR(EEXIST); 882 return XFS_ERROR(EEXIST);
883 } 883 }
884 /* 884 /*
885 * Loop over all the entries trying to match ours. 885 * Loop over all the entries trying to match ours.
886 */ 886 */
887 for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); 887 for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp);
888 i < sfp->hdr.count; 888 i < sfp->hdr.count;
889 i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) { 889 i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
890 if (sfep->namelen == args->namelen && 890 if (sfep->namelen == args->namelen &&
891 sfep->name[0] == args->name[0] && 891 sfep->name[0] == args->name[0] &&
892 memcmp(args->name, sfep->name, args->namelen) == 0) { 892 memcmp(args->name, sfep->name, args->namelen) == 0) {
893 args->inumber = 893 args->inumber =
894 XFS_DIR2_SF_GET_INUMBER(sfp, 894 xfs_dir2_sf_get_inumber(sfp,
895 XFS_DIR2_SF_INUMBERP(sfep)); 895 xfs_dir2_sf_inumberp(sfep));
896 return XFS_ERROR(EEXIST); 896 return XFS_ERROR(EEXIST);
897 } 897 }
898 } 898 }
@@ -934,19 +934,19 @@ xfs_dir2_sf_removename(
934 ASSERT(dp->i_df.if_bytes == oldsize); 934 ASSERT(dp->i_df.if_bytes == oldsize);
935 ASSERT(dp->i_df.if_u1.if_data != NULL); 935 ASSERT(dp->i_df.if_u1.if_data != NULL);
936 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; 936 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
937 ASSERT(oldsize >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); 937 ASSERT(oldsize >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
938 /* 938 /*
939 * Loop over the old directory entries. 939 * Loop over the old directory entries.
940 * Find the one we're deleting. 940 * Find the one we're deleting.
941 */ 941 */
942 for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); 942 for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp);
943 i < sfp->hdr.count; 943 i < sfp->hdr.count;
944 i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) { 944 i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
945 if (sfep->namelen == args->namelen && 945 if (sfep->namelen == args->namelen &&
946 sfep->name[0] == args->name[0] && 946 sfep->name[0] == args->name[0] &&
947 memcmp(sfep->name, args->name, args->namelen) == 0) { 947 memcmp(sfep->name, args->name, args->namelen) == 0) {
948 ASSERT(XFS_DIR2_SF_GET_INUMBER(sfp, 948 ASSERT(xfs_dir2_sf_get_inumber(sfp,
949 XFS_DIR2_SF_INUMBERP(sfep)) == 949 xfs_dir2_sf_inumberp(sfep)) ==
950 args->inumber); 950 args->inumber);
951 break; 951 break;
952 } 952 }
@@ -961,7 +961,7 @@ xfs_dir2_sf_removename(
961 * Calculate sizes. 961 * Calculate sizes.
962 */ 962 */
963 byteoff = (int)((char *)sfep - (char *)sfp); 963 byteoff = (int)((char *)sfep - (char *)sfp);
964 entsize = XFS_DIR2_SF_ENTSIZE_BYNAME(sfp, args->namelen); 964 entsize = xfs_dir2_sf_entsize_byname(sfp, args->namelen);
965 newsize = oldsize - entsize; 965 newsize = oldsize - entsize;
966 /* 966 /*
967 * Copy the part if any after the removed entry, sliding it down. 967 * Copy the part if any after the removed entry, sliding it down.
@@ -1027,7 +1027,7 @@ xfs_dir2_sf_replace(
1027 ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); 1027 ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
1028 ASSERT(dp->i_df.if_u1.if_data != NULL); 1028 ASSERT(dp->i_df.if_u1.if_data != NULL);
1029 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; 1029 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
1030 ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); 1030 ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
1031#if XFS_BIG_INUMS 1031#if XFS_BIG_INUMS
1032 /* 1032 /*
1033 * New inode number is large, and need to convert to 8-byte inodes. 1033 * New inode number is large, and need to convert to 8-byte inodes.
@@ -1067,28 +1067,28 @@ xfs_dir2_sf_replace(
1067 if (args->namelen == 2 && 1067 if (args->namelen == 2 &&
1068 args->name[0] == '.' && args->name[1] == '.') { 1068 args->name[0] == '.' && args->name[1] == '.') {
1069#if XFS_BIG_INUMS || defined(DEBUG) 1069#if XFS_BIG_INUMS || defined(DEBUG)
1070 ino = XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent); 1070 ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent);
1071 ASSERT(args->inumber != ino); 1071 ASSERT(args->inumber != ino);
1072#endif 1072#endif
1073 XFS_DIR2_SF_PUT_INUMBER(sfp, &args->inumber, &sfp->hdr.parent); 1073 xfs_dir2_sf_put_inumber(sfp, &args->inumber, &sfp->hdr.parent);
1074 } 1074 }
1075 /* 1075 /*
1076 * Normal entry, look for the name. 1076 * Normal entry, look for the name.
1077 */ 1077 */
1078 else { 1078 else {
1079 for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); 1079 for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp);
1080 i < sfp->hdr.count; 1080 i < sfp->hdr.count;
1081 i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) { 1081 i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
1082 if (sfep->namelen == args->namelen && 1082 if (sfep->namelen == args->namelen &&
1083 sfep->name[0] == args->name[0] && 1083 sfep->name[0] == args->name[0] &&
1084 memcmp(args->name, sfep->name, args->namelen) == 0) { 1084 memcmp(args->name, sfep->name, args->namelen) == 0) {
1085#if XFS_BIG_INUMS || defined(DEBUG) 1085#if XFS_BIG_INUMS || defined(DEBUG)
1086 ino = XFS_DIR2_SF_GET_INUMBER(sfp, 1086 ino = xfs_dir2_sf_get_inumber(sfp,
1087 XFS_DIR2_SF_INUMBERP(sfep)); 1087 xfs_dir2_sf_inumberp(sfep));
1088 ASSERT(args->inumber != ino); 1088 ASSERT(args->inumber != ino);
1089#endif 1089#endif
1090 XFS_DIR2_SF_PUT_INUMBER(sfp, &args->inumber, 1090 xfs_dir2_sf_put_inumber(sfp, &args->inumber,
1091 XFS_DIR2_SF_INUMBERP(sfep)); 1091 xfs_dir2_sf_inumberp(sfep));
1092 break; 1092 break;
1093 } 1093 }
1094 } 1094 }
@@ -1189,22 +1189,22 @@ xfs_dir2_sf_toino4(
1189 */ 1189 */
1190 sfp->hdr.count = oldsfp->hdr.count; 1190 sfp->hdr.count = oldsfp->hdr.count;
1191 sfp->hdr.i8count = 0; 1191 sfp->hdr.i8count = 0;
1192 ino = XFS_DIR2_SF_GET_INUMBER(oldsfp, &oldsfp->hdr.parent); 1192 ino = xfs_dir2_sf_get_inumber(oldsfp, &oldsfp->hdr.parent);
1193 XFS_DIR2_SF_PUT_INUMBER(sfp, &ino, &sfp->hdr.parent); 1193 xfs_dir2_sf_put_inumber(sfp, &ino, &sfp->hdr.parent);
1194 /* 1194 /*
1195 * Copy the entries field by field. 1195 * Copy the entries field by field.
1196 */ 1196 */
1197 for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp), 1197 for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp),
1198 oldsfep = XFS_DIR2_SF_FIRSTENTRY(oldsfp); 1198 oldsfep = xfs_dir2_sf_firstentry(oldsfp);
1199 i < sfp->hdr.count; 1199 i < sfp->hdr.count;
1200 i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep), 1200 i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep),
1201 oldsfep = XFS_DIR2_SF_NEXTENTRY(oldsfp, oldsfep)) { 1201 oldsfep = xfs_dir2_sf_nextentry(oldsfp, oldsfep)) {
1202 sfep->namelen = oldsfep->namelen; 1202 sfep->namelen = oldsfep->namelen;
1203 sfep->offset = oldsfep->offset; 1203 sfep->offset = oldsfep->offset;
1204 memcpy(sfep->name, oldsfep->name, sfep->namelen); 1204 memcpy(sfep->name, oldsfep->name, sfep->namelen);
1205 ino = XFS_DIR2_SF_GET_INUMBER(oldsfp, 1205 ino = xfs_dir2_sf_get_inumber(oldsfp,
1206 XFS_DIR2_SF_INUMBERP(oldsfep)); 1206 xfs_dir2_sf_inumberp(oldsfep));
1207 XFS_DIR2_SF_PUT_INUMBER(sfp, &ino, XFS_DIR2_SF_INUMBERP(sfep)); 1207 xfs_dir2_sf_put_inumber(sfp, &ino, xfs_dir2_sf_inumberp(sfep));
1208 } 1208 }
1209 /* 1209 /*
1210 * Clean up the inode. 1210 * Clean up the inode.
@@ -1266,22 +1266,22 @@ xfs_dir2_sf_toino8(
1266 */ 1266 */
1267 sfp->hdr.count = oldsfp->hdr.count; 1267 sfp->hdr.count = oldsfp->hdr.count;
1268 sfp->hdr.i8count = 1; 1268 sfp->hdr.i8count = 1;
1269 ino = XFS_DIR2_SF_GET_INUMBER(oldsfp, &oldsfp->hdr.parent); 1269 ino = xfs_dir2_sf_get_inumber(oldsfp, &oldsfp->hdr.parent);
1270 XFS_DIR2_SF_PUT_INUMBER(sfp, &ino, &sfp->hdr.parent); 1270 xfs_dir2_sf_put_inumber(sfp, &ino, &sfp->hdr.parent);
1271 /* 1271 /*
1272 * Copy the entries field by field. 1272 * Copy the entries field by field.
1273 */ 1273 */
1274 for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp), 1274 for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp),
1275 oldsfep = XFS_DIR2_SF_FIRSTENTRY(oldsfp); 1275 oldsfep = xfs_dir2_sf_firstentry(oldsfp);
1276 i < sfp->hdr.count; 1276 i < sfp->hdr.count;
1277 i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep), 1277 i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep),
1278 oldsfep = XFS_DIR2_SF_NEXTENTRY(oldsfp, oldsfep)) { 1278 oldsfep = xfs_dir2_sf_nextentry(oldsfp, oldsfep)) {
1279 sfep->namelen = oldsfep->namelen; 1279 sfep->namelen = oldsfep->namelen;
1280 sfep->offset = oldsfep->offset; 1280 sfep->offset = oldsfep->offset;
1281 memcpy(sfep->name, oldsfep->name, sfep->namelen); 1281 memcpy(sfep->name, oldsfep->name, sfep->namelen);
1282 ino = XFS_DIR2_SF_GET_INUMBER(oldsfp, 1282 ino = xfs_dir2_sf_get_inumber(oldsfp,
1283 XFS_DIR2_SF_INUMBERP(oldsfep)); 1283 xfs_dir2_sf_inumberp(oldsfep));
1284 XFS_DIR2_SF_PUT_INUMBER(sfp, &ino, XFS_DIR2_SF_INUMBERP(sfep)); 1284 xfs_dir2_sf_put_inumber(sfp, &ino, xfs_dir2_sf_inumberp(sfep));
1285 } 1285 }
1286 /* 1286 /*
1287 * Clean up the inode. 1287 * Clean up the inode.
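
Two of the routines above, xfs_dir2_sf_addname_pick() and xfs_dir2_sf_addname_hard(), walk the shortform entries in stored-offset order and ask whether the new name fits into a gap ahead of an existing entry (the holefit test) or has to be placed after the last one. Below is a stripped-down user-space version of that scan, with invented offsets and sizes and without the inline-versus-block decisions the real code also has to make.

#include <stdio.h>

struct ent {
	int offset;	/* where the entry sits in the (imaginary) data block */
	int size;	/* how many bytes it occupies */
};

/* Entries kept in increasing offset order, as the shortform code assumes. */
static const struct ent dir[] = {
	{ 16, 12 }, { 32, 12 }, { 64, 16 },
};

/*
 * Return the offset where a new entry of 'need' bytes would land: the
 * first gap large enough, otherwise just past the last entry.
 */
static int pick_offset(int need, int first_offset)
{
	int i, offset = first_offset;

	for (i = 0; i < (int)(sizeof(dir) / sizeof(dir[0])); i++) {
		if (offset + need <= dir[i].offset)
			return offset;		/* the hole before dir[i] fits */
		offset = dir[i].offset + dir[i].size;
	}
	return offset;				/* no hole fits, append */
}

int main(void)
{
	printf("12-byte entry goes at offset %d\n", pick_offset(12, 16));
	printf("24-byte entry goes at offset %d\n", pick_offset(24, 16));
	return 0;
}

With this sample table the 12-byte entry drops into the 20-byte gap at offset 44, while the 24-byte entry fits nowhere and is appended at offset 80.
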
diff --git a/fs/xfs/xfs_dir2_sf.h b/fs/xfs/xfs_dir2_sf.h
index 42f015b70018..11e503209afa 100644
--- a/fs/xfs/xfs_dir2_sf.h
+++ b/fs/xfs/xfs_dir2_sf.h
@@ -90,7 +90,6 @@ typedef struct xfs_dir2_sf {
90 xfs_dir2_sf_entry_t list[1]; /* shortform entries */ 90 xfs_dir2_sf_entry_t list[1]; /* shortform entries */
91} xfs_dir2_sf_t; 91} xfs_dir2_sf_t;
92 92
93#define XFS_DIR2_SF_HDR_SIZE(i8count) xfs_dir2_sf_hdr_size(i8count)
94static inline int xfs_dir2_sf_hdr_size(int i8count) 93static inline int xfs_dir2_sf_hdr_size(int i8count)
95{ 94{
96 return ((uint)sizeof(xfs_dir2_sf_hdr_t) - \ 95 return ((uint)sizeof(xfs_dir2_sf_hdr_t) - \
@@ -98,14 +97,11 @@ static inline int xfs_dir2_sf_hdr_size(int i8count)
98 ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t))); 97 ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t)));
99} 98}
100 99
101#define XFS_DIR2_SF_INUMBERP(sfep) xfs_dir2_sf_inumberp(sfep)
102static inline xfs_dir2_inou_t *xfs_dir2_sf_inumberp(xfs_dir2_sf_entry_t *sfep) 100static inline xfs_dir2_inou_t *xfs_dir2_sf_inumberp(xfs_dir2_sf_entry_t *sfep)
103{ 101{
104 return (xfs_dir2_inou_t *)&(sfep)->name[(sfep)->namelen]; 102 return (xfs_dir2_inou_t *)&(sfep)->name[(sfep)->namelen];
105} 103}
106 104
107#define XFS_DIR2_SF_GET_INUMBER(sfp, from) \
108 xfs_dir2_sf_get_inumber(sfp, from)
109static inline xfs_intino_t 105static inline xfs_intino_t
110xfs_dir2_sf_get_inumber(xfs_dir2_sf_t *sfp, xfs_dir2_inou_t *from) 106xfs_dir2_sf_get_inumber(xfs_dir2_sf_t *sfp, xfs_dir2_inou_t *from)
111{ 107{
@@ -114,8 +110,6 @@ xfs_dir2_sf_get_inumber(xfs_dir2_sf_t *sfp, xfs_dir2_inou_t *from)
114 (xfs_intino_t)XFS_GET_DIR_INO8((from)->i8)); 110 (xfs_intino_t)XFS_GET_DIR_INO8((from)->i8));
115} 111}
116 112
117#define XFS_DIR2_SF_PUT_INUMBER(sfp,from,to) \
118 xfs_dir2_sf_put_inumber(sfp,from,to)
119static inline void xfs_dir2_sf_put_inumber(xfs_dir2_sf_t *sfp, xfs_ino_t *from, 113static inline void xfs_dir2_sf_put_inumber(xfs_dir2_sf_t *sfp, xfs_ino_t *from,
120 xfs_dir2_inou_t *to) 114 xfs_dir2_inou_t *to)
121{ 115{
@@ -125,24 +119,18 @@ static inline void xfs_dir2_sf_put_inumber(xfs_dir2_sf_t *sfp, xfs_ino_t *from,
125 XFS_PUT_DIR_INO8(*(from), (to)->i8); 119 XFS_PUT_DIR_INO8(*(from), (to)->i8);
126} 120}
127 121
128#define XFS_DIR2_SF_GET_OFFSET(sfep) \
129 xfs_dir2_sf_get_offset(sfep)
130static inline xfs_dir2_data_aoff_t 122static inline xfs_dir2_data_aoff_t
131xfs_dir2_sf_get_offset(xfs_dir2_sf_entry_t *sfep) 123xfs_dir2_sf_get_offset(xfs_dir2_sf_entry_t *sfep)
132{ 124{
133 return INT_GET_UNALIGNED_16_BE(&(sfep)->offset.i); 125 return INT_GET_UNALIGNED_16_BE(&(sfep)->offset.i);
134} 126}
135 127
136#define XFS_DIR2_SF_PUT_OFFSET(sfep,off) \
137 xfs_dir2_sf_put_offset(sfep,off)
138static inline void 128static inline void
139xfs_dir2_sf_put_offset(xfs_dir2_sf_entry_t *sfep, xfs_dir2_data_aoff_t off) 129xfs_dir2_sf_put_offset(xfs_dir2_sf_entry_t *sfep, xfs_dir2_data_aoff_t off)
140{ 130{
141 INT_SET_UNALIGNED_16_BE(&(sfep)->offset.i, off); 131 INT_SET_UNALIGNED_16_BE(&(sfep)->offset.i, off);
142} 132}
143 133
144#define XFS_DIR2_SF_ENTSIZE_BYNAME(sfp,len) \
145 xfs_dir2_sf_entsize_byname(sfp,len)
146static inline int xfs_dir2_sf_entsize_byname(xfs_dir2_sf_t *sfp, int len) 134static inline int xfs_dir2_sf_entsize_byname(xfs_dir2_sf_t *sfp, int len)
147{ 135{
148 return ((uint)sizeof(xfs_dir2_sf_entry_t) - 1 + (len) - \ 136 return ((uint)sizeof(xfs_dir2_sf_entry_t) - 1 + (len) - \
@@ -150,8 +138,6 @@ static inline int xfs_dir2_sf_entsize_byname(xfs_dir2_sf_t *sfp, int len)
150 ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t))); 138 ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t)));
151} 139}
152 140
153#define XFS_DIR2_SF_ENTSIZE_BYENTRY(sfp,sfep) \
154 xfs_dir2_sf_entsize_byentry(sfp,sfep)
155static inline int 141static inline int
156xfs_dir2_sf_entsize_byentry(xfs_dir2_sf_t *sfp, xfs_dir2_sf_entry_t *sfep) 142xfs_dir2_sf_entsize_byentry(xfs_dir2_sf_t *sfp, xfs_dir2_sf_entry_t *sfep)
157{ 143{
@@ -160,19 +146,17 @@ xfs_dir2_sf_entsize_byentry(xfs_dir2_sf_t *sfp, xfs_dir2_sf_entry_t *sfep)
160 ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t))); 146 ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t)));
161} 147}
162 148
163#define XFS_DIR2_SF_FIRSTENTRY(sfp) xfs_dir2_sf_firstentry(sfp)
164static inline xfs_dir2_sf_entry_t *xfs_dir2_sf_firstentry(xfs_dir2_sf_t *sfp) 149static inline xfs_dir2_sf_entry_t *xfs_dir2_sf_firstentry(xfs_dir2_sf_t *sfp)
165{ 150{
166 return ((xfs_dir2_sf_entry_t *) \ 151 return ((xfs_dir2_sf_entry_t *) \
167 ((char *)(sfp) + XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count))); 152 ((char *)(sfp) + xfs_dir2_sf_hdr_size(sfp->hdr.i8count)));
168} 153}
169 154
170#define XFS_DIR2_SF_NEXTENTRY(sfp,sfep) xfs_dir2_sf_nextentry(sfp,sfep)
171static inline xfs_dir2_sf_entry_t * 155static inline xfs_dir2_sf_entry_t *
172xfs_dir2_sf_nextentry(xfs_dir2_sf_t *sfp, xfs_dir2_sf_entry_t *sfep) 156xfs_dir2_sf_nextentry(xfs_dir2_sf_t *sfp, xfs_dir2_sf_entry_t *sfep)
173{ 157{
174 return ((xfs_dir2_sf_entry_t *) \ 158 return ((xfs_dir2_sf_entry_t *) \
175 ((char *)(sfep) + XFS_DIR2_SF_ENTSIZE_BYENTRY(sfp,sfep))); 159 ((char *)(sfep) + xfs_dir2_sf_entsize_byentry(sfp,sfep)));
176} 160}
177 161
178/* 162/*
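
The inline helpers above capture the one subtle property of shortform entries: they are variable length, and the width of the packed inode number depends on the header's i8count, so stepping from one entry to the next always needs both the header and the current entry. A minimal user-space analogue of that walk follows, with made-up struct layouts (the real entries also carry a 2-byte offset field and pack the inode number through dedicated get/put helpers).

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/*
 * Simplified stand-ins for the shortform layout: a header with an entry
 * count and an i8count flag, followed by tightly packed variable-length
 * entries. Hypothetical types, not the ones in xfs_dir2_sf.h.
 */
struct sf_hdr {
	uint8_t count;
	uint8_t i8count;
};

struct sf_entry {
	uint8_t namelen;
	char	name[];		/* name bytes, then a 4- or 8-byte inumber */
};

/* Packed inode number width, chosen by the header's i8count. */
static inline int inum_size(const struct sf_hdr *hdr)
{
	return hdr->i8count ? 8 : 4;
}

/* Size of one entry: fixed fields + name + packed inode number. */
static inline int sf_entsize(const struct sf_hdr *hdr, int namelen)
{
	return (int)sizeof(struct sf_entry) + namelen + inum_size(hdr);
}

/* Step to the following entry, in the spirit of xfs_dir2_sf_nextentry(). */
static inline struct sf_entry *
sf_nextentry(const struct sf_hdr *hdr, struct sf_entry *sfep)
{
	return (struct sf_entry *)((char *)sfep + sf_entsize(hdr, sfep->namelen));
}

int main(void)
{
	unsigned char buf[64];
	struct sf_hdr hdr = { .count = 2, .i8count = 0 };
	struct sf_entry *sfep;
	int i;

	/* Pack two entries, "a" and "bc"; the inumber bytes are left as filler. */
	sfep = (struct sf_entry *)buf;
	sfep->namelen = 1;
	memcpy(sfep->name, "a", 1);
	sfep = sf_nextentry(&hdr, sfep);
	sfep->namelen = 2;
	memcpy(sfep->name, "bc", 2);

	/* Walk them the same way the loops in xfs_dir2_sf.c do. */
	for (i = 0, sfep = (struct sf_entry *)buf;
	     i < hdr.count;
	     i++, sfep = sf_nextentry(&hdr, sfep))
		printf("entry %d: namelen %d\n", i, sfep->namelen);
	return 0;
}
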
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
new file mode 100644
index 000000000000..ce2278611bb7
--- /dev/null
+++ b/fs/xfs/xfs_filestream.c
@@ -0,0 +1,771 @@
1/*
2 * Copyright (c) 2006-2007 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_bmap_btree.h"
20#include "xfs_inum.h"
21#include "xfs_dir2.h"
22#include "xfs_dir2_sf.h"
23#include "xfs_attr_sf.h"
24#include "xfs_dinode.h"
25#include "xfs_inode.h"
26#include "xfs_ag.h"
27#include "xfs_dmapi.h"
28#include "xfs_log.h"
29#include "xfs_trans.h"
30#include "xfs_sb.h"
31#include "xfs_mount.h"
32#include "xfs_bmap.h"
33#include "xfs_alloc.h"
34#include "xfs_utils.h"
35#include "xfs_mru_cache.h"
36#include "xfs_filestream.h"
37
38#ifdef XFS_FILESTREAMS_TRACE
39
40ktrace_t *xfs_filestreams_trace_buf;
41
42STATIC void
43xfs_filestreams_trace(
44 xfs_mount_t *mp, /* mount point */
45 int type, /* type of trace */
46 const char *func, /* source function */
47 int line, /* source line number */
48 __psunsigned_t arg0,
49 __psunsigned_t arg1,
50 __psunsigned_t arg2,
51 __psunsigned_t arg3,
52 __psunsigned_t arg4,
53 __psunsigned_t arg5)
54{
55 ktrace_enter(xfs_filestreams_trace_buf,
56 (void *)(__psint_t)(type | (line << 16)),
57 (void *)func,
58 (void *)(__psunsigned_t)current_pid(),
59 (void *)mp,
60 (void *)(__psunsigned_t)arg0,
61 (void *)(__psunsigned_t)arg1,
62 (void *)(__psunsigned_t)arg2,
63 (void *)(__psunsigned_t)arg3,
64 (void *)(__psunsigned_t)arg4,
65 (void *)(__psunsigned_t)arg5,
66 NULL, NULL, NULL, NULL, NULL, NULL);
67}
68
69#define TRACE0(mp,t) TRACE6(mp,t,0,0,0,0,0,0)
70#define TRACE1(mp,t,a0) TRACE6(mp,t,a0,0,0,0,0,0)
71#define TRACE2(mp,t,a0,a1) TRACE6(mp,t,a0,a1,0,0,0,0)
72#define TRACE3(mp,t,a0,a1,a2) TRACE6(mp,t,a0,a1,a2,0,0,0)
73#define TRACE4(mp,t,a0,a1,a2,a3) TRACE6(mp,t,a0,a1,a2,a3,0,0)
74#define TRACE5(mp,t,a0,a1,a2,a3,a4) TRACE6(mp,t,a0,a1,a2,a3,a4,0)
75#define TRACE6(mp,t,a0,a1,a2,a3,a4,a5) \
76 xfs_filestreams_trace(mp, t, __FUNCTION__, __LINE__, \
77 (__psunsigned_t)a0, (__psunsigned_t)a1, \
78 (__psunsigned_t)a2, (__psunsigned_t)a3, \
79 (__psunsigned_t)a4, (__psunsigned_t)a5)
80
81#define TRACE_AG_SCAN(mp, ag, ag2) \
82 TRACE2(mp, XFS_FSTRM_KTRACE_AGSCAN, ag, ag2);
83#define TRACE_AG_PICK1(mp, max_ag, maxfree) \
84 TRACE2(mp, XFS_FSTRM_KTRACE_AGPICK1, max_ag, maxfree);
85#define TRACE_AG_PICK2(mp, ag, ag2, cnt, free, scan, flag) \
86 TRACE6(mp, XFS_FSTRM_KTRACE_AGPICK2, ag, ag2, \
87 cnt, free, scan, flag)
88#define TRACE_UPDATE(mp, ip, ag, cnt, ag2, cnt2) \
89 TRACE5(mp, XFS_FSTRM_KTRACE_UPDATE, ip, ag, cnt, ag2, cnt2)
90#define TRACE_FREE(mp, ip, pip, ag, cnt) \
91 TRACE4(mp, XFS_FSTRM_KTRACE_FREE, ip, pip, ag, cnt)
92#define TRACE_LOOKUP(mp, ip, pip, ag, cnt) \
93 TRACE4(mp, XFS_FSTRM_KTRACE_ITEM_LOOKUP, ip, pip, ag, cnt)
94#define TRACE_ASSOCIATE(mp, ip, pip, ag, cnt) \
95 TRACE4(mp, XFS_FSTRM_KTRACE_ASSOCIATE, ip, pip, ag, cnt)
96#define TRACE_MOVEAG(mp, ip, pip, oag, ocnt, nag, ncnt) \
97 TRACE6(mp, XFS_FSTRM_KTRACE_MOVEAG, ip, pip, oag, ocnt, nag, ncnt)
98#define TRACE_ORPHAN(mp, ip, ag) \
99 TRACE2(mp, XFS_FSTRM_KTRACE_ORPHAN, ip, ag);
100
101
102#else
103#define TRACE_AG_SCAN(mp, ag, ag2)
104#define TRACE_AG_PICK1(mp, max_ag, maxfree)
105#define TRACE_AG_PICK2(mp, ag, ag2, cnt, free, scan, flag)
106#define TRACE_UPDATE(mp, ip, ag, cnt, ag2, cnt2)
107#define TRACE_FREE(mp, ip, pip, ag, cnt)
108#define TRACE_LOOKUP(mp, ip, pip, ag, cnt)
109#define TRACE_ASSOCIATE(mp, ip, pip, ag, cnt)
110#define TRACE_MOVEAG(mp, ip, pip, oag, ocnt, nag, ncnt)
111#define TRACE_ORPHAN(mp, ip, ag)
112#endif
113
114static kmem_zone_t *item_zone;
115
116/*
117 * Structure for associating a file or a directory with an allocation group.
118 * The parent directory pointer is only needed for files, but since there will
119 * generally be vastly more files than directories in the cache, using the same
120 * data structure simplifies the code with very little memory overhead.
121 */
122typedef struct fstrm_item
123{
124 xfs_agnumber_t ag; /* AG currently in use for the file/directory. */
125 xfs_inode_t *ip; /* inode self-pointer. */
126 xfs_inode_t *pip; /* Parent directory inode pointer. */
127} fstrm_item_t;
128
129
130/*
131 * Scan the AGs starting at startag looking for an AG that isn't in use and has
132 * at least minlen blocks free.
133 */
134static int
135_xfs_filestream_pick_ag(
136 xfs_mount_t *mp,
137 xfs_agnumber_t startag,
138 xfs_agnumber_t *agp,
139 int flags,
140 xfs_extlen_t minlen)
141{
142 int err, trylock, nscan;
143 xfs_extlen_t delta, longest, need, free, minfree, maxfree = 0;
144 xfs_agnumber_t ag, max_ag = NULLAGNUMBER;
145 struct xfs_perag *pag;
146
147 /* 2% of an AG's blocks must be free for it to be chosen. */
148 minfree = mp->m_sb.sb_agblocks / 50;
149
150 ag = startag;
151 *agp = NULLAGNUMBER;
152
153 /* For the first pass, don't sleep trying to init the per-AG. */
154 trylock = XFS_ALLOC_FLAG_TRYLOCK;
155
156 for (nscan = 0; 1; nscan++) {
157
158 TRACE_AG_SCAN(mp, ag, xfs_filestream_peek_ag(mp, ag));
159
160 pag = mp->m_perag + ag;
161
162 if (!pag->pagf_init) {
163 err = xfs_alloc_pagf_init(mp, NULL, ag, trylock);
164 if (err && !trylock)
165 return err;
166 }
167
168 /* Might fail sometimes during the 1st pass with trylock set. */
169 if (!pag->pagf_init)
170 goto next_ag;
171
172 /* Keep track of the AG with the most free blocks. */
173 if (pag->pagf_freeblks > maxfree) {
174 maxfree = pag->pagf_freeblks;
175 max_ag = ag;
176 }
177
178 /*
179 * The AG reference count does two things: it enforces mutual
180 * exclusion when examining the suitability of an AG in this
181 * loop, and it guards against two filestreams being established
182 * in the same AG as each other.
183 */
184 if (xfs_filestream_get_ag(mp, ag) > 1) {
185 xfs_filestream_put_ag(mp, ag);
186 goto next_ag;
187 }
188
189 need = XFS_MIN_FREELIST_PAG(pag, mp);
190 delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0;
191 longest = (pag->pagf_longest > delta) ?
192 (pag->pagf_longest - delta) :
193 (pag->pagf_flcount > 0 || pag->pagf_longest > 0);
194
195 if (((minlen && longest >= minlen) ||
196 (!minlen && pag->pagf_freeblks >= minfree)) &&
197 (!pag->pagf_metadata || !(flags & XFS_PICK_USERDATA) ||
198 (flags & XFS_PICK_LOWSPACE))) {
199
200 /* Break out, retaining the reference on the AG. */
201 free = pag->pagf_freeblks;
202 *agp = ag;
203 break;
204 }
205
206 /* Drop the reference on this AG, it's not usable. */
207 xfs_filestream_put_ag(mp, ag);
208next_ag:
209 /* Move to the next AG, wrapping to AG 0 if necessary. */
210 if (++ag >= mp->m_sb.sb_agcount)
211 ag = 0;
212
213 /* If a full pass of the AGs hasn't been done yet, continue. */
214 if (ag != startag)
215 continue;
216
217 /* Allow sleeping in xfs_alloc_pagf_init() on the 2nd pass. */
218 if (trylock != 0) {
219 trylock = 0;
220 continue;
221 }
222
223 /* Finally, if lowspace wasn't set, set it for the 3rd pass. */
224 if (!(flags & XFS_PICK_LOWSPACE)) {
225 flags |= XFS_PICK_LOWSPACE;
226 continue;
227 }
228
229 /*
230 * Take the AG with the most free space, regardless of whether
231 * it's already in use by another filestream.
232 */
233 if (max_ag != NULLAGNUMBER) {
234 xfs_filestream_get_ag(mp, max_ag);
235 TRACE_AG_PICK1(mp, max_ag, maxfree);
236 free = maxfree;
237 *agp = max_ag;
238 break;
239 }
240
241 /* take AG 0 if none matched */
242 TRACE_AG_PICK1(mp, max_ag, maxfree);
243 *agp = 0;
244 return 0;
245 }
246
247 TRACE_AG_PICK2(mp, startag, *agp, xfs_filestream_peek_ag(mp, *agp),
248 free, nscan, flags);
249
250 return 0;
251}
252
253/*
254 * Set the allocation group number for a file or a directory, updating inode
255 * references and per-AG references as appropriate. Must be called with the
256 * m_peraglock held in read mode.
257 */
258static int
259_xfs_filestream_update_ag(
260 xfs_inode_t *ip,
261 xfs_inode_t *pip,
262 xfs_agnumber_t ag)
263{
264 int err = 0;
265 xfs_mount_t *mp;
266 xfs_mru_cache_t *cache;
267 fstrm_item_t *item;
268 xfs_agnumber_t old_ag;
269 xfs_inode_t *old_pip;
270
271 /*
272 * Either ip is a regular file and pip is a directory, or ip is a
273 * directory and pip is NULL.
274 */
275 ASSERT(ip && (((ip->i_d.di_mode & S_IFREG) && pip &&
276 (pip->i_d.di_mode & S_IFDIR)) ||
277 ((ip->i_d.di_mode & S_IFDIR) && !pip)));
278
279 mp = ip->i_mount;
280 cache = mp->m_filestream;
281
282 item = xfs_mru_cache_lookup(cache, ip->i_ino);
283 if (item) {
284 ASSERT(item->ip == ip);
285 old_ag = item->ag;
286 item->ag = ag;
287 old_pip = item->pip;
288 item->pip = pip;
289 xfs_mru_cache_done(cache);
290
291 /*
292 * If the AG has changed, drop the old ref and take a new one,
293 * effectively transferring the reference from old to new AG.
294 */
295 if (ag != old_ag) {
296 xfs_filestream_put_ag(mp, old_ag);
297 xfs_filestream_get_ag(mp, ag);
298 }
299
300 /*
301 * If ip is a file and its pip has changed, drop the old ref and
302 * take a new one.
303 */
304 if (pip && pip != old_pip) {
305 IRELE(old_pip);
306 IHOLD(pip);
307 }
308
309 TRACE_UPDATE(mp, ip, old_ag, xfs_filestream_peek_ag(mp, old_ag),
310 ag, xfs_filestream_peek_ag(mp, ag));
311 return 0;
312 }
313
314 item = kmem_zone_zalloc(item_zone, KM_MAYFAIL);
315 if (!item)
316 return ENOMEM;
317
318 item->ag = ag;
319 item->ip = ip;
320 item->pip = pip;
321
322 err = xfs_mru_cache_insert(cache, ip->i_ino, item);
323 if (err) {
324 kmem_zone_free(item_zone, item);
325 return err;
326 }
327
328 /* Take a reference on the AG. */
329 xfs_filestream_get_ag(mp, ag);
330
331 /*
332 * Take a reference on the inode itself regardless of whether it's a
333 * regular file or a directory.
334 */
335 IHOLD(ip);
336
337 /*
338 * In the case of a regular file, take a reference on the parent inode
339 * as well to ensure it remains in-core.
340 */
341 if (pip)
342 IHOLD(pip);
343
344 TRACE_UPDATE(mp, ip, ag, xfs_filestream_peek_ag(mp, ag),
345 ag, xfs_filestream_peek_ag(mp, ag));
346
347 return 0;
348}
349
350/* xfs_fstrm_free_func(): callback for freeing cached stream items. */
351void
352xfs_fstrm_free_func(
353 xfs_ino_t ino,
354 fstrm_item_t *item)
355{
356 xfs_inode_t *ip = item->ip;
357 int ref;
358
359 ASSERT(ip->i_ino == ino);
360
361 xfs_iflags_clear(ip, XFS_IFILESTREAM);
362
363 /* Drop the reference taken on the AG when the item was added. */
364 ref = xfs_filestream_put_ag(ip->i_mount, item->ag);
365
366 ASSERT(ref >= 0);
367 TRACE_FREE(ip->i_mount, ip, item->pip, item->ag,
368 xfs_filestream_peek_ag(ip->i_mount, item->ag));
369
370 /*
371 * _xfs_filestream_update_ag() always takes a reference on the inode
372 * itself, whether it's a file or a directory. Release it here.
373 * This can result in the inode being freed and so we must
 374 * not hold any inode locks when freeing filestreams objects
375 * otherwise we can deadlock here.
376 */
377 IRELE(ip);
378
379 /*
380 * In the case of a regular file, _xfs_filestream_update_ag() also
381 * takes a ref on the parent inode to keep it in-core. Release that
382 * too.
383 */
384 if (item->pip)
385 IRELE(item->pip);
386
387 /* Finally, free the memory allocated for the item. */
388 kmem_zone_free(item_zone, item);
389}
390
391/*
392 * xfs_filestream_init() is called at xfs initialisation time to set up the
393 * memory zone that will be used for filestream data structure allocation.
394 */
395int
396xfs_filestream_init(void)
397{
398 item_zone = kmem_zone_init(sizeof(fstrm_item_t), "fstrm_item");
399#ifdef XFS_FILESTREAMS_TRACE
400 xfs_filestreams_trace_buf = ktrace_alloc(XFS_FSTRM_KTRACE_SIZE, KM_SLEEP);
401#endif
402 return item_zone ? 0 : -ENOMEM;
403}
404
405/*
406 * xfs_filestream_uninit() is called at xfs termination time to destroy the
407 * memory zone that was used for filestream data structure allocation.
408 */
409void
410xfs_filestream_uninit(void)
411{
412#ifdef XFS_FILESTREAMS_TRACE
413 ktrace_free(xfs_filestreams_trace_buf);
414#endif
415 kmem_zone_destroy(item_zone);
416}
417
418/*
419 * xfs_filestream_mount() is called when a file system is mounted with the
420 * filestream option. It is responsible for allocating the data structures
421 * needed to track the new file system's file streams.
422 */
423int
424xfs_filestream_mount(
425 xfs_mount_t *mp)
426{
427 int err;
428 unsigned int lifetime, grp_count;
429
430 /*
431 * The filestream timer tunable is currently fixed within the range of
432 * one second to four minutes, with five seconds being the default. The
433 * group count is somewhat arbitrary, but it'd be nice to adhere to the
434 * timer tunable to within about 10 percent. This requires at least 10
435 * groups.
436 */
437 lifetime = xfs_fstrm_centisecs * 10;
438 grp_count = 10;
439
440 err = xfs_mru_cache_create(&mp->m_filestream, lifetime, grp_count,
441 (xfs_mru_cache_free_func_t)xfs_fstrm_free_func);
442
443 return err;
444}
445
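
The comment in xfs_filestream_mount() above also pins down the units: xfs_fstrm_centisecs is in centiseconds, so multiplying by 10 gives the lifetime in milliseconds (assuming, as the conversion suggests, that the MRU cache expects milliseconds), and splitting that lifetime across 10 groups keeps the expiry granularity within roughly 10 percent of the tunable. A quick stand-alone check of the arithmetic, using the five-second default the comment mentions:

#include <stdio.h>

int main(void)
{
	/* Five seconds, expressed in centiseconds as the tunable is. */
	unsigned int centisecs = 500;
	unsigned int lifetime  = centisecs * 10;	/* centiseconds -> milliseconds */
	unsigned int grp_count = 10;

	printf("cache lifetime : %u ms\n", lifetime);
	printf("group interval : %u ms (about %u%% of the lifetime)\n",
	       lifetime / grp_count, 100 / grp_count);
	return 0;
}
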
446/*
447 * xfs_filestream_unmount() is called when a file system that was mounted with
448 * the filestream option is unmounted. It drains the data structures created
449 * to track the file system's file streams and frees all the memory that was
450 * allocated.
451 */
452void
453xfs_filestream_unmount(
454 xfs_mount_t *mp)
455{
456 xfs_mru_cache_destroy(mp->m_filestream);
457}
458
459/*
460 * If the mount point's m_perag array is going to be reallocated, all
461 * outstanding cache entries must be flushed to avoid accessing reference count
462 * addresses that have been freed. The call to xfs_filestream_flush() must be
463 * made inside the block that holds the m_peraglock in write mode to do the
464 * reallocation.
465 */
466void
467xfs_filestream_flush(
468 xfs_mount_t *mp)
469{
470 /* point in time flush, so keep the reaper running */
471 xfs_mru_cache_flush(mp->m_filestream, 1);
472}
473
474/*
475 * Return the AG of the filestream the file or directory belongs to, or
476 * NULLAGNUMBER otherwise.
477 */
478xfs_agnumber_t
479xfs_filestream_lookup_ag(
480 xfs_inode_t *ip)
481{
482 xfs_mru_cache_t *cache;
483 fstrm_item_t *item;
484 xfs_agnumber_t ag;
485 int ref;
486
487 if (!(ip->i_d.di_mode & (S_IFREG | S_IFDIR))) {
488 ASSERT(0);
489 return NULLAGNUMBER;
490 }
491
492 cache = ip->i_mount->m_filestream;
493 item = xfs_mru_cache_lookup(cache, ip->i_ino);
494 if (!item) {
495 TRACE_LOOKUP(ip->i_mount, ip, NULL, NULLAGNUMBER, 0);
496 return NULLAGNUMBER;
497 }
498
499 ASSERT(ip == item->ip);
500 ag = item->ag;
501 ref = xfs_filestream_peek_ag(ip->i_mount, ag);
502 xfs_mru_cache_done(cache);
503
504 TRACE_LOOKUP(ip->i_mount, ip, item->pip, ag, ref);
505 return ag;
506}
507
508/*
509 * xfs_filestream_associate() should only be called to associate a regular file
510 * with its parent directory. Calling it with a child directory isn't
511 * appropriate because filestreams don't apply to entire directory hierarchies.
512 * Creating a file in a child directory of an existing filestream directory
513 * starts a new filestream with its own allocation group association.
514 *
515 * Returns < 0 on error, 0 if successful association occurred, > 0 if
516 * we failed to get an association because of locking issues.
517 */
518int
519xfs_filestream_associate(
520 xfs_inode_t *pip,
521 xfs_inode_t *ip)
522{
523 xfs_mount_t *mp;
524 xfs_mru_cache_t *cache;
525 fstrm_item_t *item;
526 xfs_agnumber_t ag, rotorstep, startag;
527 int err = 0;
528
529 ASSERT(pip->i_d.di_mode & S_IFDIR);
530 ASSERT(ip->i_d.di_mode & S_IFREG);
531 if (!(pip->i_d.di_mode & S_IFDIR) || !(ip->i_d.di_mode & S_IFREG))
532 return -EINVAL;
533
534 mp = pip->i_mount;
535 cache = mp->m_filestream;
536 down_read(&mp->m_peraglock);
537
538 /*
539 * We have a problem, Houston.
540 *
541 * Taking the iolock here violates inode locking order - we already
542 * hold the ilock. Hence if we block getting this lock we may never
543 * wake. Unfortunately, that means if we can't get the lock, we're
544 * screwed in terms of getting a stream association - we can't spin
545 * waiting for the lock because someone else is waiting on the lock we
546 * hold and we cannot drop that as we are in a transaction here.
547 *
548 * Lucky for us, this inversion is rarely a problem because it's a
549 * directory inode that we are trying to lock here and that means the
550 * only place that matters is xfs_sync_inodes() and SYNC_DELWRI is
551 * used. i.e. freeze, remount-ro, quotasync or unmount.
552 *
553	 * So, if we can't get the iolock without sleeping, just give up.
554 */
555 if (!xfs_ilock_nowait(pip, XFS_IOLOCK_EXCL)) {
556 up_read(&mp->m_peraglock);
557 return 1;
558 }
559
560 /* If the parent directory is already in the cache, use its AG. */
561 item = xfs_mru_cache_lookup(cache, pip->i_ino);
562 if (item) {
563 ASSERT(item->ip == pip);
564 ag = item->ag;
565 xfs_mru_cache_done(cache);
566
567 TRACE_LOOKUP(mp, pip, pip, ag, xfs_filestream_peek_ag(mp, ag));
568 err = _xfs_filestream_update_ag(ip, pip, ag);
569
570 goto exit;
571 }
572
573 /*
574 * Set the starting AG using the rotor for inode32, otherwise
575 * use the directory inode's AG.
576 */
577 if (mp->m_flags & XFS_MOUNT_32BITINODES) {
578 rotorstep = xfs_rotorstep;
579 startag = (mp->m_agfrotor / rotorstep) % mp->m_sb.sb_agcount;
580 mp->m_agfrotor = (mp->m_agfrotor + 1) %
581 (mp->m_sb.sb_agcount * rotorstep);
582 } else
583 startag = XFS_INO_TO_AGNO(mp, pip->i_ino);
584
585 /* Pick a new AG for the parent inode starting at startag. */
586 err = _xfs_filestream_pick_ag(mp, startag, &ag, 0, 0);
587 if (err || ag == NULLAGNUMBER)
588 goto exit_did_pick;
589
590 /* Associate the parent inode with the AG. */
591 err = _xfs_filestream_update_ag(pip, NULL, ag);
592 if (err)
593 goto exit_did_pick;
594
595 /* Associate the file inode with the AG. */
596 err = _xfs_filestream_update_ag(ip, pip, ag);
597 if (err)
598 goto exit_did_pick;
599
600 TRACE_ASSOCIATE(mp, ip, pip, ag, xfs_filestream_peek_ag(mp, ag));
601
602exit_did_pick:
603 /*
604 * If _xfs_filestream_pick_ag() returned a valid AG, remove the
605 * reference it took on it, since the file and directory will have taken
606 * their own now if they were successfully cached.
607 */
608 if (ag != NULLAGNUMBER)
609 xfs_filestream_put_ag(mp, ag);
610
611exit:
612 xfs_iunlock(pip, XFS_IOLOCK_EXCL);
613 up_read(&mp->m_peraglock);
614 return -err;
615}
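
For inode32 mounts the code above spreads the starting AG across the filesystem with a rotor. The standalone sketch below models only that arithmetic (how m_agfrotor maps to a start AG and wraps, and how the xfs_rotorstep tunable keeps several consecutive picks on the same AG); the variable names and sample values are illustrative.

#include <stdio.h>

int main(void)
{
	unsigned int agcount = 4;	/* example AG count */
	unsigned int rotorstep = 2;	/* models the xfs_rotorstep tunable */
	unsigned int agfrotor = 0;	/* models mp->m_agfrotor */
	int i;

	for (i = 0; i < 6; i++) {
		unsigned int startag = (agfrotor / rotorstep) % agcount;

		printf("call %d: startag %u\n", i, startag);
		/* advance the rotor exactly as the code above does */
		agfrotor = (agfrotor + 1) % (agcount * rotorstep);
	}
	return 0;
}

With rotorstep 2 this prints start AGs 0, 0, 1, 1, 2, 2: each AG receives rotorstep consecutive new streams before the rotor moves on.
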
616
617/*
618 * Pick a new allocation group for the current file and its file stream. This
619 * function is called by xfs_bmap_filestreams() with the mount point's per-ag
620 * lock held.
621 */
622int
623xfs_filestream_new_ag(
624 xfs_bmalloca_t *ap,
625 xfs_agnumber_t *agp)
626{
627 int flags, err;
628 xfs_inode_t *ip, *pip = NULL;
629 xfs_mount_t *mp;
630 xfs_mru_cache_t *cache;
631 xfs_extlen_t minlen;
632 fstrm_item_t *dir, *file;
633 xfs_agnumber_t ag = NULLAGNUMBER;
634
635 ip = ap->ip;
636 mp = ip->i_mount;
637 cache = mp->m_filestream;
638 minlen = ap->alen;
639 *agp = NULLAGNUMBER;
640
641 /*
642 * Look for the file in the cache, removing it if it's found. Doing
643 * this allows it to be held across the dir lookup that follows.
644 */
645 file = xfs_mru_cache_remove(cache, ip->i_ino);
646 if (file) {
647 ASSERT(ip == file->ip);
648
649 /* Save the file's parent inode and old AG number for later. */
650 pip = file->pip;
651 ag = file->ag;
652
653 /* Look for the file's directory in the cache. */
654 dir = xfs_mru_cache_lookup(cache, pip->i_ino);
655 if (dir) {
656 ASSERT(pip == dir->ip);
657
658 /*
659 * If the directory has already moved on to a new AG,
660 * use that AG as the new AG for the file. Don't
661 * forget to twiddle the AG refcounts to match the
662 * movement.
663 */
664 if (dir->ag != file->ag) {
665 xfs_filestream_put_ag(mp, file->ag);
666 xfs_filestream_get_ag(mp, dir->ag);
667 *agp = file->ag = dir->ag;
668 }
669
670 xfs_mru_cache_done(cache);
671 }
672
673 /*
674 * Put the file back in the cache. If this fails, the free
675 * function needs to be called to tidy up in the same way as if
676 * the item had simply expired from the cache.
677 */
678 err = xfs_mru_cache_insert(cache, ip->i_ino, file);
679 if (err) {
680 xfs_fstrm_free_func(ip->i_ino, file);
681 return err;
682 }
683
684 /*
685 * If the file's AG was moved to the directory's new AG, there's
686 * nothing more to be done.
687 */
688 if (*agp != NULLAGNUMBER) {
689 TRACE_MOVEAG(mp, ip, pip,
690 ag, xfs_filestream_peek_ag(mp, ag),
691 *agp, xfs_filestream_peek_ag(mp, *agp));
692 return 0;
693 }
694 }
695
696 /*
697 * If the file's parent directory is known, take its iolock in exclusive
698 * mode to prevent two sibling files from racing each other to migrate
699 * themselves and their parent to different AGs.
700 */
701 if (pip)
702 xfs_ilock(pip, XFS_IOLOCK_EXCL);
703
704 /*
705 * A new AG needs to be found for the file. If the file's parent
706 * directory is also known, it will be moved to the new AG as well to
707 * ensure that files created inside it in future use the new AG.
708 */
709 ag = (ag == NULLAGNUMBER) ? 0 : (ag + 1) % mp->m_sb.sb_agcount;
710 flags = (ap->userdata ? XFS_PICK_USERDATA : 0) |
711 (ap->low ? XFS_PICK_LOWSPACE : 0);
712
713 err = _xfs_filestream_pick_ag(mp, ag, agp, flags, minlen);
714 if (err || *agp == NULLAGNUMBER)
715 goto exit;
716
717 /*
718 * If the file wasn't found in the file cache, then its parent directory
719 * inode isn't known. For this to have happened, the file must either
720 * be pre-existing, or it was created long enough ago that its cache
721 * entry has expired. This isn't the sort of usage that the filestreams
722 * allocator is trying to optimise, so there's no point trying to track
723 * its new AG somehow in the filestream data structures.
724 */
725 if (!pip) {
726 TRACE_ORPHAN(mp, ip, *agp);
727 goto exit;
728 }
729
730 /* Associate the parent inode with the AG. */
731 err = _xfs_filestream_update_ag(pip, NULL, *agp);
732 if (err)
733 goto exit;
734
735 /* Associate the file inode with the AG. */
736 err = _xfs_filestream_update_ag(ip, pip, *agp);
737 if (err)
738 goto exit;
739
740 TRACE_MOVEAG(mp, ip, pip, NULLAGNUMBER, 0,
741 *agp, xfs_filestream_peek_ag(mp, *agp));
742
743exit:
744 /*
745 * If _xfs_filestream_pick_ag() returned a valid AG, remove the
746 * reference it took on it, since the file and directory will have taken
747 * their own now if they were successfully cached.
748 */
749 if (*agp != NULLAGNUMBER)
750 xfs_filestream_put_ag(mp, *agp);
751 else
752 *agp = 0;
753
754 if (pip)
755 xfs_iunlock(pip, XFS_IOLOCK_EXCL);
756
757 return err;
758}
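
The lines above choose where the AG search starts (one AG past the old association, wrapping at the AG count) and which pick flags apply. A tiny standalone model of that step, with made-up values and MODEL_* names standing in for the real constants:

#include <stdio.h>

#define MODEL_NULLAGNUMBER	((unsigned int)-1)
#define MODEL_PICK_USERDATA	1
#define MODEL_PICK_LOWSPACE	2

int main(void)
{
	unsigned int agcount = 8;
	unsigned int old_ag = 7;	/* previously cached AG for the file */
	unsigned int startag, flags;

	/* start one AG past the old association, wrapping at agcount */
	startag = (old_ag == MODEL_NULLAGNUMBER) ? 0 : (old_ag + 1) % agcount;

	/* userdata allocation that is not on the low-space path */
	flags = MODEL_PICK_USERDATA;

	printf("search starts at AG %u with flags 0x%x\n", startag, flags);
	return 0;
}
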
759
760/*
761 * Remove an association between an inode and a filestream object.
762 * Typically this is done on last close of an unlinked file.
763 */
764void
765xfs_filestream_deassociate(
766 xfs_inode_t *ip)
767{
768 xfs_mru_cache_t *cache = ip->i_mount->m_filestream;
769
770 xfs_mru_cache_delete(cache, ip->i_ino);
771}
diff --git a/fs/xfs/xfs_filestream.h b/fs/xfs/xfs_filestream.h
new file mode 100644
index 000000000000..f655f7dc334c
--- /dev/null
+++ b/fs/xfs/xfs_filestream.h
@@ -0,0 +1,136 @@
1/*
2 * Copyright (c) 2006-2007 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef __XFS_FILESTREAM_H__
19#define __XFS_FILESTREAM_H__
20
21#ifdef __KERNEL__
22
23struct xfs_mount;
24struct xfs_inode;
25struct xfs_perag;
26struct xfs_bmalloca;
27
28#ifdef XFS_FILESTREAMS_TRACE
29#define XFS_FSTRM_KTRACE_INFO 1
30#define XFS_FSTRM_KTRACE_AGSCAN 2
31#define XFS_FSTRM_KTRACE_AGPICK1 3
32#define XFS_FSTRM_KTRACE_AGPICK2 4
33#define XFS_FSTRM_KTRACE_UPDATE 5
34#define XFS_FSTRM_KTRACE_FREE 6
35#define XFS_FSTRM_KTRACE_ITEM_LOOKUP 7
36#define XFS_FSTRM_KTRACE_ASSOCIATE 8
37#define XFS_FSTRM_KTRACE_MOVEAG 9
38#define XFS_FSTRM_KTRACE_ORPHAN 10
39
40#define XFS_FSTRM_KTRACE_SIZE 16384
41extern ktrace_t *xfs_filestreams_trace_buf;
42
43#endif
44
45/*
46 * Allocation group filestream associations are tracked with per-ag atomic
47 * counters. These counters allow _xfs_filestream_pick_ag() to tell whether a
48 * particular AG already has active filestreams associated with it. The mount
49 * point's m_peraglock is used to protect these counters from per-ag array
50 * re-allocation during a growfs operation. When xfs_growfs_data_private() is
51 * about to reallocate the array, it calls xfs_filestream_flush() with the
52 * m_peraglock held in write mode.
53 *
54 * Since xfs_mru_cache_flush() guarantees that all the free functions for all
55 * the cache elements have finished executing before it returns, it's safe for
56 * the free functions to use the atomic counters without m_peraglock protection.
57 * This allows the implementation of xfs_fstrm_free_func() to be agnostic about
58 * whether it was called with the m_peraglock held in read mode, write mode or
59 * not held at all. The race condition this addresses is the following:
60 *
61 * - The work queue scheduler fires and pulls a filestream directory cache
62 * element off the LRU end of the cache for deletion, then gets pre-empted.
63 * - A growfs operation grabs the m_peraglock in write mode, flushes all the
64 * remaining items from the cache and reallocates the mount point's per-ag
65 * array, resetting all the counters to zero.
66 * - The work queue thread resumes and calls the free function for the element
67 * it started cleaning up earlier. In the process it decrements the
68 * filestreams counter for an AG that now has no references.
69 *
70 * With a shrinkfs feature, the above scenario could panic the system.
71 *
72 * All other uses of the following macros should be protected by either the
73 * m_peraglock held in read mode, or the cache's internal locking exposed by the
74 * interval between a call to xfs_mru_cache_lookup() and a call to
75 * xfs_mru_cache_done(). In addition, the m_peraglock must be held in read mode
76 * when new elements are added to the cache.
77 *
78 * Combined, these locking rules ensure that no associations will ever exist in
79 * the cache that reference per-ag array elements that have since been
80 * reallocated.
81 */
82STATIC_INLINE int
83xfs_filestream_peek_ag(
84 xfs_mount_t *mp,
85 xfs_agnumber_t agno)
86{
87 return atomic_read(&mp->m_perag[agno].pagf_fstrms);
88}
89
90STATIC_INLINE int
91xfs_filestream_get_ag(
92 xfs_mount_t *mp,
93 xfs_agnumber_t agno)
94{
95 return atomic_inc_return(&mp->m_perag[agno].pagf_fstrms);
96}
97
98STATIC_INLINE int
99xfs_filestream_put_ag(
100 xfs_mount_t *mp,
101 xfs_agnumber_t agno)
102{
103 return atomic_dec_return(&mp->m_perag[agno].pagf_fstrms);
104}
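
Outside the xfs_mru_cache_lookup()/xfs_mru_cache_done() window, the rules above require m_peraglock in read mode around these counter helpers. A minimal sketch of that read-side pattern, assuming the usual XFS kernel context (this helper is illustrative and not part of the patch):

/*
 * Illustrative only: read an AG's filestream count under m_peraglock,
 * following the locking rules described above.
 */
static int example_peek_fstrms(xfs_mount_t *mp, xfs_agnumber_t agno)
{
	int ref;

	down_read(&mp->m_peraglock);
	ref = xfs_filestream_peek_ag(mp, agno);
	up_read(&mp->m_peraglock);

	return ref;
}
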
105
106/* allocation selection flags */
107typedef enum xfs_fstrm_alloc {
108 XFS_PICK_USERDATA = 1,
109 XFS_PICK_LOWSPACE = 2,
110} xfs_fstrm_alloc_t;
111
112/* prototypes for filestream.c */
113int xfs_filestream_init(void);
114void xfs_filestream_uninit(void);
115int xfs_filestream_mount(struct xfs_mount *mp);
116void xfs_filestream_unmount(struct xfs_mount *mp);
117void xfs_filestream_flush(struct xfs_mount *mp);
118xfs_agnumber_t xfs_filestream_lookup_ag(struct xfs_inode *ip);
119int xfs_filestream_associate(struct xfs_inode *dip, struct xfs_inode *ip);
120void xfs_filestream_deassociate(struct xfs_inode *ip);
121int xfs_filestream_new_ag(struct xfs_bmalloca *ap, xfs_agnumber_t *agp);
122
123
124/* filestreams for the inode? */
125STATIC_INLINE int
126xfs_inode_is_filestream(
127 struct xfs_inode *ip)
128{
129 return (ip->i_mount->m_flags & XFS_MOUNT_FILESTREAMS) ||
130 xfs_iflags_test(ip, XFS_IFILESTREAM) ||
131 (ip->i_d.di_flags & XFS_DIFLAG_FILESTREAM);
132}
133
134#endif /* __KERNEL__ */
135
136#endif /* __XFS_FILESTREAM_H__ */
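
Taken together, the prototypes above cover the whole filestream lifecycle. The sketch below condenses how this patch's callers use them, with error handling trimmed; it is illustrative only and assumes the usual XFS kernel context:

/*
 * Illustrative only (not part of this patch): a condensed view of how
 * the interfaces above are used by their callers.
 */
static void example_filestream_lifecycle(xfs_mount_t *mp, xfs_inode_t *pip,
					 xfs_inode_t *ip)
{
	xfs_agnumber_t	ag;

	(void)xfs_filestream_mount(mp);		/* at mount time */

	/* at file creation inside a filestream directory */
	if (xfs_inode_is_filestream(pip) &&
	    xfs_filestream_associate(pip, ip) == 0)
		xfs_iflags_set(ip, XFS_IFILESTREAM);

	ag = xfs_filestream_lookup_ag(ip);	/* at allocation time */
	(void)ag;

	xfs_filestream_deassociate(ip);		/* last close of unlinked file */
	xfs_filestream_unmount(mp);		/* at unmount time */
}
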
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index 1335449841cd..ec3c9c27e0de 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -66,6 +66,7 @@ struct fsxattr {
66#define XFS_XFLAG_EXTSIZE 0x00000800 /* extent size allocator hint */ 66#define XFS_XFLAG_EXTSIZE 0x00000800 /* extent size allocator hint */
67#define XFS_XFLAG_EXTSZINHERIT 0x00001000 /* inherit inode extent size */ 67#define XFS_XFLAG_EXTSZINHERIT 0x00001000 /* inherit inode extent size */
68#define XFS_XFLAG_NODEFRAG 0x00002000 /* do not defragment */ 68#define XFS_XFLAG_NODEFRAG 0x00002000 /* do not defragment */
69#define XFS_XFLAG_FILESTREAM 0x00004000 /* use filestream allocator */
69#define XFS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */ 70#define XFS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */
70 71
71/* 72/*
@@ -238,6 +239,7 @@ typedef struct xfs_fsop_resblks {
238#define XFS_FSOP_GEOM_FLAGS_LOGV2 0x0100 /* log format version 2 */ 239#define XFS_FSOP_GEOM_FLAGS_LOGV2 0x0100 /* log format version 2 */
239#define XFS_FSOP_GEOM_FLAGS_SECTOR 0x0200 /* sector sizes >1BB */ 240#define XFS_FSOP_GEOM_FLAGS_SECTOR 0x0200 /* sector sizes >1BB */
240#define XFS_FSOP_GEOM_FLAGS_ATTR2 0x0400 /* inline attributes rework */ 241#define XFS_FSOP_GEOM_FLAGS_ATTR2 0x0400 /* inline attributes rework */
242#define XFS_FSOP_GEOM_FLAGS_LAZYSB 0x4000 /* lazy superblock counters */
241 243
242 244
243/* 245/*
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index b599e6be9ec1..432e82347ed6 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -44,6 +44,7 @@
44#include "xfs_trans_space.h" 44#include "xfs_trans_space.h"
45#include "xfs_rtalloc.h" 45#include "xfs_rtalloc.h"
46#include "xfs_rw.h" 46#include "xfs_rw.h"
47#include "xfs_filestream.h"
47 48
48/* 49/*
49 * File system operations 50 * File system operations
@@ -94,6 +95,8 @@ xfs_fs_geometry(
94 XFS_FSOP_GEOM_FLAGS_DIRV2 : 0) | 95 XFS_FSOP_GEOM_FLAGS_DIRV2 : 0) |
95 (XFS_SB_VERSION_HASSECTOR(&mp->m_sb) ? 96 (XFS_SB_VERSION_HASSECTOR(&mp->m_sb) ?
96 XFS_FSOP_GEOM_FLAGS_SECTOR : 0) | 97 XFS_FSOP_GEOM_FLAGS_SECTOR : 0) |
98 (xfs_sb_version_haslazysbcount(&mp->m_sb) ?
99 XFS_FSOP_GEOM_FLAGS_LAZYSB : 0) |
97 (XFS_SB_VERSION_HASATTR2(&mp->m_sb) ? 100 (XFS_SB_VERSION_HASATTR2(&mp->m_sb) ?
98 XFS_FSOP_GEOM_FLAGS_ATTR2 : 0); 101 XFS_FSOP_GEOM_FLAGS_ATTR2 : 0);
99 geo->logsectsize = XFS_SB_VERSION_HASSECTOR(&mp->m_sb) ? 102 geo->logsectsize = XFS_SB_VERSION_HASSECTOR(&mp->m_sb) ?
@@ -140,6 +143,8 @@ xfs_growfs_data_private(
140 pct = in->imaxpct; 143 pct = in->imaxpct;
141 if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100) 144 if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100)
142 return XFS_ERROR(EINVAL); 145 return XFS_ERROR(EINVAL);
146 if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
147 return error;
143 dpct = pct - mp->m_sb.sb_imax_pct; 148 dpct = pct - mp->m_sb.sb_imax_pct;
144 error = xfs_read_buf(mp, mp->m_ddev_targp, 149 error = xfs_read_buf(mp, mp->m_ddev_targp,
145 XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1), 150 XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
@@ -161,6 +166,7 @@ xfs_growfs_data_private(
161 new = nb - mp->m_sb.sb_dblocks; 166 new = nb - mp->m_sb.sb_dblocks;
162 oagcount = mp->m_sb.sb_agcount; 167 oagcount = mp->m_sb.sb_agcount;
163 if (nagcount > oagcount) { 168 if (nagcount > oagcount) {
169 xfs_filestream_flush(mp);
164 down_write(&mp->m_peraglock); 170 down_write(&mp->m_peraglock);
165 mp->m_perag = kmem_realloc(mp->m_perag, 171 mp->m_perag = kmem_realloc(mp->m_perag,
166 sizeof(xfs_perag_t) * nagcount, 172 sizeof(xfs_perag_t) * nagcount,
@@ -173,6 +179,7 @@ xfs_growfs_data_private(
173 up_write(&mp->m_peraglock); 179 up_write(&mp->m_peraglock);
174 } 180 }
175 tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS); 181 tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
182 tp->t_flags |= XFS_TRANS_RESERVE;
176 if ((error = xfs_trans_reserve(tp, XFS_GROWFS_SPACE_RES(mp), 183 if ((error = xfs_trans_reserve(tp, XFS_GROWFS_SPACE_RES(mp),
177 XFS_GROWDATA_LOG_RES(mp), 0, 0, 0))) { 184 XFS_GROWDATA_LOG_RES(mp), 0, 0, 0))) {
178 xfs_trans_cancel(tp, 0); 185 xfs_trans_cancel(tp, 0);
@@ -328,6 +335,7 @@ xfs_growfs_data_private(
328 be32_add(&agf->agf_length, new); 335 be32_add(&agf->agf_length, new);
329 ASSERT(be32_to_cpu(agf->agf_length) == 336 ASSERT(be32_to_cpu(agf->agf_length) ==
330 be32_to_cpu(agi->agi_length)); 337 be32_to_cpu(agi->agi_length));
338 xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
331 /* 339 /*
332 * Free the new space. 340 * Free the new space.
333 */ 341 */
@@ -494,8 +502,9 @@ xfs_reserve_blocks(
494 unsigned long s; 502 unsigned long s;
495 503
496 /* If inval is null, report current values and return */ 504 /* If inval is null, report current values and return */
497
498 if (inval == (__uint64_t *)NULL) { 505 if (inval == (__uint64_t *)NULL) {
506 if (!outval)
507 return EINVAL;
499 outval->resblks = mp->m_resblks; 508 outval->resblks = mp->m_resblks;
500 outval->resblks_avail = mp->m_resblks_avail; 509 outval->resblks_avail = mp->m_resblks_avail;
501 return 0; 510 return 0;
@@ -558,8 +567,10 @@ retry:
558 } 567 }
559 } 568 }
560out: 569out:
561 outval->resblks = mp->m_resblks; 570 if (outval) {
562 outval->resblks_avail = mp->m_resblks_avail; 571 outval->resblks = mp->m_resblks;
572 outval->resblks_avail = mp->m_resblks_avail;
573 }
563 XFS_SB_UNLOCK(mp, s); 574 XFS_SB_UNLOCK(mp, s);
564 575
565 if (fdblks_delta) { 576 if (fdblks_delta) {
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index b5feb3e77116..f943368c9b93 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -123,6 +123,7 @@ xfs_ialloc_ag_alloc(
123 int blks_per_cluster; /* fs blocks per inode cluster */ 123 int blks_per_cluster; /* fs blocks per inode cluster */
124 xfs_btree_cur_t *cur; /* inode btree cursor */ 124 xfs_btree_cur_t *cur; /* inode btree cursor */
125 xfs_daddr_t d; /* disk addr of buffer */ 125 xfs_daddr_t d; /* disk addr of buffer */
126 xfs_agnumber_t agno;
126 int error; 127 int error;
127 xfs_buf_t *fbuf; /* new free inodes' buffer */ 128 xfs_buf_t *fbuf; /* new free inodes' buffer */
128 xfs_dinode_t *free; /* new free inode structure */ 129 xfs_dinode_t *free; /* new free inode structure */
@@ -302,15 +303,15 @@ xfs_ialloc_ag_alloc(
302 } 303 }
303 be32_add(&agi->agi_count, newlen); 304 be32_add(&agi->agi_count, newlen);
304 be32_add(&agi->agi_freecount, newlen); 305 be32_add(&agi->agi_freecount, newlen);
306 agno = be32_to_cpu(agi->agi_seqno);
305 down_read(&args.mp->m_peraglock); 307 down_read(&args.mp->m_peraglock);
306 args.mp->m_perag[be32_to_cpu(agi->agi_seqno)].pagi_freecount += newlen; 308 args.mp->m_perag[agno].pagi_freecount += newlen;
307 up_read(&args.mp->m_peraglock); 309 up_read(&args.mp->m_peraglock);
308 agi->agi_newino = cpu_to_be32(newino); 310 agi->agi_newino = cpu_to_be32(newino);
309 /* 311 /*
310 * Insert records describing the new inode chunk into the btree. 312 * Insert records describing the new inode chunk into the btree.
311 */ 313 */
312 cur = xfs_btree_init_cursor(args.mp, tp, agbp, 314 cur = xfs_btree_init_cursor(args.mp, tp, agbp, agno,
313 be32_to_cpu(agi->agi_seqno),
314 XFS_BTNUM_INO, (xfs_inode_t *)0, 0); 315 XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
315 for (thisino = newino; 316 for (thisino = newino;
316 thisino < newino + newlen; 317 thisino < newino + newlen;
@@ -1387,6 +1388,7 @@ xfs_ialloc_read_agi(
1387 pag = &mp->m_perag[agno]; 1388 pag = &mp->m_perag[agno];
1388 if (!pag->pagi_init) { 1389 if (!pag->pagi_init) {
1389 pag->pagi_freecount = be32_to_cpu(agi->agi_freecount); 1390 pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
1391 pag->pagi_count = be32_to_cpu(agi->agi_count);
1390 pag->pagi_init = 1; 1392 pag->pagi_init = 1;
1391 } else { 1393 } else {
1392 /* 1394 /*
@@ -1410,3 +1412,23 @@ xfs_ialloc_read_agi(
1410 *bpp = bp; 1412 *bpp = bp;
1411 return 0; 1413 return 0;
1412} 1414}
1415
1416/*
1417 * Read in the agi to initialise the per-ag data in the mount structure
1418 */
1419int
1420xfs_ialloc_pagi_init(
1421 xfs_mount_t *mp, /* file system mount structure */
1422 xfs_trans_t *tp, /* transaction pointer */
1423 xfs_agnumber_t agno) /* allocation group number */
1424{
1425 xfs_buf_t *bp = NULL;
1426 int error;
1427
1428 error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
1429 if (error)
1430 return error;
1431 if (bp)
1432 xfs_trans_brelse(tp, bp);
1433 return 0;
1434}
diff --git a/fs/xfs/xfs_ialloc.h b/fs/xfs/xfs_ialloc.h
index 7f5debe1acb6..97f4040931ca 100644
--- a/fs/xfs/xfs_ialloc.h
+++ b/fs/xfs/xfs_ialloc.h
@@ -149,6 +149,16 @@ xfs_ialloc_read_agi(
149 xfs_agnumber_t agno, /* allocation group number */ 149 xfs_agnumber_t agno, /* allocation group number */
150 struct xfs_buf **bpp); /* allocation group hdr buf */ 150 struct xfs_buf **bpp); /* allocation group hdr buf */
151 151
152/*
153 * Read in the allocation group header to initialise the per-ag data
154 * in the mount structure
155 */
156int
157xfs_ialloc_pagi_init(
158 struct xfs_mount *mp, /* file system mount structure */
159 struct xfs_trans *tp, /* transaction pointer */
160 xfs_agnumber_t agno); /* allocation group number */
161
152#endif /* __KERNEL__ */ 162#endif /* __KERNEL__ */
153 163
154#endif /* __XFS_IALLOC_H__ */ 164#endif /* __XFS_IALLOC_H__ */
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 3ca5d43b8345..cdc4c28926d0 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -48,7 +48,9 @@
48#include "xfs_dir2_trace.h" 48#include "xfs_dir2_trace.h"
49#include "xfs_quota.h" 49#include "xfs_quota.h"
50#include "xfs_acl.h" 50#include "xfs_acl.h"
51#include "xfs_filestream.h"
51 52
53#include <linux/log2.h>
52 54
53kmem_zone_t *xfs_ifork_zone; 55kmem_zone_t *xfs_ifork_zone;
54kmem_zone_t *xfs_inode_zone; 56kmem_zone_t *xfs_inode_zone;
@@ -643,8 +645,7 @@ xfs_iformat_extents(
643 ep->l1 = INT_GET(get_unaligned((__uint64_t*)&dp->l1), 645 ep->l1 = INT_GET(get_unaligned((__uint64_t*)&dp->l1),
644 ARCH_CONVERT); 646 ARCH_CONVERT);
645 } 647 }
646 xfs_bmap_trace_exlist("xfs_iformat_extents", ip, nex, 648 XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
647 whichfork);
648 if (whichfork != XFS_DATA_FORK || 649 if (whichfork != XFS_DATA_FORK ||
649 XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE) 650 XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
650 if (unlikely(xfs_check_nostate_extents( 651 if (unlikely(xfs_check_nostate_extents(
@@ -817,6 +818,8 @@ _xfs_dic2xflags(
817 flags |= XFS_XFLAG_EXTSZINHERIT; 818 flags |= XFS_XFLAG_EXTSZINHERIT;
818 if (di_flags & XFS_DIFLAG_NODEFRAG) 819 if (di_flags & XFS_DIFLAG_NODEFRAG)
819 flags |= XFS_XFLAG_NODEFRAG; 820 flags |= XFS_XFLAG_NODEFRAG;
821 if (di_flags & XFS_DIFLAG_FILESTREAM)
822 flags |= XFS_XFLAG_FILESTREAM;
820 } 823 }
821 824
822 return flags; 825 return flags;
@@ -1074,6 +1077,11 @@ xfs_iread_extents(
1074 * also returns the [locked] bp pointing to the head of the freelist 1077 * also returns the [locked] bp pointing to the head of the freelist
1075 * as ialloc_context. The caller should hold this buffer across 1078 * as ialloc_context. The caller should hold this buffer across
1076 * the commit and pass it back into this routine on the second call. 1079 * the commit and pass it back into this routine on the second call.
1080 *
1081 * If we are allocating quota inodes, we do not have a parent inode
1082 * to attach to or associate with (i.e. pip == NULL) because they
1083 * are not linked into the directory structure - they are attached
1084 * directly to the superblock - and so have no parent.
1077 */ 1085 */
1078int 1086int
1079xfs_ialloc( 1087xfs_ialloc(
@@ -1099,7 +1107,7 @@ xfs_ialloc(
1099 * Call the space management code to pick 1107 * Call the space management code to pick
1100 * the on-disk inode to be allocated. 1108 * the on-disk inode to be allocated.
1101 */ 1109 */
1102 error = xfs_dialloc(tp, pip->i_ino, mode, okalloc, 1110 error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
1103 ialloc_context, call_again, &ino); 1111 ialloc_context, call_again, &ino);
1104 if (error != 0) { 1112 if (error != 0) {
1105 return error; 1113 return error;
@@ -1150,10 +1158,10 @@ xfs_ialloc(
1150 /* 1158 /*
1151 * Project ids won't be stored on disk if we are using a version 1 inode. 1159 * Project ids won't be stored on disk if we are using a version 1 inode.
1152 */ 1160 */
1153 if ( (prid != 0) && (ip->i_d.di_version == XFS_DINODE_VERSION_1)) 1161 if ((prid != 0) && (ip->i_d.di_version == XFS_DINODE_VERSION_1))
1154 xfs_bump_ino_vers2(tp, ip); 1162 xfs_bump_ino_vers2(tp, ip);
1155 1163
1156 if (XFS_INHERIT_GID(pip, vp->v_vfsp)) { 1164 if (pip && XFS_INHERIT_GID(pip, vp->v_vfsp)) {
1157 ip->i_d.di_gid = pip->i_d.di_gid; 1165 ip->i_d.di_gid = pip->i_d.di_gid;
1158 if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) { 1166 if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) {
1159 ip->i_d.di_mode |= S_ISGID; 1167 ip->i_d.di_mode |= S_ISGID;
@@ -1195,8 +1203,16 @@ xfs_ialloc(
1195 flags |= XFS_ILOG_DEV; 1203 flags |= XFS_ILOG_DEV;
1196 break; 1204 break;
1197 case S_IFREG: 1205 case S_IFREG:
1206 if (pip && xfs_inode_is_filestream(pip)) {
1207 error = xfs_filestream_associate(pip, ip);
1208 if (error < 0)
1209 return -error;
1210 if (!error)
1211 xfs_iflags_set(ip, XFS_IFILESTREAM);
1212 }
1213 /* fall through */
1198 case S_IFDIR: 1214 case S_IFDIR:
1199 if (unlikely(pip->i_d.di_flags & XFS_DIFLAG_ANY)) { 1215 if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
1200 uint di_flags = 0; 1216 uint di_flags = 0;
1201 1217
1202 if ((mode & S_IFMT) == S_IFDIR) { 1218 if ((mode & S_IFMT) == S_IFDIR) {
@@ -1233,6 +1249,8 @@ xfs_ialloc(
1233 if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) && 1249 if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
1234 xfs_inherit_nodefrag) 1250 xfs_inherit_nodefrag)
1235 di_flags |= XFS_DIFLAG_NODEFRAG; 1251 di_flags |= XFS_DIFLAG_NODEFRAG;
1252 if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
1253 di_flags |= XFS_DIFLAG_FILESTREAM;
1236 ip->i_d.di_flags |= di_flags; 1254 ip->i_d.di_flags |= di_flags;
1237 } 1255 }
1238 /* FALLTHROUGH */ 1256 /* FALLTHROUGH */
@@ -2875,9 +2893,6 @@ xfs_iextents_copy(
2875 int copied; 2893 int copied;
2876 xfs_bmbt_rec_t *dest_ep; 2894 xfs_bmbt_rec_t *dest_ep;
2877 xfs_bmbt_rec_t *ep; 2895 xfs_bmbt_rec_t *ep;
2878#ifdef XFS_BMAP_TRACE
2879 static char fname[] = "xfs_iextents_copy";
2880#endif
2881 int i; 2896 int i;
2882 xfs_ifork_t *ifp; 2897 xfs_ifork_t *ifp;
2883 int nrecs; 2898 int nrecs;
@@ -2888,7 +2903,7 @@ xfs_iextents_copy(
2888 ASSERT(ifp->if_bytes > 0); 2903 ASSERT(ifp->if_bytes > 0);
2889 2904
2890 nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 2905 nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2891 xfs_bmap_trace_exlist(fname, ip, nrecs, whichfork); 2906 XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
2892 ASSERT(nrecs > 0); 2907 ASSERT(nrecs > 0);
2893 2908
2894 /* 2909 /*
@@ -4184,7 +4199,7 @@ xfs_iext_realloc_direct(
4184 ifp->if_bytes = new_size; 4199 ifp->if_bytes = new_size;
4185 return; 4200 return;
4186 } 4201 }
4187 if ((new_size & (new_size - 1)) != 0) { 4202 if (!is_power_of_2(new_size)){
4188 rnew_size = xfs_iroundup(new_size); 4203 rnew_size = xfs_iroundup(new_size);
4189 } 4204 }
4190 if (rnew_size != ifp->if_real_bytes) { 4205 if (rnew_size != ifp->if_real_bytes) {
@@ -4207,7 +4222,7 @@ xfs_iext_realloc_direct(
4207 */ 4222 */
4208 else { 4223 else {
4209 new_size += ifp->if_bytes; 4224 new_size += ifp->if_bytes;
4210 if ((new_size & (new_size - 1)) != 0) { 4225 if (!is_power_of_2(new_size)) {
4211 rnew_size = xfs_iroundup(new_size); 4226 rnew_size = xfs_iroundup(new_size);
4212 } 4227 }
4213 xfs_iext_inline_to_direct(ifp, rnew_size); 4228 xfs_iext_inline_to_direct(ifp, rnew_size);
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index f75afecef8e7..012dfd4a958c 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -379,6 +379,7 @@ xfs_iflags_test(xfs_inode_t *ip, unsigned short flags)
379#define XFS_ISTALE 0x0010 /* inode has been staled */ 379#define XFS_ISTALE 0x0010 /* inode has been staled */
380#define XFS_IRECLAIMABLE 0x0020 /* inode can be reclaimed */ 380#define XFS_IRECLAIMABLE 0x0020 /* inode can be reclaimed */
381#define XFS_INEW 0x0040 381#define XFS_INEW 0x0040
382#define XFS_IFILESTREAM 0x0080 /* inode is in a filestream directory */
382 383
383/* 384/*
384 * Flags for inode locking. 385 * Flags for inode locking.
@@ -414,19 +415,22 @@ xfs_iflags_test(xfs_inode_t *ip, unsigned short flags)
414 * gets a lockdep subclass of 1 and the second lock will have a lockdep 415 * gets a lockdep subclass of 1 and the second lock will have a lockdep
415 * subclass of 0. 416 * subclass of 0.
416 * 417 *
417 * XFS_I[O]LOCK_INUMORDER - for locking several inodes at the some time 418 * XFS_LOCK_INUMORDER - for locking several inodes at the some time
418 * with xfs_lock_inodes(). This flag is used as the starting subclass 419 * with xfs_lock_inodes(). This flag is used as the starting subclass
419 * and each subsequent lock acquired will increment the subclass by one. 420 * and each subsequent lock acquired will increment the subclass by one.
420 * So the first lock acquired will have a lockdep subclass of 2, the 421 * So the first lock acquired will have a lockdep subclass of 2, the
421 * second lock will have a lockdep subclass of 3, and so on. 422 * second lock will have a lockdep subclass of 3, and so on. It is
423 * the responsibility of the class builder to shift this to the correct
424 * portion of the lock_mode lockdep mask.
422 */ 425 */
426#define XFS_LOCK_PARENT 1
427#define XFS_LOCK_INUMORDER 2
428
423#define XFS_IOLOCK_SHIFT 16 429#define XFS_IOLOCK_SHIFT 16
424#define XFS_IOLOCK_PARENT (1 << XFS_IOLOCK_SHIFT) 430#define XFS_IOLOCK_PARENT (XFS_LOCK_PARENT << XFS_IOLOCK_SHIFT)
425#define XFS_IOLOCK_INUMORDER (2 << XFS_IOLOCK_SHIFT)
426 431
427#define XFS_ILOCK_SHIFT 24 432#define XFS_ILOCK_SHIFT 24
428#define XFS_ILOCK_PARENT (1 << XFS_ILOCK_SHIFT) 433#define XFS_ILOCK_PARENT (XFS_LOCK_PARENT << XFS_ILOCK_SHIFT)
429#define XFS_ILOCK_INUMORDER (2 << XFS_ILOCK_SHIFT)
430 434
431#define XFS_IOLOCK_DEP_MASK 0x00ff0000 435#define XFS_IOLOCK_DEP_MASK 0x00ff0000
432#define XFS_ILOCK_DEP_MASK 0xff000000 436#define XFS_ILOCK_DEP_MASK 0xff000000
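
The scheme above packs lockdep subclasses into byte-wide fields of the lock flags. The standalone program below recomputes the resulting values from the constants shown in this hunk, including the INUMORDER-based subclasses (2, 3, 4, ...) that xfs_lock_inodes() is described as using:

/* Standalone check of the lockdep subclass encoding described above. */
#include <stdio.h>

/* constants copied from the hunk above */
#define XFS_LOCK_PARENT		1
#define XFS_LOCK_INUMORDER	2
#define XFS_IOLOCK_SHIFT	16
#define XFS_ILOCK_SHIFT		24
#define XFS_IOLOCK_DEP_MASK	0x00ff0000

int main(void)
{
	int i;

	printf("XFS_IOLOCK_PARENT = 0x%x\n", XFS_LOCK_PARENT << XFS_IOLOCK_SHIFT);
	printf("XFS_ILOCK_PARENT  = 0x%x\n", XFS_LOCK_PARENT << XFS_ILOCK_SHIFT);

	/* xfs_lock_inodes(): the i-th inode gets subclass XFS_LOCK_INUMORDER + i */
	for (i = 0; i < 3; i++)
		printf("inode %d iolock subclass bits = 0x%x\n", i,
		       ((XFS_LOCK_INUMORDER + i) << XFS_IOLOCK_SHIFT) &
		       XFS_IOLOCK_DEP_MASK);
	return 0;
}
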
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 3f2b9f2a7b94..bf57b75acb90 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -451,19 +451,14 @@ xfs_iomap_write_direct(
451 return XFS_ERROR(error); 451 return XFS_ERROR(error);
452 452
453 rt = XFS_IS_REALTIME_INODE(ip); 453 rt = XFS_IS_REALTIME_INODE(ip);
454 if (unlikely(rt)) { 454 extsz = xfs_get_extsz_hint(ip);
455 if (!(extsz = ip->i_d.di_extsize))
456 extsz = mp->m_sb.sb_rextsize;
457 } else {
458 extsz = ip->i_d.di_extsize;
459 }
460 455
461 isize = ip->i_size; 456 isize = ip->i_size;
462 if (io->io_new_size > isize) 457 if (io->io_new_size > isize)
463 isize = io->io_new_size; 458 isize = io->io_new_size;
464 459
465 offset_fsb = XFS_B_TO_FSBT(mp, offset); 460 offset_fsb = XFS_B_TO_FSBT(mp, offset);
466 last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); 461 last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
467 if ((offset + count) > isize) { 462 if ((offset + count) > isize) {
468 error = xfs_iomap_eof_align_last_fsb(mp, io, isize, extsz, 463 error = xfs_iomap_eof_align_last_fsb(mp, io, isize, extsz,
469 &last_fsb); 464 &last_fsb);
@@ -489,13 +484,13 @@ xfs_iomap_write_direct(
489 if (unlikely(rt)) { 484 if (unlikely(rt)) {
490 resrtextents = qblocks = resaligned; 485 resrtextents = qblocks = resaligned;
491 resrtextents /= mp->m_sb.sb_rextsize; 486 resrtextents /= mp->m_sb.sb_rextsize;
492 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); 487 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
493 quota_flag = XFS_QMOPT_RES_RTBLKS; 488 quota_flag = XFS_QMOPT_RES_RTBLKS;
494 } else { 489 } else {
495 resrtextents = 0; 490 resrtextents = 0;
496 resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned); 491 resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
497 quota_flag = XFS_QMOPT_RES_REGBLKS; 492 quota_flag = XFS_QMOPT_RES_REGBLKS;
498 } 493 }
499 494
500 /* 495 /*
501 * Allocate and setup the transaction 496 * Allocate and setup the transaction
@@ -666,13 +661,7 @@ xfs_iomap_write_delay(
666 if (error) 661 if (error)
667 return XFS_ERROR(error); 662 return XFS_ERROR(error);
668 663
669 if (XFS_IS_REALTIME_INODE(ip)) { 664 extsz = xfs_get_extsz_hint(ip);
670 if (!(extsz = ip->i_d.di_extsize))
671 extsz = mp->m_sb.sb_rextsize;
672 } else {
673 extsz = ip->i_d.di_extsize;
674 }
675
676 offset_fsb = XFS_B_TO_FSBT(mp, offset); 665 offset_fsb = XFS_B_TO_FSBT(mp, offset);
677 666
678retry: 667retry:
@@ -788,18 +777,12 @@ xfs_iomap_write_allocate(
788 nimaps = 0; 777 nimaps = 0;
789 while (nimaps == 0) { 778 while (nimaps == 0) {
790 tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE); 779 tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
780 tp->t_flags |= XFS_TRANS_RESERVE;
791 nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); 781 nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
792 error = xfs_trans_reserve(tp, nres, 782 error = xfs_trans_reserve(tp, nres,
793 XFS_WRITE_LOG_RES(mp), 783 XFS_WRITE_LOG_RES(mp),
794 0, XFS_TRANS_PERM_LOG_RES, 784 0, XFS_TRANS_PERM_LOG_RES,
795 XFS_WRITE_LOG_COUNT); 785 XFS_WRITE_LOG_COUNT);
796 if (error == ENOSPC) {
797 error = xfs_trans_reserve(tp, 0,
798 XFS_WRITE_LOG_RES(mp),
799 0,
800 XFS_TRANS_PERM_LOG_RES,
801 XFS_WRITE_LOG_COUNT);
802 }
803 if (error) { 786 if (error) {
804 xfs_trans_cancel(tp, 0); 787 xfs_trans_cancel(tp, 0);
805 return XFS_ERROR(error); 788 return XFS_ERROR(error);
@@ -917,8 +900,8 @@ xfs_iomap_write_unwritten(
917 * from unwritten to real. Do allocations in a loop until 900 * from unwritten to real. Do allocations in a loop until
918 * we have covered the range passed in. 901 * we have covered the range passed in.
919 */ 902 */
920
921 tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE); 903 tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
904 tp->t_flags |= XFS_TRANS_RESERVE;
922 error = xfs_trans_reserve(tp, resblks, 905 error = xfs_trans_reserve(tp, resblks,
923 XFS_WRITE_LOG_RES(mp), 0, 906 XFS_WRITE_LOG_RES(mp), 0,
924 XFS_TRANS_PERM_LOG_RES, 907 XFS_TRANS_PERM_LOG_RES,
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index e725ddd3de5f..4c2454bcc714 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -202,6 +202,16 @@ xfs_bulkstat_one_dinode(
202 return 0; 202 return 0;
203} 203}
204 204
205STATIC int
206xfs_bulkstat_one_fmt(
207 void __user *ubuffer,
208 const xfs_bstat_t *buffer)
209{
210 if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
211 return -EFAULT;
212 return sizeof(*buffer);
213}
214
205/* 215/*
206 * Return stat information for one inode. 216 * Return stat information for one inode.
207 * Return 0 if ok, else errno. 217 * Return 0 if ok, else errno.
@@ -221,6 +231,7 @@ xfs_bulkstat_one(
221 xfs_bstat_t *buf; /* return buffer */ 231 xfs_bstat_t *buf; /* return buffer */
222 int error = 0; /* error value */ 232 int error = 0; /* error value */
223 xfs_dinode_t *dip; /* dinode inode pointer */ 233 xfs_dinode_t *dip; /* dinode inode pointer */
234 bulkstat_one_fmt_pf formatter = private_data ? : xfs_bulkstat_one_fmt;
224 235
225 dip = (xfs_dinode_t *)dibuff; 236 dip = (xfs_dinode_t *)dibuff;
226 *stat = BULKSTAT_RV_NOTHING; 237 *stat = BULKSTAT_RV_NOTHING;
@@ -243,14 +254,15 @@ xfs_bulkstat_one(
243 xfs_bulkstat_one_dinode(mp, ino, dip, buf); 254 xfs_bulkstat_one_dinode(mp, ino, dip, buf);
244 } 255 }
245 256
246 if (copy_to_user(buffer, buf, sizeof(*buf))) { 257 error = formatter(buffer, buf);
258 if (error < 0) {
247 error = EFAULT; 259 error = EFAULT;
248 goto out_free; 260 goto out_free;
249 } 261 }
250 262
251 *stat = BULKSTAT_RV_DIDONE; 263 *stat = BULKSTAT_RV_DIDONE;
252 if (ubused) 264 if (ubused)
253 *ubused = sizeof(*buf); 265 *ubused = error;
254 266
255 out_free: 267 out_free:
256 kmem_free(buf, sizeof(*buf)); 268 kmem_free(buf, sizeof(*buf));
@@ -748,6 +760,19 @@ xfs_bulkstat_single(
748 return 0; 760 return 0;
749} 761}
750 762
763int
764xfs_inumbers_fmt(
765 void __user *ubuffer, /* buffer to write to */
766 const xfs_inogrp_t *buffer, /* buffer to read from */
767 long count, /* # of elements to read */
768 long *written) /* # of bytes written */
769{
770 if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
771 return -EFAULT;
772 *written = count * sizeof(*buffer);
773 return 0;
774}
775
751/* 776/*
752 * Return inode number table for the filesystem. 777 * Return inode number table for the filesystem.
753 */ 778 */
@@ -756,7 +781,8 @@ xfs_inumbers(
756 xfs_mount_t *mp, /* mount point for filesystem */ 781 xfs_mount_t *mp, /* mount point for filesystem */
757 xfs_ino_t *lastino, /* last inode returned */ 782 xfs_ino_t *lastino, /* last inode returned */
758 int *count, /* size of buffer/count returned */ 783 int *count, /* size of buffer/count returned */
759 xfs_inogrp_t __user *ubuffer)/* buffer with inode descriptions */ 784 void __user *ubuffer,/* buffer with inode descriptions */
785 inumbers_fmt_pf formatter)
760{ 786{
761 xfs_buf_t *agbp; 787 xfs_buf_t *agbp;
762 xfs_agino_t agino; 788 xfs_agino_t agino;
@@ -835,12 +861,12 @@ xfs_inumbers(
835 bufidx++; 861 bufidx++;
836 left--; 862 left--;
837 if (bufidx == bcount) { 863 if (bufidx == bcount) {
838 if (copy_to_user(ubuffer, buffer, 864 long written;
839 bufidx * sizeof(*buffer))) { 865 if (formatter(ubuffer, buffer, bufidx, &written)) {
840 error = XFS_ERROR(EFAULT); 866 error = XFS_ERROR(EFAULT);
841 break; 867 break;
842 } 868 }
843 ubuffer += bufidx; 869 ubuffer += written;
844 *count += bufidx; 870 *count += bufidx;
845 bufidx = 0; 871 bufidx = 0;
846 } 872 }
@@ -862,8 +888,8 @@ xfs_inumbers(
862 } 888 }
863 if (!error) { 889 if (!error) {
864 if (bufidx) { 890 if (bufidx) {
865 if (copy_to_user(ubuffer, buffer, 891 long written;
866 bufidx * sizeof(*buffer))) 892 if (formatter(ubuffer, buffer, bufidx, &written))
867 error = XFS_ERROR(EFAULT); 893 error = XFS_ERROR(EFAULT);
868 else 894 else
869 *count += bufidx; 895 *count += bufidx;
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
index f25a28862a17..a1f18fce9b70 100644
--- a/fs/xfs/xfs_itable.h
+++ b/fs/xfs/xfs_itable.h
@@ -69,6 +69,10 @@ xfs_bulkstat_single(
69 char __user *buffer, 69 char __user *buffer,
70 int *done); 70 int *done);
71 71
72typedef int (*bulkstat_one_fmt_pf)( /* used size in bytes or negative error */
73 void __user *ubuffer, /* buffer to write to */
74 const xfs_bstat_t *buffer); /* buffer to read from */
75
72int 76int
73xfs_bulkstat_one( 77xfs_bulkstat_one(
74 xfs_mount_t *mp, 78 xfs_mount_t *mp,
@@ -86,11 +90,25 @@ xfs_internal_inum(
86 xfs_mount_t *mp, 90 xfs_mount_t *mp,
87 xfs_ino_t ino); 91 xfs_ino_t ino);
88 92
93typedef int (*inumbers_fmt_pf)(
94 void __user *ubuffer, /* buffer to write to */
95 const xfs_inogrp_t *buffer, /* buffer to read from */
96 long count, /* # of elements to read */
97 long *written); /* # of bytes written */
98
99int
100xfs_inumbers_fmt(
101 void __user *ubuffer, /* buffer to write to */
102 const xfs_inogrp_t *buffer, /* buffer to read from */
103 long count, /* # of elements to read */
104 long *written); /* # of bytes written */
105
89int /* error status */ 106int /* error status */
90xfs_inumbers( 107xfs_inumbers(
91 xfs_mount_t *mp, /* mount point for filesystem */ 108 xfs_mount_t *mp, /* mount point for filesystem */
92 xfs_ino_t *last, /* last inode returned */ 109 xfs_ino_t *last, /* last inode returned */
93 int *count, /* size of buffer/count returned */ 110 int *count, /* size of buffer/count returned */
94 xfs_inogrp_t __user *buffer);/* buffer with inode info */ 111 void __user *buffer, /* buffer with inode info */
112 inumbers_fmt_pf formatter);
95 113
96#endif /* __XFS_ITABLE_H__ */ 114#endif /* __XFS_ITABLE_H__ */
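
The formatter typedefs above decouple the inode-walking code from the user-copy layout: a formatter returns the number of bytes it consumed, or a negative value on failure, and the caller records that as the per-inode size. A standalone model of that contract, with illustrative model_* names in place of the real structures:

/*
 * Standalone model of the bulkstat formatter contract: the callback
 * returns the bytes it consumed in the output buffer, or a negative
 * value on failure, and the caller records that as "ubused".
 */
#include <stdio.h>
#include <string.h>

struct model_bstat { int ino; int mode; };

typedef int (*model_bulkstat_fmt)(void *ubuffer, const struct model_bstat *buf);

static int model_default_fmt(void *ubuffer, const struct model_bstat *buf)
{
	/* stands in for copy_to_user(); a real formatter may fail here */
	memcpy(ubuffer, buf, sizeof(*buf));
	return sizeof(*buf);
}

int main(void)
{
	struct model_bstat in = { 42, 0100644 }, out;
	model_bulkstat_fmt formatter = model_default_fmt;
	int ubused = formatter(&out, &in);

	if (ubused < 0)
		printf("formatter failed\n");
	else
		printf("formatted inode %d using %d bytes\n", out.ino, ubused);
	return 0;
}
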
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index c48bf61f17bd..9d4c4fbeb3ee 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -817,10 +817,8 @@ xfs_log_need_covered(xfs_mount_t *mp)
817 SPLDECL(s); 817 SPLDECL(s);
818 int needed = 0, gen; 818 int needed = 0, gen;
819 xlog_t *log = mp->m_log; 819 xlog_t *log = mp->m_log;
820 bhv_vfs_t *vfsp = XFS_MTOVFS(mp);
821 820
822 if (vfs_test_for_freeze(vfsp) || XFS_FORCED_SHUTDOWN(mp) || 821 if (!xfs_fs_writable(mp))
823 (vfsp->vfs_flag & VFS_RDONLY))
824 return 0; 822 return 0;
825 823
826 s = LOG_LOCK(log); 824 s = LOG_LOCK(log);
@@ -967,14 +965,16 @@ xlog_iodone(xfs_buf_t *bp)
967 } else if (iclog->ic_state & XLOG_STATE_IOERROR) { 965 } else if (iclog->ic_state & XLOG_STATE_IOERROR) {
968 aborted = XFS_LI_ABORTED; 966 aborted = XFS_LI_ABORTED;
969 } 967 }
968
969 /* log I/O is always issued ASYNC */
970 ASSERT(XFS_BUF_ISASYNC(bp));
970 xlog_state_done_syncing(iclog, aborted); 971 xlog_state_done_syncing(iclog, aborted);
971 if (!(XFS_BUF_ISASYNC(bp))) { 972 /*
972 /* 973 * do not reference the buffer (bp) here as we could race
973 * Corresponding psema() will be done in bwrite(). If we don't 974 * with it being freed after writing the unmount record to the
974 * vsema() here, panic. 975 * log.
975 */ 976 */
976 XFS_BUF_V_IODONESEMA(bp); 977
977 }
978} /* xlog_iodone */ 978} /* xlog_iodone */
979 979
980/* 980/*
@@ -1199,11 +1199,18 @@ xlog_alloc_log(xfs_mount_t *mp,
1199 *iclogp = (xlog_in_core_t *) 1199 *iclogp = (xlog_in_core_t *)
1200 kmem_zalloc(sizeof(xlog_in_core_t), KM_SLEEP); 1200 kmem_zalloc(sizeof(xlog_in_core_t), KM_SLEEP);
1201 iclog = *iclogp; 1201 iclog = *iclogp;
1202 iclog->hic_data = (xlog_in_core_2_t *)
1203 kmem_zalloc(iclogsize, KM_SLEEP | KM_LARGE);
1204
1205 iclog->ic_prev = prev_iclog; 1202 iclog->ic_prev = prev_iclog;
1206 prev_iclog = iclog; 1203 prev_iclog = iclog;
1204
1205 bp = xfs_buf_get_noaddr(log->l_iclog_size, mp->m_logdev_targp);
1206 if (!XFS_BUF_CPSEMA(bp))
1207 ASSERT(0);
1208 XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
1209 XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb);
1210 XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
1211 iclog->ic_bp = bp;
1212 iclog->hic_data = bp->b_addr;
1213
1207 log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header); 1214 log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header);
1208 1215
1209 head = &iclog->ic_header; 1216 head = &iclog->ic_header;
@@ -1216,11 +1223,6 @@ xlog_alloc_log(xfs_mount_t *mp,
1216 INT_SET(head->h_fmt, ARCH_CONVERT, XLOG_FMT); 1223 INT_SET(head->h_fmt, ARCH_CONVERT, XLOG_FMT);
1217 memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t)); 1224 memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
1218 1225
1219 bp = xfs_buf_get_empty(log->l_iclog_size, mp->m_logdev_targp);
1220 XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
1221 XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb);
1222 XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
1223 iclog->ic_bp = bp;
1224 1226
1225 iclog->ic_size = XFS_BUF_SIZE(bp) - log->l_iclog_hsize; 1227 iclog->ic_size = XFS_BUF_SIZE(bp) - log->l_iclog_hsize;
1226 iclog->ic_state = XLOG_STATE_ACTIVE; 1228 iclog->ic_state = XLOG_STATE_ACTIVE;
@@ -1432,7 +1434,7 @@ xlog_sync(xlog_t *log,
1432 } else { 1434 } else {
1433 iclog->ic_bwritecnt = 1; 1435 iclog->ic_bwritecnt = 1;
1434 } 1436 }
1435 XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count); 1437 XFS_BUF_SET_COUNT(bp, count);
1436 XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */ 1438 XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */
1437 XFS_BUF_ZEROFLAGS(bp); 1439 XFS_BUF_ZEROFLAGS(bp);
1438 XFS_BUF_BUSY(bp); 1440 XFS_BUF_BUSY(bp);
@@ -1528,7 +1530,6 @@ xlog_dealloc_log(xlog_t *log)
1528 } 1530 }
1529#endif 1531#endif
1530 next_iclog = iclog->ic_next; 1532 next_iclog = iclog->ic_next;
1531 kmem_free(iclog->hic_data, log->l_iclog_size);
1532 kmem_free(iclog, sizeof(xlog_in_core_t)); 1533 kmem_free(iclog, sizeof(xlog_in_core_t));
1533 iclog = next_iclog; 1534 iclog = next_iclog;
1534 } 1535 }
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 080fabf61c92..fddbb091a86f 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -927,6 +927,14 @@ xlog_find_tail(
927 ASSIGN_ANY_LSN_HOST(log->l_last_sync_lsn, log->l_curr_cycle, 927 ASSIGN_ANY_LSN_HOST(log->l_last_sync_lsn, log->l_curr_cycle,
928 after_umount_blk); 928 after_umount_blk);
929 *tail_blk = after_umount_blk; 929 *tail_blk = after_umount_blk;
930
931 /*
932 * Note that the unmount was clean. If the unmount
933 * was not clean, we need to know this to rebuild the
934 * superblock counters from the perag headers if we
935 * have a filesystem using non-persistent counters.
936 */
937 log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
930 } 938 }
931 } 939 }
932 940
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index a96bde6df96d..a66b39805176 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -202,6 +202,27 @@ xfs_mount_free(
202 kmem_free(mp, sizeof(xfs_mount_t)); 202 kmem_free(mp, sizeof(xfs_mount_t));
203} 203}
204 204
205/*
206 * Check size of device based on the (data/realtime) block count.
207 * Note: this check is used by the growfs code as well as mount.
208 */
209int
210xfs_sb_validate_fsb_count(
211 xfs_sb_t *sbp,
212 __uint64_t nblocks)
213{
214 ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
215 ASSERT(sbp->sb_blocklog >= BBSHIFT);
216
217#if XFS_BIG_BLKNOS /* Limited by ULONG_MAX of page cache index */
218 if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
219 return E2BIG;
220#else /* Limited by UINT_MAX of sectors */
221 if (nblocks << (sbp->sb_blocklog - BBSHIFT) > UINT_MAX)
222 return E2BIG;
223#endif
224 return 0;
225}
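
The two branches above bound the device size either by the page cache index (an unsigned long of pages) or by a 32-bit count of 512-byte sectors. A standalone model of both checks, assuming 4 KiB pages and 4 KiB blocks for illustration:

/* Standalone model of the two size limits checked above. */
#include <stdint.h>
#include <stdio.h>
#include <limits.h>

#define MODEL_PAGE_SHIFT	12	/* assumption: 4 KiB pages */
#define MODEL_BBSHIFT		9	/* 512-byte basic blocks, as in XFS */

/* page-cache-index limit (the XFS_BIG_BLKNOS branch) */
static int too_big_for_page_cache(uint64_t nblocks, int blocklog,
				  uint64_t ulong_max)
{
	return (nblocks >> (MODEL_PAGE_SHIFT - blocklog)) > ulong_max;
}

/* 32-bit sector-count limit (the !XFS_BIG_BLKNOS branch) */
static int too_big_for_32bit_sectors(uint64_t nblocks, int blocklog)
{
	return (nblocks << (blocklog - MODEL_BBSHIFT)) > UINT_MAX;
}

int main(void)
{
	uint64_t nblocks = 1ULL << 30;	/* 2^30 x 4 KiB = 4 TiB of data blocks */

	printf("page-cache limit (32-bit ulong): %s\n",
	       too_big_for_page_cache(nblocks, 12, 0xffffffffULL) ? "E2BIG" : "ok");
	printf("32-bit sector-count limit: %s\n",
	       too_big_for_32bit_sectors(nblocks, 12) ? "E2BIG" : "ok");
	return 0;
}

A 4 TiB data device passes the page-cache check but exceeds UINT_MAX sectors, so only the first configuration could mount it.
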
205 226
206/* 227/*
207 * Check the validity of the SB found. 228 * Check the validity of the SB found.
@@ -284,18 +305,8 @@ xfs_mount_validate_sb(
284 return XFS_ERROR(EFSCORRUPTED); 305 return XFS_ERROR(EFSCORRUPTED);
285 } 306 }
286 307
287 ASSERT(PAGE_SHIFT >= sbp->sb_blocklog); 308 if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
288 ASSERT(sbp->sb_blocklog >= BBSHIFT); 309 xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
289
290#if XFS_BIG_BLKNOS /* Limited by ULONG_MAX of page cache index */
291 if (unlikely(
292 (sbp->sb_dblocks >> (PAGE_SHIFT - sbp->sb_blocklog)) > ULONG_MAX ||
293 (sbp->sb_rblocks >> (PAGE_SHIFT - sbp->sb_blocklog)) > ULONG_MAX)) {
294#else /* Limited by UINT_MAX of sectors */
295 if (unlikely(
296 (sbp->sb_dblocks << (sbp->sb_blocklog - BBSHIFT)) > UINT_MAX ||
297 (sbp->sb_rblocks << (sbp->sb_blocklog - BBSHIFT)) > UINT_MAX)) {
298#endif
299 xfs_fs_mount_cmn_err(flags, 310 xfs_fs_mount_cmn_err(flags,
300 "file system too large to be mounted on this system."); 311 "file system too large to be mounted on this system.");
301 return XFS_ERROR(E2BIG); 312 return XFS_ERROR(E2BIG);
@@ -632,6 +643,64 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
632 sbp->sb_inopblock); 643 sbp->sb_inopblock);
633 mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog; 644 mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
634} 645}
646
647/*
648 * xfs_initialize_perag_data
649 *
650 * Read in each per-ag structure so we can count up the number of
651 * allocated inodes, free inodes and used filesystem blocks as this
652 * information is no longer persistent in the superblock. Once we have
653 * this information, write it into the in-core superblock structure.
654 */
655STATIC int
656xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
657{
658 xfs_agnumber_t index;
659 xfs_perag_t *pag;
660 xfs_sb_t *sbp = &mp->m_sb;
661 uint64_t ifree = 0;
662 uint64_t ialloc = 0;
663 uint64_t bfree = 0;
664 uint64_t bfreelst = 0;
665 uint64_t btree = 0;
666 int error;
667 int s;
668
669 for (index = 0; index < agcount; index++) {
670 /*
671 * read the agf, then the agi. This gets us
671		 * all the information we need and populates the
673 * per-ag structures for us.
674 */
675 error = xfs_alloc_pagf_init(mp, NULL, index, 0);
676 if (error)
677 return error;
678
679 error = xfs_ialloc_pagi_init(mp, NULL, index);
680 if (error)
681 return error;
682 pag = &mp->m_perag[index];
683 ifree += pag->pagi_freecount;
684 ialloc += pag->pagi_count;
685 bfree += pag->pagf_freeblks;
686 bfreelst += pag->pagf_flcount;
687 btree += pag->pagf_btreeblks;
688 }
689 /*
690 * Overwrite incore superblock counters with just-read data
691 */
692 s = XFS_SB_LOCK(mp);
693 sbp->sb_ifree = ifree;
694 sbp->sb_icount = ialloc;
695 sbp->sb_fdblocks = bfree + bfreelst + btree;
696 XFS_SB_UNLOCK(mp, s);
697
698 /* Fixup the per-cpu counters as well. */
699 xfs_icsb_reinit_counters(mp);
700
701 return 0;
702}
703
635/* 704/*
636 * xfs_mountfs 705 * xfs_mountfs
637 * 706 *
@@ -656,7 +725,7 @@ xfs_mountfs(
656 bhv_vnode_t *rvp = NULL; 725 bhv_vnode_t *rvp = NULL;
657 int readio_log, writeio_log; 726 int readio_log, writeio_log;
658 xfs_daddr_t d; 727 xfs_daddr_t d;
659 __uint64_t ret64; 728 __uint64_t resblks;
660 __int64_t update_flags; 729 __int64_t update_flags;
661 uint quotamount, quotaflags; 730 uint quotamount, quotaflags;
662 int agno; 731 int agno;
@@ -773,6 +842,7 @@ xfs_mountfs(
773 */ 842 */
774 if ((mfsi_flags & XFS_MFSI_SECOND) == 0 && 843 if ((mfsi_flags & XFS_MFSI_SECOND) == 0 &&
775 (mp->m_flags & XFS_MOUNT_NOUUID) == 0) { 844 (mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
845 __uint64_t ret64;
776 if (xfs_uuid_mount(mp)) { 846 if (xfs_uuid_mount(mp)) {
777 error = XFS_ERROR(EINVAL); 847 error = XFS_ERROR(EINVAL);
778 goto error1; 848 goto error1;
@@ -976,6 +1046,34 @@ xfs_mountfs(
976 } 1046 }
977 1047
978 /* 1048 /*
1049 * Now the log is mounted, we know if it was an unclean shutdown or
1050	 * not. If it was, then the first phase of recovery has completed and we
1051	 * have consistent AG blocks on disk. We have not recovered EFIs yet,
1052 * but they are recovered transactionally in the second recovery phase
1053 * later.
1054 *
1055 * Hence we can safely re-initialise incore superblock counters from
1056 * the per-ag data. These may not be correct if the filesystem was not
1057 * cleanly unmounted, so we need to wait for recovery to finish before
1058 * doing this.
1059 *
1060 * If the filesystem was cleanly unmounted, then we can trust the
1061 * values in the superblock to be correct and we don't need to do
1062 * anything here.
1063 *
1064 * If we are currently making the filesystem, the initialisation will
1065 * fail as the perag data is in an undefined state.
1066 */
1067
1068 if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
1069 !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
1070 !mp->m_sb.sb_inprogress) {
1071 error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
1072 if (error) {
1073 goto error2;
1074 }
1075 }
1076 /*
979 * Get and sanity-check the root inode. 1077 * Get and sanity-check the root inode.
980 * Save the pointer to it in the mount structure. 1078 * Save the pointer to it in the mount structure.
981 */ 1079 */
@@ -1044,6 +1142,23 @@ xfs_mountfs(
1044 if ((error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags))) 1142 if ((error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags)))
1045 goto error4; 1143 goto error4;
1046 1144
1145 /*
1146 * Now we are mounted, reserve a small amount of unused space for
1147 * privileged transactions. This is needed so that transaction
1148 * space required for critical operations can dip into this pool
1149 * when at ENOSPC. This is needed for operations like create with
1150 * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
1151 * are not allowed to use this reserved space.
1152 *
1153 * We default to 5% or 1024 fsbs of space reserved, whichever is smaller.
1154 * This may drive us straight to ENOSPC on mount, but that implies
1155 * we were already there on the last unmount.
1156 */
1157 resblks = mp->m_sb.sb_dblocks;
1158 do_div(resblks, 20);
1159 resblks = min_t(__uint64_t, resblks, 1024);
1160 xfs_reserve_blocks(mp, &resblks, NULL);
1161
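
The reservation above evaluates to min(sb_dblocks / 20, 1024), i.e. 5% of the data blocks capped at 1024. A standalone check of that arithmetic for two sample filesystem sizes:

/* Standalone check of the "5% or 1024 blocks, whichever is smaller" rule. */
#include <stdio.h>
#include <stdint.h>

static uint64_t model_resblks(uint64_t dblocks)
{
	uint64_t resblks = dblocks / 20;		/* 5% of data blocks */

	return resblks < 1024 ? resblks : 1024;		/* capped at 1024 */
}

int main(void)
{
	printf("10000-block fs   -> reserve %llu blocks\n",
	       (unsigned long long)model_resblks(10000));
	printf("1000000-block fs -> reserve %llu blocks\n",
	       (unsigned long long)model_resblks(1000000));
	return 0;
}

Running it prints 500 and 1024 blocks respectively.
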
1047 return 0; 1162 return 0;
1048 1163
1049 error4: 1164 error4:
@@ -1083,7 +1198,19 @@ xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
1083#if defined(DEBUG) || defined(INDUCE_IO_ERROR) 1198#if defined(DEBUG) || defined(INDUCE_IO_ERROR)
1084 int64_t fsid; 1199 int64_t fsid;
1085#endif 1200#endif
1201 __uint64_t resblks;
1086 1202
1203 /*
1204 * We can potentially deadlock here if we have an inode cluster
1205 * that has been freed has it's buffer still pinned in memory because
1206 * the transaction is still sitting in a iclog. The stale inodes
1207 * on that buffer will have their flush locks held until the
1208 * transaction hits the disk and the callbacks run. the inode
1209 * flush takes the flush lock unconditionally and with nothing to
1210 * push out the iclog we will never get that unlocked. hence we
1211 * need to force the log first.
1212 */
1213 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
1087 xfs_iflush_all(mp); 1214 xfs_iflush_all(mp);
1088 1215
1089 XFS_QM_DQPURGEALL(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING); 1216 XFS_QM_DQPURGEALL(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING);
@@ -1100,10 +1227,26 @@ xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
1100 xfs_binval(mp->m_rtdev_targp); 1227 xfs_binval(mp->m_rtdev_targp);
1101 } 1228 }
1102 1229
1103 xfs_unmountfs_writesb(mp); 1230 /*
1231 * Unreserve any blocks we have so that when we unmount we don't account
1232 * the reserved free space as used. This is really only necessary for
1233 * lazy superblock counting because it trusts the incore superblock
1234	 * counters to be absolutely correct on clean unmount.
1235 *
1236 * We don't bother correcting this elsewhere for lazy superblock
1237 * counting because on mount of an unclean filesystem we reconstruct the
1238 * correct counter value and this is irrelevant.
1239 *
1240 * For non-lazy counter filesystems, this doesn't matter at all because
1241	 * we only ever apply deltas to the superblock and hence the incore
1242	 * value does not matter.
1243 */
1244 resblks = 0;
1245 xfs_reserve_blocks(mp, &resblks, NULL);
1104 1246
1247 xfs_log_sbcount(mp, 1);
1248 xfs_unmountfs_writesb(mp);
1105 xfs_unmountfs_wait(mp); /* wait for async bufs */ 1249 xfs_unmountfs_wait(mp); /* wait for async bufs */
1106
1107 xfs_log_unmount(mp); /* Done! No more fs ops. */ 1250 xfs_log_unmount(mp); /* Done! No more fs ops. */
1108 1251
1109 xfs_freesb(mp); 1252 xfs_freesb(mp);
@@ -1150,6 +1293,62 @@ xfs_unmountfs_wait(xfs_mount_t *mp)
1150} 1293}
1151 1294
1152int 1295int
1296xfs_fs_writable(xfs_mount_t *mp)
1297{
1298 bhv_vfs_t *vfsp = XFS_MTOVFS(mp);
1299
1300 return !(vfs_test_for_freeze(vfsp) || XFS_FORCED_SHUTDOWN(mp) ||
1301 (vfsp->vfs_flag & VFS_RDONLY));
1302}
1303
1304/*
1305 * xfs_log_sbcount
1306 *
1307 * Called either periodically to keep the on disk superblock values
1308 * roughly up to date or from unmount to make sure the values are
1309 * correct on a clean unmount.
1310 *
1311 * Note this code can be called during the process of freezing, so
1312 * we may need to use the transaction allocator which does not not
1313 * block when the transaction subsystem is in its frozen state.
1314 */
1315int
1316xfs_log_sbcount(
1317 xfs_mount_t *mp,
1318 uint sync)
1319{
1320 xfs_trans_t *tp;
1321 int error;
1322
1323 if (!xfs_fs_writable(mp))
1324 return 0;
1325
1326 xfs_icsb_sync_counters(mp);
1327
1328 /*
1329 * we don't need to do this if we are updating the superblock
1330 * counters on every modification.
1331 */
1332 if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
1333 return 0;
1334
1335 tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT);
1336 error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
1337 XFS_DEFAULT_LOG_COUNT);
1338 if (error) {
1339 xfs_trans_cancel(tp, 0);
1340 return error;
1341 }
1342
1343 xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
1344 if (sync)
1345 xfs_trans_set_sync(tp);
1346 xfs_trans_commit(tp, 0);
1347
1348 return 0;
1349}
1350
1351int
1153xfs_unmountfs_writesb(xfs_mount_t *mp) 1352xfs_unmountfs_writesb(xfs_mount_t *mp)
1154{ 1353{
1155 xfs_buf_t *sbp; 1354 xfs_buf_t *sbp;
@@ -1160,16 +1359,15 @@ xfs_unmountfs_writesb(xfs_mount_t *mp)
1160 * skip superblock write if fs is read-only, or 1359 * skip superblock write if fs is read-only, or
1161 * if we are doing a forced umount. 1360 * if we are doing a forced umount.
1162 */ 1361 */
1163 sbp = xfs_getsb(mp, 0);
1164 if (!(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY || 1362 if (!(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY ||
1165 XFS_FORCED_SHUTDOWN(mp))) { 1363 XFS_FORCED_SHUTDOWN(mp))) {
1166 1364
1167 xfs_icsb_sync_counters(mp); 1365 sbp = xfs_getsb(mp, 0);
1366 sb = XFS_BUF_TO_SBP(sbp);
1168 1367
1169 /* 1368 /*
1170 * mark shared-readonly if desired 1369 * mark shared-readonly if desired
1171 */ 1370 */
1172 sb = XFS_BUF_TO_SBP(sbp);
1173 if (mp->m_mk_sharedro) { 1371 if (mp->m_mk_sharedro) {
1174 if (!(sb->sb_flags & XFS_SBF_READONLY)) 1372 if (!(sb->sb_flags & XFS_SBF_READONLY))
1175 sb->sb_flags |= XFS_SBF_READONLY; 1373 sb->sb_flags |= XFS_SBF_READONLY;
@@ -1178,6 +1376,7 @@ xfs_unmountfs_writesb(xfs_mount_t *mp)
1178 xfs_fs_cmn_err(CE_NOTE, mp, 1376 xfs_fs_cmn_err(CE_NOTE, mp,
1179 "Unmounting, marking shared read-only"); 1377 "Unmounting, marking shared read-only");
1180 } 1378 }
1379
1181 XFS_BUF_UNDONE(sbp); 1380 XFS_BUF_UNDONE(sbp);
1182 XFS_BUF_UNREAD(sbp); 1381 XFS_BUF_UNREAD(sbp);
1183 XFS_BUF_UNDELAYWRITE(sbp); 1382 XFS_BUF_UNDELAYWRITE(sbp);
@@ -1192,8 +1391,8 @@ xfs_unmountfs_writesb(xfs_mount_t *mp)
1192 mp, sbp, XFS_BUF_ADDR(sbp)); 1391 mp, sbp, XFS_BUF_ADDR(sbp));
1193 if (error && mp->m_mk_sharedro) 1392 if (error && mp->m_mk_sharedro)
1194 xfs_fs_cmn_err(CE_ALERT, mp, "Superblock write error detected while unmounting. Filesystem may not be marked shared readonly"); 1393 xfs_fs_cmn_err(CE_ALERT, mp, "Superblock write error detected while unmounting. Filesystem may not be marked shared readonly");
1394 xfs_buf_relse(sbp);
1195 } 1395 }
1196 xfs_buf_relse(sbp);
1197 return error; 1396 return error;
1198} 1397}
1199 1398
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 82304b94646d..76ad74758696 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -66,6 +66,7 @@ struct xfs_bmbt_irec;
66struct xfs_bmap_free; 66struct xfs_bmap_free;
67struct xfs_extdelta; 67struct xfs_extdelta;
68struct xfs_swapext; 68struct xfs_swapext;
69struct xfs_mru_cache;
69 70
70extern struct bhv_vfsops xfs_vfsops; 71extern struct bhv_vfsops xfs_vfsops;
71extern struct bhv_vnodeops xfs_vnodeops; 72extern struct bhv_vnodeops xfs_vnodeops;
@@ -424,17 +425,18 @@ typedef struct xfs_mount {
424 struct notifier_block m_icsb_notifier; /* hotplug cpu notifier */ 425 struct notifier_block m_icsb_notifier; /* hotplug cpu notifier */
425 struct mutex m_icsb_mutex; /* balancer sync lock */ 426 struct mutex m_icsb_mutex; /* balancer sync lock */
426#endif 427#endif
428 struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
427} xfs_mount_t; 429} xfs_mount_t;
428 430
429/* 431/*
430 * Flags for m_flags. 432 * Flags for m_flags.
431 */ 433 */
432#define XFS_MOUNT_WSYNC (1ULL << 0) /* for nfs - all metadata ops 434#define XFS_MOUNT_WSYNC (1ULL << 0) /* for nfs - all metadata ops
433 must be synchronous except 435 must be synchronous except
434 for space allocations */ 436 for space allocations */
435#define XFS_MOUNT_INO64 (1ULL << 1) 437#define XFS_MOUNT_INO64 (1ULL << 1)
436 /* (1ULL << 2) -- currently unused */ 438 /* (1ULL << 2) -- currently unused */
437 /* (1ULL << 3) -- currently unused */ 439#define XFS_MOUNT_WAS_CLEAN (1ULL << 3)
438#define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem 440#define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem
439 operations, typically for 441 operations, typically for
440 disk errors in metadata */ 442 disk errors in metadata */
@@ -463,6 +465,8 @@ typedef struct xfs_mount {
463 * I/O size in stat() */ 465 * I/O size in stat() */
464#define XFS_MOUNT_NO_PERCPU_SB (1ULL << 23) /* don't use per-cpu superblock 466#define XFS_MOUNT_NO_PERCPU_SB (1ULL << 23) /* don't use per-cpu superblock
465 counters */ 467 counters */
468#define XFS_MOUNT_FILESTREAMS (1ULL << 24) /* enable the filestreams
469 allocator */
466 470
467 471
468/* 472/*
@@ -511,6 +515,8 @@ xfs_preferred_iosize(xfs_mount_t *mp)
511 515
512#define XFS_MAXIOFFSET(mp) ((mp)->m_maxioffset) 516#define XFS_MAXIOFFSET(mp) ((mp)->m_maxioffset)
513 517
518#define XFS_LAST_UNMOUNT_WAS_CLEAN(mp) \
519 ((mp)->m_flags & XFS_MOUNT_WAS_CLEAN)
514#define XFS_FORCED_SHUTDOWN(mp) ((mp)->m_flags & XFS_MOUNT_FS_SHUTDOWN) 520#define XFS_FORCED_SHUTDOWN(mp) ((mp)->m_flags & XFS_MOUNT_FS_SHUTDOWN)
515#define xfs_force_shutdown(m,f) \ 521#define xfs_force_shutdown(m,f) \
516 bhv_vfs_force_shutdown((XFS_MTOVFS(m)), f, __FILE__, __LINE__) 522 bhv_vfs_force_shutdown((XFS_MTOVFS(m)), f, __FILE__, __LINE__)
@@ -602,6 +608,7 @@ typedef struct xfs_mod_sb {
602 608
603extern xfs_mount_t *xfs_mount_init(void); 609extern xfs_mount_t *xfs_mount_init(void);
604extern void xfs_mod_sb(xfs_trans_t *, __int64_t); 610extern void xfs_mod_sb(xfs_trans_t *, __int64_t);
611extern int xfs_log_sbcount(xfs_mount_t *, uint);
605extern void xfs_mount_free(xfs_mount_t *mp, int remove_bhv); 612extern void xfs_mount_free(xfs_mount_t *mp, int remove_bhv);
606extern int xfs_mountfs(struct bhv_vfs *, xfs_mount_t *mp, int); 613extern int xfs_mountfs(struct bhv_vfs *, xfs_mount_t *mp, int);
607extern void xfs_mountfs_check_barriers(xfs_mount_t *mp); 614extern void xfs_mountfs_check_barriers(xfs_mount_t *mp);
@@ -618,12 +625,14 @@ extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *,
618extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int); 625extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
619extern int xfs_readsb(xfs_mount_t *, int); 626extern int xfs_readsb(xfs_mount_t *, int);
620extern void xfs_freesb(xfs_mount_t *); 627extern void xfs_freesb(xfs_mount_t *);
628extern int xfs_fs_writable(xfs_mount_t *);
621extern void xfs_do_force_shutdown(bhv_desc_t *, int, char *, int); 629extern void xfs_do_force_shutdown(bhv_desc_t *, int, char *, int);
622extern int xfs_syncsub(xfs_mount_t *, int, int *); 630extern int xfs_syncsub(xfs_mount_t *, int, int *);
623extern int xfs_sync_inodes(xfs_mount_t *, int, int *); 631extern int xfs_sync_inodes(xfs_mount_t *, int, int *);
624extern xfs_agnumber_t xfs_initialize_perag(struct bhv_vfs *, xfs_mount_t *, 632extern xfs_agnumber_t xfs_initialize_perag(struct bhv_vfs *, xfs_mount_t *,
625 xfs_agnumber_t); 633 xfs_agnumber_t);
626extern void xfs_xlatesb(void *, struct xfs_sb *, int, __int64_t); 634extern void xfs_xlatesb(void *, struct xfs_sb *, int, __int64_t);
635extern int xfs_sb_validate_fsb_count(struct xfs_sb *, __uint64_t);
627 636
628extern struct xfs_dmops xfs_dmcore_stub; 637extern struct xfs_dmops xfs_dmcore_stub;
629extern struct xfs_qmops xfs_qmcore_stub; 638extern struct xfs_qmops xfs_qmcore_stub;
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
new file mode 100644
index 000000000000..7deb9e3cbbd3
--- /dev/null
+++ b/fs/xfs/xfs_mru_cache.c
@@ -0,0 +1,608 @@
1/*
2 * Copyright (c) 2006-2007 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_mru_cache.h"
20
21/*
22 * The MRU Cache data structure consists of a data store, an array of lists and
23 * a lock to protect its internal state. At initialisation time, the client
24 * supplies an element lifetime in milliseconds and a group count, as well as a
25 * function pointer to call when deleting elements. A data structure for
26 * queueing up work in the form of timed callbacks is also included.
27 *
28 * The group count controls how many lists are created, and thereby how finely
29 * the elements are grouped in time. When reaping occurs, all the elements in
30 * all the lists whose time has expired are deleted.
31 *
32 * To give an example of how this works in practice, consider a client that
33 * initialises an MRU Cache with a lifetime of ten seconds and a group count of
34 * five. Five internal lists will be created, each representing a two second
35 * period in time. When the first element is added, time zero for the data
36 * structure is initialised to the current time.
37 *
38 * All the elements added in the first two seconds are appended to the first
39 * list. Elements added in the third second go into the second list, and so on.
40 * If an element is accessed at any point, it is removed from its list and
41 * inserted at the head of the current most-recently-used list.
42 *
43 * The reaper function will have nothing to do until at least twelve seconds
44 * have elapsed since the first element was added. The reason for this is that
45 * if it were called at t=11s, there could be elements in the first list that
46 * have only been inactive for nine seconds, so it still does nothing. If it is
47 * called anywhere between t=12 and t=14 seconds, it will delete all the
48 * elements that remain in the first list. It's therefore possible for elements
49 * to remain in the data store even after they've been inactive for up to
50 * (t + t/g) seconds, where t is the inactive element lifetime and g is the
51 * number of groups.
52 *
53 * The above example assumes that the reaper function gets called at least once
54 * every (t/g) seconds. If it is called less frequently, unused elements will
55 * accumulate in the reap list until the reaper function is eventually called.
56 * The current implementation uses work queue callbacks to carefully time the
57 * reaper function calls, so this should happen rarely, if at all.
58 *
59 * From a design perspective, the primary reason for the choice of a list array
60 * representing discrete time intervals is that it's only practical to reap
61 * expired elements in groups of some appreciable size. This automatically
62 * introduces a granularity to element lifetimes, so there's no point storing an
63 * individual timeout with each element that specifies a more precise reap time.
64 * The bonus is a saving of sizeof(long) bytes of memory per element stored.
65 *
66 * The elements could have been stored in just one list, but an array of
67 * counters or pointers would need to be maintained to allow them to be divided
68 * up into discrete time groups. More critically, the process of touching or
69 * removing an element would involve walking large portions of the entire list,
70 * which would have a detrimental effect on performance. The additional memory
71 * requirement for the array of list heads is minimal.
72 *
73 * When an element is touched or deleted, it needs to be removed from its
74 * current list. Doubly linked lists are used to make the list maintenance
75 * portion of these operations O(1). Since reaper timing can be imprecise,
76 * inserts and lookups can occur when there are no free lists available. When
77 * this happens, all the elements on the LRU list need to be migrated to the end
78 * of the reap list. To keep the list maintenance portion of these operations
79 * O(1) also, list tails need to be accessible without walking the entire list.
80 * This is the reason why doubly linked list heads are used.
81 */
82
83/*
84 * An MRU Cache is a dynamic data structure that stores its elements in a way
85 * that allows efficient lookups, but also groups them into discrete time
86 * intervals based on insertion time. This allows elements to be efficiently
87 * and automatically reaped after a fixed period of inactivity.
88 *
89 * When a client data pointer is stored in the MRU Cache it needs to be added to
90 * both the data store and to one of the lists. It must also be possible to
91 * access each of these entries via the other, i.e. to:
92 *
93 * a) Walk a list, removing the corresponding data store entry for each item.
94 * b) Look up a data store entry, then access its list entry directly.
95 *
96 * To achieve both of these goals, each entry must contain both a list entry and
97 * a key, in addition to the user's data pointer. Note that it's not a good
98 * idea to have the client embed one of these structures at the top of their own
99 * data structure, because inserting the same item more than once would most
100 * likely result in a loop in one of the lists. That's a sure-fire recipe for
101 * an infinite loop in the code.
102 */
103typedef struct xfs_mru_cache_elem
104{
105 struct list_head list_node;
106 unsigned long key;
107 void *value;
108} xfs_mru_cache_elem_t;
109
110static kmem_zone_t *xfs_mru_elem_zone;
111static struct workqueue_struct *xfs_mru_reap_wq;
112
113/*
114 * When inserting, destroying or reaping, it's first necessary to update the
115 * lists relative to a particular time. In the case of destroying, that time
116 * will be well in the future to ensure that all items are moved to the reap
117 * list. In all other cases though, the time will be the current time.
118 *
119 * This function enters a loop, moving the contents of the LRU list to the reap
120 * list again and again until either a) the lists are all empty, or b) time zero
121 * has been advanced sufficiently to be within the immediate element lifetime.
122 *
123 * Case a) above is detected by counting how many groups are migrated and
124 * stopping when they've all been moved. Case b) is detected by monitoring the
125 * time_zero field, which is updated as each group is migrated.
126 *
127 * The return value is the earliest time that more migration could be needed, or
128 * zero if there's no need to schedule more work because the lists are empty.
129 */
130STATIC unsigned long
131_xfs_mru_cache_migrate(
132 xfs_mru_cache_t *mru,
133 unsigned long now)
134{
135 unsigned int grp;
136 unsigned int migrated = 0;
137 struct list_head *lru_list;
138
139 /* Nothing to do if the data store is empty. */
140 if (!mru->time_zero)
141 return 0;
142
143 /* While time zero is older than the time spanned by all the lists. */
144 while (mru->time_zero <= now - mru->grp_count * mru->grp_time) {
145
146 /*
147 * If the LRU list isn't empty, migrate its elements to the tail
148 * of the reap list.
149 */
150 lru_list = mru->lists + mru->lru_grp;
151 if (!list_empty(lru_list))
152 list_splice_init(lru_list, mru->reap_list.prev);
153
154 /*
155 * Advance the LRU group number, freeing the old LRU list to
156 * become the new MRU list; advance time zero accordingly.
157 */
158 mru->lru_grp = (mru->lru_grp + 1) % mru->grp_count;
159 mru->time_zero += mru->grp_time;
160
161 /*
162 * If reaping is so far behind that all the elements on all the
163 * lists have been migrated to the reap list, it's now empty.
164 */
165 if (++migrated == mru->grp_count) {
166 mru->lru_grp = 0;
167 mru->time_zero = 0;
168 return 0;
169 }
170 }
171
172 /* Find the first non-empty list from the LRU end. */
173 for (grp = 0; grp < mru->grp_count; grp++) {
174
175 /* Check the grp'th list from the LRU end. */
176 lru_list = mru->lists + ((mru->lru_grp + grp) % mru->grp_count);
177 if (!list_empty(lru_list))
178 return mru->time_zero +
179 (mru->grp_count + grp) * mru->grp_time;
180 }
181
182 /* All the lists must be empty. */
183 mru->lru_grp = 0;
184 mru->time_zero = 0;
185 return 0;
186}
187
188/*
189 * When inserting or doing a lookup, an element needs to be inserted into the
190 * MRU list. The lists must be migrated first to ensure that they're
191 * up-to-date, otherwise the new element could be given a shorter lifetime in
192 * the cache than it should.
193 */
194STATIC void
195_xfs_mru_cache_list_insert(
196 xfs_mru_cache_t *mru,
197 xfs_mru_cache_elem_t *elem)
198{
199 unsigned int grp = 0;
200 unsigned long now = jiffies;
201
202 /*
203 * If the data store is empty, initialise time zero, leave grp set to
204 * zero and start the work queue timer if necessary. Otherwise, set grp
205 * to the number of group times that have elapsed since time zero.
206 */
207 if (!_xfs_mru_cache_migrate(mru, now)) {
208 mru->time_zero = now;
209 if (!mru->next_reap)
210 mru->next_reap = mru->grp_count * mru->grp_time;
211 } else {
212 grp = (now - mru->time_zero) / mru->grp_time;
213 grp = (mru->lru_grp + grp) % mru->grp_count;
214 }
215
216 /* Insert the element at the tail of the corresponding list. */
217 list_add_tail(&elem->list_node, mru->lists + grp);
218}
219
220/*
221 * When destroying or reaping, all the elements that were migrated to the reap
222 * list need to be deleted. For each element this involves removing it from the
223 * data store, removing it from the reap list, calling the client's free
224 * function and deleting the element from the element zone.
225 */
226STATIC void
227_xfs_mru_cache_clear_reap_list(
228 xfs_mru_cache_t *mru)
229{
230 xfs_mru_cache_elem_t *elem, *next;
231 struct list_head tmp;
232
233 INIT_LIST_HEAD(&tmp);
234 list_for_each_entry_safe(elem, next, &mru->reap_list, list_node) {
235
236 /* Remove the element from the data store. */
237 radix_tree_delete(&mru->store, elem->key);
238
239 /*
240 * move to a temp list so it can be freed without
241 * needing to hold the lock
242 */
243 list_move(&elem->list_node, &tmp);
244 }
245 mutex_spinunlock(&mru->lock, 0);
246
247 list_for_each_entry_safe(elem, next, &tmp, list_node) {
248
249 /* Remove the element from the reap list. */
250 list_del_init(&elem->list_node);
251
252 /* Call the client's free function with the key and value pointer. */
253 mru->free_func(elem->key, elem->value);
254
255 /* Free the element structure. */
256 kmem_zone_free(xfs_mru_elem_zone, elem);
257 }
258
259 mutex_spinlock(&mru->lock);
260}
261
262/*
263 * We fire the reap timer every group expiry interval so
264 * we always have a reaper ready to run. This makes shutdown
265 * and flushing of the reaper easy to do. Hence we need to
266 * keep track of when the next reap must occur so we can determine
267 * at each interval whether there is anything we need to do.
268 */
269STATIC void
270_xfs_mru_cache_reap(
271 struct work_struct *work)
272{
273 xfs_mru_cache_t *mru = container_of(work, xfs_mru_cache_t, work.work);
274 unsigned long now;
275
276 ASSERT(mru && mru->lists);
277 if (!mru || !mru->lists)
278 return;
279
280 mutex_spinlock(&mru->lock);
281 now = jiffies;
282 if (mru->reap_all ||
283 (mru->next_reap && time_after(now, mru->next_reap))) {
284 if (mru->reap_all)
285 now += mru->grp_count * mru->grp_time * 2;
286 mru->next_reap = _xfs_mru_cache_migrate(mru, now);
287 _xfs_mru_cache_clear_reap_list(mru);
288 }
289
290 /*
291 * The process that triggered the reap_all is responsible
292 * for restarting the periodic reap if it is required.
293 */
294 if (!mru->reap_all)
295 queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
296 mru->reap_all = 0;
297 mutex_spinunlock(&mru->lock, 0);
298}
299
300int
301xfs_mru_cache_init(void)
302{
303 xfs_mru_elem_zone = kmem_zone_init(sizeof(xfs_mru_cache_elem_t),
304 "xfs_mru_cache_elem");
305 if (!xfs_mru_elem_zone)
306 return ENOMEM;
307
308 xfs_mru_reap_wq = create_singlethread_workqueue("xfs_mru_cache");
309 if (!xfs_mru_reap_wq) {
310 kmem_zone_destroy(xfs_mru_elem_zone);
311 return ENOMEM;
312 }
313
314 return 0;
315}
316
317void
318xfs_mru_cache_uninit(void)
319{
320 destroy_workqueue(xfs_mru_reap_wq);
321 kmem_zone_destroy(xfs_mru_elem_zone);
322}
323
324/*
325 * To initialise a struct xfs_mru_cache pointer, call xfs_mru_cache_create()
326 * with the address of the pointer, a lifetime value in milliseconds, a group
327 * count and a free function to use when deleting elements. This function
328 * returns 0 if the initialisation was successful.
329 */
330int
331xfs_mru_cache_create(
332 xfs_mru_cache_t **mrup,
333 unsigned int lifetime_ms,
334 unsigned int grp_count,
335 xfs_mru_cache_free_func_t free_func)
336{
337 xfs_mru_cache_t *mru = NULL;
338 int err = 0, grp;
339 unsigned int grp_time;
340
341 if (mrup)
342 *mrup = NULL;
343
344 if (!mrup || !grp_count || !lifetime_ms || !free_func)
345 return EINVAL;
346
347 if (!(grp_time = msecs_to_jiffies(lifetime_ms) / grp_count))
348 return EINVAL;
349
350 if (!(mru = kmem_zalloc(sizeof(*mru), KM_SLEEP)))
351 return ENOMEM;
352
353 /* An extra list is needed to avoid reaping up to a grp_time early. */
354 mru->grp_count = grp_count + 1;
355 mru->lists = kmem_alloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP);
356
357 if (!mru->lists) {
358 err = ENOMEM;
359 goto exit;
360 }
361
362 for (grp = 0; grp < mru->grp_count; grp++)
363 INIT_LIST_HEAD(mru->lists + grp);
364
365 /*
366 * We use GFP_KERNEL radix tree preload and do inserts under a
367 * spinlock so GFP_ATOMIC is appropriate for the radix tree itself.
368 */
369 INIT_RADIX_TREE(&mru->store, GFP_ATOMIC);
370 INIT_LIST_HEAD(&mru->reap_list);
371 spinlock_init(&mru->lock, "xfs_mru_cache");
372 INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap);
373
374 mru->grp_time = grp_time;
375 mru->free_func = free_func;
376
377 /* start up the reaper event */
378 mru->next_reap = 0;
379 mru->reap_all = 0;
380 queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
381
382 *mrup = mru;
383
384exit:
385 if (err && mru && mru->lists)
386 kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
387 if (err && mru)
388 kmem_free(mru, sizeof(*mru));
389
390 return err;
391}
392
393/*
394 * Call xfs_mru_cache_flush() to flush out all cached entries, calling their
395 * free functions as they're deleted. When this function returns, the caller is
396 * guaranteed that all the free functions for all the elements have finished
397 * executing.
398 *
399 * While we are flushing, we stop the periodic reaper event from triggering.
400 * Normally, we want to restart this periodic event, but if we are shutting
401 * down the cache we do not want it restarted. Hence the restart parameter
402 * where 0 = do not restart reaper and 1 = restart reaper.
403 */
404void
405xfs_mru_cache_flush(
406 xfs_mru_cache_t *mru,
407 int restart)
408{
409 if (!mru || !mru->lists)
410 return;
411
412 cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
413
414 mutex_spinlock(&mru->lock);
415 mru->reap_all = 1;
416 mutex_spinunlock(&mru->lock, 0);
417
418 queue_work(xfs_mru_reap_wq, &mru->work.work);
419 flush_workqueue(xfs_mru_reap_wq);
420
421 mutex_spinlock(&mru->lock);
422 WARN_ON_ONCE(mru->reap_all != 0);
423 mru->reap_all = 0;
424 if (restart)
425 queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
426 mutex_spinunlock(&mru->lock, 0);
427}
428
429void
430xfs_mru_cache_destroy(
431 xfs_mru_cache_t *mru)
432{
433 if (!mru || !mru->lists)
434 return;
435
436 /* we don't want the reaper to restart here */
437 xfs_mru_cache_flush(mru, 0);
438
439 kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
440 kmem_free(mru, sizeof(*mru));
441}
442
443/*
444 * To insert an element, call xfs_mru_cache_insert() with the data store, the
445 * element's key and the client data pointer. This function returns 0 on
446 * success or ENOMEM if memory for the data element couldn't be allocated.
447 */
448int
449xfs_mru_cache_insert(
450 xfs_mru_cache_t *mru,
451 unsigned long key,
452 void *value)
453{
454 xfs_mru_cache_elem_t *elem;
455
456 ASSERT(mru && mru->lists);
457 if (!mru || !mru->lists)
458 return EINVAL;
459
460 elem = kmem_zone_zalloc(xfs_mru_elem_zone, KM_SLEEP);
461 if (!elem)
462 return ENOMEM;
463
464 if (radix_tree_preload(GFP_KERNEL)) {
465 kmem_zone_free(xfs_mru_elem_zone, elem);
466 return ENOMEM;
467 }
468
469 INIT_LIST_HEAD(&elem->list_node);
470 elem->key = key;
471 elem->value = value;
472
473 mutex_spinlock(&mru->lock);
474
475 radix_tree_insert(&mru->store, key, elem);
476 radix_tree_preload_end();
477 _xfs_mru_cache_list_insert(mru, elem);
478
479 mutex_spinunlock(&mru->lock, 0);
480
481 return 0;
482}
483
484/*
485 * To remove an element without calling the free function, call
486 * xfs_mru_cache_remove() with the data store and the element's key. On success
487 * the client data pointer for the removed element is returned, otherwise this
488 * function will return a NULL pointer.
489 */
490void *
491xfs_mru_cache_remove(
492 xfs_mru_cache_t *mru,
493 unsigned long key)
494{
495 xfs_mru_cache_elem_t *elem;
496 void *value = NULL;
497
498 ASSERT(mru && mru->lists);
499 if (!mru || !mru->lists)
500 return NULL;
501
502 mutex_spinlock(&mru->lock);
503 elem = radix_tree_delete(&mru->store, key);
504 if (elem) {
505 value = elem->value;
506 list_del(&elem->list_node);
507 }
508
509 mutex_spinunlock(&mru->lock, 0);
510
511 if (elem)
512 kmem_zone_free(xfs_mru_elem_zone, elem);
513
514 return value;
515}
516
517/*
518 * To remove an element and call the free function, call xfs_mru_cache_delete()
519 * with the data store and the element's key.
520 */
521void
522xfs_mru_cache_delete(
523 xfs_mru_cache_t *mru,
524 unsigned long key)
525{
526 void *value = xfs_mru_cache_remove(mru, key);
527
528 if (value)
529 mru->free_func(key, value);
530}
531
532/*
533 * To look up an element using its key, call xfs_mru_cache_lookup() with the
534 * data store and the element's key. If found, the element will be moved to the
535 * head of the MRU list to indicate that it's been touched.
536 *
537 * The internal data structures are protected by a spinlock that is STILL HELD
538 * when this function returns. Call xfs_mru_cache_done() to release it. Note
539 * that it is not safe to call any function that might sleep in the interim.
540 *
541 * The implementation could have used reference counting to avoid this
542 * restriction, but since most clients simply want to get, set or test a member
543 * of the returned data structure, the extra per-element memory isn't warranted.
544 *
545 * If the element isn't found, this function returns NULL and the spinlock is
546 * released. xfs_mru_cache_done() should NOT be called when this occurs.
547 */
548void *
549xfs_mru_cache_lookup(
550 xfs_mru_cache_t *mru,
551 unsigned long key)
552{
553 xfs_mru_cache_elem_t *elem;
554
555 ASSERT(mru && mru->lists);
556 if (!mru || !mru->lists)
557 return NULL;
558
559 mutex_spinlock(&mru->lock);
560 elem = radix_tree_lookup(&mru->store, key);
561 if (elem) {
562 list_del(&elem->list_node);
563 _xfs_mru_cache_list_insert(mru, elem);
564 }
565 else
566 mutex_spinunlock(&mru->lock, 0);
567
568 return elem ? elem->value : NULL;
569}
570
571/*
572 * To look up an element using its key, but leave its location in the internal
573 * lists alone, call xfs_mru_cache_peek(). If the element isn't found, this
574 * function returns NULL.
575 *
576 * See the comments above the declaration of the xfs_mru_cache_lookup() function
577 * for important locking information pertaining to this call.
578 */
579void *
580xfs_mru_cache_peek(
581 xfs_mru_cache_t *mru,
582 unsigned long key)
583{
584 xfs_mru_cache_elem_t *elem;
585
586 ASSERT(mru && mru->lists);
587 if (!mru || !mru->lists)
588 return NULL;
589
590 mutex_spinlock(&mru->lock);
591 elem = radix_tree_lookup(&mru->store, key);
592 if (!elem)
593 mutex_spinunlock(&mru->lock, 0);
594
595 return elem ? elem->value : NULL;
596}
597
598/*
599 * To release the internal data structure spinlock after having performed an
600 * xfs_mru_cache_lookup() or an xfs_mru_cache_peek(), call xfs_mru_cache_done()
601 * with the data store pointer.
602 */
603void
604xfs_mru_cache_done(
605 xfs_mru_cache_t *mru)
606{
607 mutex_spinunlock(&mru->lock, 0);
608}
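For orientation, here is roughly how a client would be expected to drive the API defined above, following the usage comments on each function (note in particular the lookup/done lock discipline). This is an illustrative sketch rather than code from this patch: the myobj structure, myobj_free() and myobj_cache_example() are invented for the example, and error handling is minimal.

#include "xfs_mru_cache.h"

struct myobj {
	unsigned long	id;
	int		hits;
};

/* Called by the reaper, flush or delete once an entry expires. */
STATIC void
myobj_free(
	unsigned long	key,
	void		*value)
{
	kmem_free(value, sizeof(struct myobj));
}

int
myobj_cache_example(void)
{
	struct xfs_mru_cache	*mru;
	struct myobj		*obj;
	int			error;

	/* 10 second lifetime split into 5 groups, as in the example above. */
	error = xfs_mru_cache_create(&mru, 10 * 1000, 5, myobj_free);
	if (error)
		return error;

	obj = kmem_zalloc(sizeof(*obj), KM_SLEEP);
	obj->id = 42;
	error = xfs_mru_cache_insert(mru, obj->id, obj);
	if (error) {
		kmem_free(obj, sizeof(*obj));
		xfs_mru_cache_destroy(mru);
		return error;
	}

	/*
	 * Lookup returns with the internal spinlock still held, so only
	 * non-sleeping work may be done before xfs_mru_cache_done().
	 */
	obj = xfs_mru_cache_lookup(mru, 42);
	if (obj) {
		obj->hits++;
		xfs_mru_cache_done(mru);
	}

	/*
	 * Destroy flushes without restarting the reaper, so anything still
	 * cached is handed to myobj_free() before the cache goes away.
	 */
	xfs_mru_cache_destroy(mru);
	return 0;
}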
diff --git a/fs/xfs/xfs_mru_cache.h b/fs/xfs/xfs_mru_cache.h
new file mode 100644
index 000000000000..624fd10ee8e5
--- /dev/null
+++ b/fs/xfs/xfs_mru_cache.h
@@ -0,0 +1,57 @@
1/*
2 * Copyright (c) 2006-2007 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef __XFS_MRU_CACHE_H__
19#define __XFS_MRU_CACHE_H__
20
21
22/* Function pointer type for callback to free a client's data pointer. */
23typedef void (*xfs_mru_cache_free_func_t)(unsigned long, void*);
24
25typedef struct xfs_mru_cache
26{
27 struct radix_tree_root store; /* Core storage data structure. */
28 struct list_head *lists; /* Array of lists, one per grp. */
29 struct list_head reap_list; /* Elements overdue for reaping. */
30 spinlock_t lock; /* Lock to protect this struct. */
31 unsigned int grp_count; /* Number of discrete groups. */
32 unsigned int grp_time; /* Time period spanned by grps. */
33 unsigned int lru_grp; /* Group containing time zero. */
34 unsigned long time_zero; /* Time first element was added. */
35 unsigned long next_reap; /* Time that the reaper should
36 next do something. */
37 unsigned int reap_all; /* if set, reap all lists */
38 xfs_mru_cache_free_func_t free_func; /* Function pointer for freeing. */
39 struct delayed_work work; /* Workqueue data for reaping. */
40} xfs_mru_cache_t;
41
42int xfs_mru_cache_init(void);
43void xfs_mru_cache_uninit(void);
44int xfs_mru_cache_create(struct xfs_mru_cache **mrup, unsigned int lifetime_ms,
45 unsigned int grp_count,
46 xfs_mru_cache_free_func_t free_func);
47void xfs_mru_cache_flush(xfs_mru_cache_t *mru, int restart);
48void xfs_mru_cache_destroy(struct xfs_mru_cache *mru);
49int xfs_mru_cache_insert(struct xfs_mru_cache *mru, unsigned long key,
50 void *value);
51void * xfs_mru_cache_remove(struct xfs_mru_cache *mru, unsigned long key);
52void xfs_mru_cache_delete(struct xfs_mru_cache *mru, unsigned long key);
53void *xfs_mru_cache_lookup(struct xfs_mru_cache *mru, unsigned long key);
54void *xfs_mru_cache_peek(struct xfs_mru_cache *mru, unsigned long key);
55void xfs_mru_cache_done(struct xfs_mru_cache *mru);
56
57#endif /* __XFS_MRU_CACHE_H__ */
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index b3a5f07bd073..47082c01872d 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -1882,11 +1882,13 @@ xfs_growfs_rt(
1882 (nrblocks = in->newblocks) <= sbp->sb_rblocks || 1882 (nrblocks = in->newblocks) <= sbp->sb_rblocks ||
1883 (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize))) 1883 (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize)))
1884 return XFS_ERROR(EINVAL); 1884 return XFS_ERROR(EINVAL);
1885 if ((error = xfs_sb_validate_fsb_count(sbp, nrblocks)))
1886 return error;
1885 /* 1887 /*
1886 * Read in the last block of the device, make sure it exists. 1888 * Read in the last block of the device, make sure it exists.
1887 */ 1889 */
1888 error = xfs_read_buf(mp, mp->m_rtdev_targp, 1890 error = xfs_read_buf(mp, mp->m_rtdev_targp,
1889 XFS_FSB_TO_BB(mp, in->newblocks - 1), 1891 XFS_FSB_TO_BB(mp, nrblocks - 1),
1890 XFS_FSB_TO_BB(mp, 1), 0, &bp); 1892 XFS_FSB_TO_BB(mp, 1), 0, &bp);
1891 if (error) 1893 if (error)
1892 return error; 1894 return error;
diff --git a/fs/xfs/xfs_rw.h b/fs/xfs/xfs_rw.h
index 188b296ff50c..fcf28dbded7c 100644
--- a/fs/xfs/xfs_rw.h
+++ b/fs/xfs/xfs_rw.h
@@ -72,6 +72,34 @@ xfs_fsb_to_db_io(struct xfs_iocore *io, xfs_fsblock_t fsb)
72} 72}
73 73
74/* 74/*
75 * Flags for xfs_free_eofblocks
76 */
77#define XFS_FREE_EOF_LOCK (1<<0)
78#define XFS_FREE_EOF_NOLOCK (1<<1)
79
80
81/*
82 * helper function to extract extent size hint from inode
83 */
84STATIC_INLINE xfs_extlen_t
85xfs_get_extsz_hint(
86 xfs_inode_t *ip)
87{
88 xfs_extlen_t extsz;
89
90 if (unlikely(ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) {
91 extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
92 ? ip->i_d.di_extsize
93 : ip->i_mount->m_sb.sb_rextsize;
94 ASSERT(extsz);
95 } else {
96 extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
97 ? ip->i_d.di_extsize : 0;
98 }
99 return extsz;
100}
101
102/*
75 * Prototypes for functions in xfs_rw.c. 103 * Prototypes for functions in xfs_rw.c.
76 */ 104 */
77extern int xfs_write_clear_setuid(struct xfs_inode *ip); 105extern int xfs_write_clear_setuid(struct xfs_inode *ip);
@@ -91,10 +119,12 @@ extern void xfs_ioerror_alert(char *func, struct xfs_mount *mp,
91extern int xfs_rwlock(bhv_desc_t *bdp, bhv_vrwlock_t write_lock); 119extern int xfs_rwlock(bhv_desc_t *bdp, bhv_vrwlock_t write_lock);
92extern void xfs_rwunlock(bhv_desc_t *bdp, bhv_vrwlock_t write_lock); 120extern void xfs_rwunlock(bhv_desc_t *bdp, bhv_vrwlock_t write_lock);
93extern int xfs_setattr(bhv_desc_t *, bhv_vattr_t *vap, int flags, 121extern int xfs_setattr(bhv_desc_t *, bhv_vattr_t *vap, int flags,
94 cred_t *credp); 122 cred_t *credp);
95extern int xfs_change_file_space(bhv_desc_t *bdp, int cmd, xfs_flock64_t *bf, 123extern int xfs_change_file_space(bhv_desc_t *bdp, int cmd, xfs_flock64_t *bf,
96 xfs_off_t offset, cred_t *credp, int flags); 124 xfs_off_t offset, cred_t *credp, int flags);
97extern int xfs_set_dmattrs(bhv_desc_t *bdp, u_int evmask, u_int16_t state, 125extern int xfs_set_dmattrs(bhv_desc_t *bdp, u_int evmask, u_int16_t state,
98 cred_t *credp); 126 cred_t *credp);
127extern int xfs_free_eofblocks(struct xfs_mount *mp, struct xfs_inode *ip,
128 int flags);
99 129
100#endif /* __XFS_RW_H__ */ 130#endif /* __XFS_RW_H__ */
diff --git a/fs/xfs/xfs_sb.h b/fs/xfs/xfs_sb.h
index 467854b45c8f..ef42537a607a 100644
--- a/fs/xfs/xfs_sb.h
+++ b/fs/xfs/xfs_sb.h
@@ -74,12 +74,13 @@ struct xfs_mount;
74 */ 74 */
75#define XFS_SB_VERSION2_REALFBITS 0x00ffffff /* Mask: features */ 75#define XFS_SB_VERSION2_REALFBITS 0x00ffffff /* Mask: features */
76#define XFS_SB_VERSION2_RESERVED1BIT 0x00000001 76#define XFS_SB_VERSION2_RESERVED1BIT 0x00000001
77#define XFS_SB_VERSION2_RESERVED2BIT 0x00000002 77#define XFS_SB_VERSION2_LAZYSBCOUNTBIT 0x00000002 /* Superblk counters */
78#define XFS_SB_VERSION2_RESERVED4BIT 0x00000004 78#define XFS_SB_VERSION2_RESERVED4BIT 0x00000004
79#define XFS_SB_VERSION2_ATTR2BIT 0x00000008 /* Inline attr rework */ 79#define XFS_SB_VERSION2_ATTR2BIT 0x00000008 /* Inline attr rework */
80 80
81#define XFS_SB_VERSION2_OKREALFBITS \ 81#define XFS_SB_VERSION2_OKREALFBITS \
82 (XFS_SB_VERSION2_ATTR2BIT) 82 (XFS_SB_VERSION2_LAZYSBCOUNTBIT | \
83 XFS_SB_VERSION2_ATTR2BIT)
83#define XFS_SB_VERSION2_OKSASHFBITS \ 84#define XFS_SB_VERSION2_OKSASHFBITS \
84 (0) 85 (0)
85#define XFS_SB_VERSION2_OKREALBITS \ 86#define XFS_SB_VERSION2_OKREALBITS \
@@ -181,6 +182,9 @@ typedef enum {
181#define XFS_SB_SHARED_VN XFS_SB_MVAL(SHARED_VN) 182#define XFS_SB_SHARED_VN XFS_SB_MVAL(SHARED_VN)
182#define XFS_SB_UNIT XFS_SB_MVAL(UNIT) 183#define XFS_SB_UNIT XFS_SB_MVAL(UNIT)
183#define XFS_SB_WIDTH XFS_SB_MVAL(WIDTH) 184#define XFS_SB_WIDTH XFS_SB_MVAL(WIDTH)
185#define XFS_SB_ICOUNT XFS_SB_MVAL(ICOUNT)
186#define XFS_SB_IFREE XFS_SB_MVAL(IFREE)
187#define XFS_SB_FDBLOCKS XFS_SB_MVAL(FDBLOCKS)
184#define XFS_SB_FEATURES2 XFS_SB_MVAL(FEATURES2) 188#define XFS_SB_FEATURES2 XFS_SB_MVAL(FEATURES2)
185#define XFS_SB_NUM_BITS ((int)XFS_SBS_FIELDCOUNT) 189#define XFS_SB_NUM_BITS ((int)XFS_SBS_FIELDCOUNT)
186#define XFS_SB_ALL_BITS ((1LL << XFS_SB_NUM_BITS) - 1) 190#define XFS_SB_ALL_BITS ((1LL << XFS_SB_NUM_BITS) - 1)
@@ -188,7 +192,7 @@ typedef enum {
188 (XFS_SB_UUID | XFS_SB_ROOTINO | XFS_SB_RBMINO | XFS_SB_RSUMINO | \ 192 (XFS_SB_UUID | XFS_SB_ROOTINO | XFS_SB_RBMINO | XFS_SB_RSUMINO | \
189 XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | \ 193 XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | \
190 XFS_SB_QFLAGS | XFS_SB_SHARED_VN | XFS_SB_UNIT | XFS_SB_WIDTH | \ 194 XFS_SB_QFLAGS | XFS_SB_SHARED_VN | XFS_SB_UNIT | XFS_SB_WIDTH | \
191 XFS_SB_FEATURES2) 195 XFS_SB_ICOUNT | XFS_SB_IFREE | XFS_SB_FDBLOCKS | XFS_SB_FEATURES2)
192 196
193 197
194/* 198/*
@@ -414,6 +418,12 @@ static inline int xfs_sb_version_hasmorebits(xfs_sb_t *sbp)
414 * ((sbp)->sb_features2 & XFS_SB_VERSION2_FUNBIT) 418 * ((sbp)->sb_features2 & XFS_SB_VERSION2_FUNBIT)
415 */ 419 */
416 420
421static inline int xfs_sb_version_haslazysbcount(xfs_sb_t *sbp)
422{
423 return (XFS_SB_VERSION_HASMOREBITS(sbp) && \
424 ((sbp)->sb_features2 & XFS_SB_VERSION2_LAZYSBCOUNTBIT));
425}
426
417#define XFS_SB_VERSION_HASATTR2(sbp) xfs_sb_version_hasattr2(sbp) 427#define XFS_SB_VERSION_HASATTR2(sbp) xfs_sb_version_hasattr2(sbp)
418static inline int xfs_sb_version_hasattr2(xfs_sb_t *sbp) 428static inline int xfs_sb_version_hasattr2(xfs_sb_t *sbp)
419{ 429{
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index cc2d60951e21..356d6627f581 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -427,6 +427,14 @@ undo_blocks:
427 * 427 *
428 * Mark the transaction structure to indicate that the superblock 428 * Mark the transaction structure to indicate that the superblock
429 * needs to be updated before committing. 429 * needs to be updated before committing.
430 *
431 * Because we may not be keeping track of allocated/free inodes and
432 * used filesystem blocks in the superblock, we do not mark the
433 * superblock dirty in this transaction if we modify these fields.
434 * We still need to update the transaction deltas so that they get
435 * applied to the incore superblock, but we don't want them to
436 * cause the superblock to get locked and logged if these are the
437 * only fields in the superblock that the transaction modifies.
430 */ 438 */
431void 439void
432xfs_trans_mod_sb( 440xfs_trans_mod_sb(
@@ -434,13 +442,19 @@ xfs_trans_mod_sb(
434 uint field, 442 uint field,
435 int64_t delta) 443 int64_t delta)
436{ 444{
445 uint32_t flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
446 xfs_mount_t *mp = tp->t_mountp;
437 447
438 switch (field) { 448 switch (field) {
439 case XFS_TRANS_SB_ICOUNT: 449 case XFS_TRANS_SB_ICOUNT:
440 tp->t_icount_delta += delta; 450 tp->t_icount_delta += delta;
451 if (xfs_sb_version_haslazysbcount(&mp->m_sb))
452 flags &= ~XFS_TRANS_SB_DIRTY;
441 break; 453 break;
442 case XFS_TRANS_SB_IFREE: 454 case XFS_TRANS_SB_IFREE:
443 tp->t_ifree_delta += delta; 455 tp->t_ifree_delta += delta;
456 if (xfs_sb_version_haslazysbcount(&mp->m_sb))
457 flags &= ~XFS_TRANS_SB_DIRTY;
444 break; 458 break;
445 case XFS_TRANS_SB_FDBLOCKS: 459 case XFS_TRANS_SB_FDBLOCKS:
446 /* 460 /*
@@ -453,6 +467,8 @@ xfs_trans_mod_sb(
453 ASSERT(tp->t_blk_res_used <= tp->t_blk_res); 467 ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
454 } 468 }
455 tp->t_fdblocks_delta += delta; 469 tp->t_fdblocks_delta += delta;
470 if (xfs_sb_version_haslazysbcount(&mp->m_sb))
471 flags &= ~XFS_TRANS_SB_DIRTY;
456 break; 472 break;
457 case XFS_TRANS_SB_RES_FDBLOCKS: 473 case XFS_TRANS_SB_RES_FDBLOCKS:
458 /* 474 /*
@@ -462,6 +478,8 @@ xfs_trans_mod_sb(
462 */ 478 */
463 ASSERT(delta < 0); 479 ASSERT(delta < 0);
464 tp->t_res_fdblocks_delta += delta; 480 tp->t_res_fdblocks_delta += delta;
481 if (xfs_sb_version_haslazysbcount(&mp->m_sb))
482 flags &= ~XFS_TRANS_SB_DIRTY;
465 break; 483 break;
466 case XFS_TRANS_SB_FREXTENTS: 484 case XFS_TRANS_SB_FREXTENTS:
467 /* 485 /*
@@ -515,7 +533,7 @@ xfs_trans_mod_sb(
515 return; 533 return;
516 } 534 }
517 535
518 tp->t_flags |= (XFS_TRANS_SB_DIRTY | XFS_TRANS_DIRTY); 536 tp->t_flags |= flags;
519} 537}
520 538
521/* 539/*
@@ -544,18 +562,23 @@ xfs_trans_apply_sb_deltas(
544 (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta + 562 (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
545 tp->t_ag_btree_delta)); 563 tp->t_ag_btree_delta));
546 564
547 if (tp->t_icount_delta != 0) { 565 /*
548 INT_MOD(sbp->sb_icount, ARCH_CONVERT, tp->t_icount_delta); 566 * Only update the superblock counters if we are logging them
549 } 567 */
550 if (tp->t_ifree_delta != 0) { 568 if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
551 INT_MOD(sbp->sb_ifree, ARCH_CONVERT, tp->t_ifree_delta); 569 if (tp->t_icount_delta != 0) {
552 } 570 INT_MOD(sbp->sb_icount, ARCH_CONVERT, tp->t_icount_delta);
571 }
572 if (tp->t_ifree_delta != 0) {
573 INT_MOD(sbp->sb_ifree, ARCH_CONVERT, tp->t_ifree_delta);
574 }
553 575
554 if (tp->t_fdblocks_delta != 0) { 576 if (tp->t_fdblocks_delta != 0) {
555 INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_fdblocks_delta); 577 INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_fdblocks_delta);
556 } 578 }
557 if (tp->t_res_fdblocks_delta != 0) { 579 if (tp->t_res_fdblocks_delta != 0) {
558 INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_res_fdblocks_delta); 580 INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_res_fdblocks_delta);
581 }
559 } 582 }
560 583
561 if (tp->t_frextents_delta != 0) { 584 if (tp->t_frextents_delta != 0) {
@@ -615,11 +638,23 @@ xfs_trans_apply_sb_deltas(
615} 638}
616 639
617/* 640/*
618 * xfs_trans_unreserve_and_mod_sb() is called to release unused 641 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
619 * reservations and apply superblock counter changes to the in-core 642 * and apply superblock counter changes to the in-core superblock. The
620 * superblock. 643 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
644 * applied to the in-core superblock. The idea is that that has already been
645 * done.
621 * 646 *
622 * This is done efficiently with a single call to xfs_mod_incore_sb_batch(). 647 * This is done efficiently with a single call to xfs_mod_incore_sb_batch().
648 * However, we have to ensure that we only modify each superblock field only
649 * once because the application of the delta values may not be atomic. That can
650 * lead to ENOSPC races occurring if we have two separate modifcations of the
651 * free space counter to put back the entire reservation and then take away
652 * what we used.
653 *
654 * If we are not logging superblock counters, then the inode allocated/free and
655 * used block counts are not updated in the on disk superblock. In this case,
656 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
657 * still need to update the incore superblock with the changes.
623 */ 658 */
624STATIC void 659STATIC void
625xfs_trans_unreserve_and_mod_sb( 660xfs_trans_unreserve_and_mod_sb(
@@ -627,40 +662,49 @@ xfs_trans_unreserve_and_mod_sb(
627{ 662{
628 xfs_mod_sb_t msb[14]; /* If you add cases, add entries */ 663 xfs_mod_sb_t msb[14]; /* If you add cases, add entries */
629 xfs_mod_sb_t *msbp; 664 xfs_mod_sb_t *msbp;
665 xfs_mount_t *mp = tp->t_mountp;
630 /* REFERENCED */ 666 /* REFERENCED */
631 int error; 667 int error;
632 int rsvd; 668 int rsvd;
669 int64_t blkdelta = 0;
670 int64_t rtxdelta = 0;
633 671
634 msbp = msb; 672 msbp = msb;
635 rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0; 673 rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
636 674
637 /* 675 /* calculate free blocks delta */
638 * Release any reserved blocks. Any that were allocated 676 if (tp->t_blk_res > 0)
639 * will be taken back again by fdblocks_delta below. 677 blkdelta = tp->t_blk_res;
640 */ 678
641 if (tp->t_blk_res > 0) { 679 if ((tp->t_fdblocks_delta != 0) &&
680 (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
681 (tp->t_flags & XFS_TRANS_SB_DIRTY)))
682 blkdelta += tp->t_fdblocks_delta;
683
684 if (blkdelta != 0) {
642 msbp->msb_field = XFS_SBS_FDBLOCKS; 685 msbp->msb_field = XFS_SBS_FDBLOCKS;
643 msbp->msb_delta = tp->t_blk_res; 686 msbp->msb_delta = blkdelta;
644 msbp++; 687 msbp++;
645 } 688 }
646 689
647 /* 690 /* calculate free realtime extents delta */
648 * Release any reserved real time extents . Any that were 691 if (tp->t_rtx_res > 0)
649 * allocated will be taken back again by frextents_delta below. 692 rtxdelta = tp->t_rtx_res;
650 */ 693
651 if (tp->t_rtx_res > 0) { 694 if ((tp->t_frextents_delta != 0) &&
695 (tp->t_flags & XFS_TRANS_SB_DIRTY))
696 rtxdelta += tp->t_frextents_delta;
697
698 if (rtxdelta != 0) {
652 msbp->msb_field = XFS_SBS_FREXTENTS; 699 msbp->msb_field = XFS_SBS_FREXTENTS;
653 msbp->msb_delta = tp->t_rtx_res; 700 msbp->msb_delta = rtxdelta;
654 msbp++; 701 msbp++;
655 } 702 }
656 703
657 /* 704 /* apply remaining deltas */
658 * Apply any superblock modifications to the in-core version. 705
659 * The t_res_fdblocks_delta and t_res_frextents_delta fields are 706 if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
660 * explicitly NOT applied to the in-core superblock. 707 (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
661 * The idea is that that has already been done.
662 */
663 if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
664 if (tp->t_icount_delta != 0) { 708 if (tp->t_icount_delta != 0) {
665 msbp->msb_field = XFS_SBS_ICOUNT; 709 msbp->msb_field = XFS_SBS_ICOUNT;
666 msbp->msb_delta = tp->t_icount_delta; 710 msbp->msb_delta = tp->t_icount_delta;
@@ -671,16 +715,9 @@ xfs_trans_unreserve_and_mod_sb(
671 msbp->msb_delta = tp->t_ifree_delta; 715 msbp->msb_delta = tp->t_ifree_delta;
672 msbp++; 716 msbp++;
673 } 717 }
674 if (tp->t_fdblocks_delta != 0) { 718 }
675 msbp->msb_field = XFS_SBS_FDBLOCKS; 719
676 msbp->msb_delta = tp->t_fdblocks_delta; 720 if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
677 msbp++;
678 }
679 if (tp->t_frextents_delta != 0) {
680 msbp->msb_field = XFS_SBS_FREXTENTS;
681 msbp->msb_delta = tp->t_frextents_delta;
682 msbp++;
683 }
684 if (tp->t_dblocks_delta != 0) { 721 if (tp->t_dblocks_delta != 0) {
685 msbp->msb_field = XFS_SBS_DBLOCKS; 722 msbp->msb_field = XFS_SBS_DBLOCKS;
686 msbp->msb_delta = tp->t_dblocks_delta; 723 msbp->msb_delta = tp->t_dblocks_delta;
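A rough illustration (not from this patch, numbers made up) of why the comment on xfs_trans_unreserve_and_mod_sb() above insists on folding the block reservation and the actual usage into a single free-block delta: applying them as two separate steps briefly overstates free space, which a concurrent reservation can race against.

#include <stdio.h>

int main(void)
{
	long fdblocks = 50;		/* free blocks on the fs (made up) */
	long t_blk_res = 100;		/* blocks reserved at transaction start */
	long t_fdblocks_delta = -30;	/* blocks the transaction actually used */

	/*
	 * Two-step unreserve: give the whole reservation back, then take the
	 * used blocks away again. Between the two steps the counter reads 30
	 * blocks too high, which another reservation may consume.
	 */
	long twostep = fdblocks + t_blk_res;	/* transiently 150 */
	twostep += t_fdblocks_delta;		/* settles at 120 */

	/* Single step: apply the net delta once, with no transient overshoot. */
	long onestep = fdblocks + (t_blk_res + t_fdblocks_delta);

	printf("two-step peak %ld, final %ld; one-step final %ld\n",
	       fdblocks + t_blk_res, twostep, onestep);
	return 0;
}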
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 7dfcc450366f..0e26e729023e 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -94,7 +94,8 @@ typedef struct xfs_trans_header {
94#define XFS_TRANS_GROWFSRT_ZERO 38 94#define XFS_TRANS_GROWFSRT_ZERO 38
95#define XFS_TRANS_GROWFSRT_FREE 39 95#define XFS_TRANS_GROWFSRT_FREE 39
96#define XFS_TRANS_SWAPEXT 40 96#define XFS_TRANS_SWAPEXT 40
97#define XFS_TRANS_TYPE_MAX 40 97#define XFS_TRANS_SB_COUNT 41
98#define XFS_TRANS_TYPE_MAX 41
98/* new transaction types need to be reflected in xfs_logprint(8) */ 99/* new transaction types need to be reflected in xfs_logprint(8) */
99 100
100 101
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index 65c561201cb8..11f5ea29a038 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -51,6 +51,8 @@
51#include "xfs_acl.h" 51#include "xfs_acl.h"
52#include "xfs_attr.h" 52#include "xfs_attr.h"
53#include "xfs_clnt.h" 53#include "xfs_clnt.h"
54#include "xfs_mru_cache.h"
55#include "xfs_filestream.h"
54#include "xfs_fsops.h" 56#include "xfs_fsops.h"
55 57
56STATIC int xfs_sync(bhv_desc_t *, int, cred_t *); 58STATIC int xfs_sync(bhv_desc_t *, int, cred_t *);
@@ -81,6 +83,8 @@ xfs_init(void)
81 xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf"); 83 xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
82 xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork"); 84 xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
83 xfs_acl_zone_init(xfs_acl_zone, "xfs_acl"); 85 xfs_acl_zone_init(xfs_acl_zone, "xfs_acl");
86 xfs_mru_cache_init();
87 xfs_filestream_init();
84 88
85 /* 89 /*
86 * The size of the zone allocated buf log item is the maximum 90 * The size of the zone allocated buf log item is the maximum
@@ -164,6 +168,8 @@ xfs_cleanup(void)
164 xfs_cleanup_procfs(); 168 xfs_cleanup_procfs();
165 xfs_sysctl_unregister(); 169 xfs_sysctl_unregister();
166 xfs_refcache_destroy(); 170 xfs_refcache_destroy();
171 xfs_filestream_uninit();
172 xfs_mru_cache_uninit();
167 xfs_acl_zone_destroy(xfs_acl_zone); 173 xfs_acl_zone_destroy(xfs_acl_zone);
168 174
169#ifdef XFS_DIR2_TRACE 175#ifdef XFS_DIR2_TRACE
@@ -320,6 +326,9 @@ xfs_start_flags(
320 else 326 else
321 mp->m_flags &= ~XFS_MOUNT_BARRIER; 327 mp->m_flags &= ~XFS_MOUNT_BARRIER;
322 328
329 if (ap->flags2 & XFSMNT2_FILESTREAMS)
330 mp->m_flags |= XFS_MOUNT_FILESTREAMS;
331
323 return 0; 332 return 0;
324} 333}
325 334
@@ -518,6 +527,9 @@ xfs_mount(
518 if (mp->m_flags & XFS_MOUNT_BARRIER) 527 if (mp->m_flags & XFS_MOUNT_BARRIER)
519 xfs_mountfs_check_barriers(mp); 528 xfs_mountfs_check_barriers(mp);
520 529
530 if ((error = xfs_filestream_mount(mp)))
531 goto error2;
532
521 error = XFS_IOINIT(vfsp, args, flags); 533 error = XFS_IOINIT(vfsp, args, flags);
522 if (error) 534 if (error)
523 goto error2; 535 goto error2;
@@ -575,6 +587,13 @@ xfs_unmount(
575 */ 587 */
576 xfs_refcache_purge_mp(mp); 588 xfs_refcache_purge_mp(mp);
577 589
590 /*
591 * Blow away any referenced inode in the filestreams cache.
592 * This can and will cause log traffic as inodes go inactive
593 * here.
594 */
595 xfs_filestream_unmount(mp);
596
578 XFS_bflush(mp->m_ddev_targp); 597 XFS_bflush(mp->m_ddev_targp);
579 error = xfs_unmount_flush(mp, 0); 598 error = xfs_unmount_flush(mp, 0);
580 if (error) 599 if (error)
@@ -640,7 +659,7 @@ xfs_quiesce_fs(
640 * we can write the unmount record. 659 * we can write the unmount record.
641 */ 660 */
642 do { 661 do {
643 xfs_syncsub(mp, SYNC_REMOUNT|SYNC_ATTR|SYNC_WAIT, NULL); 662 xfs_syncsub(mp, SYNC_INODE_QUIESCE, NULL);
644 pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1); 663 pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
645 if (!pincount) { 664 if (!pincount) {
646 delay(50); 665 delay(50);
@@ -651,6 +670,30 @@ xfs_quiesce_fs(
651 return 0; 670 return 0;
652} 671}
653 672
673/*
674 * Second stage of a quiesce. The data is already synced, now we have to take
675 * care of the metadata. New transactions are already blocked, so we need to
676 * wait for any remaining transactions to drain out before proceeding.
677 */
678STATIC void
679xfs_attr_quiesce(
680 xfs_mount_t *mp)
681{
682 /* wait for all modifications to complete */
683 while (atomic_read(&mp->m_active_trans) > 0)
684 delay(100);
685
686 /* flush inodes and push all remaining buffers out to disk */
687 xfs_quiesce_fs(mp);
688
689 ASSERT_ALWAYS(atomic_read(&mp->m_active_trans) == 0);
690
691 /* Push the superblock and write an unmount record */
692 xfs_log_sbcount(mp, 1);
693 xfs_log_unmount_write(mp);
694 xfs_unmountfs_writesb(mp);
695}
696
654STATIC int 697STATIC int
655xfs_mntupdate( 698xfs_mntupdate(
656 bhv_desc_t *bdp, 699 bhv_desc_t *bdp,
@@ -670,10 +713,9 @@ xfs_mntupdate(
670 mp->m_flags &= ~XFS_MOUNT_BARRIER; 713 mp->m_flags &= ~XFS_MOUNT_BARRIER;
671 } 714 }
672 } else if (!(vfsp->vfs_flag & VFS_RDONLY)) { /* rw -> ro */ 715 } else if (!(vfsp->vfs_flag & VFS_RDONLY)) { /* rw -> ro */
673 bhv_vfs_sync(vfsp, SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR, NULL); 716 xfs_filestream_flush(mp);
674 xfs_quiesce_fs(mp); 717 bhv_vfs_sync(vfsp, SYNC_DATA_QUIESCE, NULL);
675 xfs_log_unmount_write(mp); 718 xfs_attr_quiesce(mp);
676 xfs_unmountfs_writesb(mp);
677 vfsp->vfs_flag |= VFS_RDONLY; 719 vfsp->vfs_flag |= VFS_RDONLY;
678 } 720 }
679 return 0; 721 return 0;
@@ -887,6 +929,9 @@ xfs_sync(
887{ 929{
888 xfs_mount_t *mp = XFS_BHVTOM(bdp); 930 xfs_mount_t *mp = XFS_BHVTOM(bdp);
889 931
932 if (flags & SYNC_IOWAIT)
933 xfs_filestream_flush(mp);
934
890 return xfs_syncsub(mp, flags, NULL); 935 return xfs_syncsub(mp, flags, NULL);
891} 936}
892 937
@@ -1128,58 +1173,41 @@ xfs_sync_inodes(
1128 * in the inode list. 1173 * in the inode list.
1129 */ 1174 */
1130 1175
1131 if ((flags & SYNC_CLOSE) && (vp != NULL)) { 1176 /*
1132 /* 1177 * If we have to flush data or wait for I/O completion
1133 * This is the shutdown case. We just need to 1178 * we need to drop the ilock that we currently hold.
1134 * flush and invalidate all the pages associated 1179 * If we need to drop the lock, insert a marker if we
1135 * with the inode. Drop the inode lock since 1180 * have not already done so.
1136 * we can't hold it across calls to the buffer 1181 */
1137 * cache. 1182 if ((flags & (SYNC_CLOSE|SYNC_IOWAIT)) ||
1138 * 1183 ((flags & SYNC_DELWRI) && VN_DIRTY(vp))) {
1139 * We don't set the VREMAPPING bit in the vnode 1184 if (mount_locked) {
1140 * here, because we don't hold the vnode lock 1185 IPOINTER_INSERT(ip, mp);
1141 * exclusively. It doesn't really matter, though,
1142 * because we only come here when we're shutting
1143 * down anyway.
1144 */
1145 xfs_iunlock(ip, XFS_ILOCK_SHARED);
1146
1147 if (XFS_FORCED_SHUTDOWN(mp)) {
1148 bhv_vop_toss_pages(vp, 0, -1, FI_REMAPF);
1149 } else {
1150 error = bhv_vop_flushinval_pages(vp, 0, -1, FI_REMAPF);
1151 } 1186 }
1187 xfs_iunlock(ip, XFS_ILOCK_SHARED);
1152 1188
1153 xfs_ilock(ip, XFS_ILOCK_SHARED); 1189 if (flags & SYNC_CLOSE) {
1154 1190 /* Shutdown case. Flush and invalidate. */
1155 } else if ((flags & SYNC_DELWRI) && (vp != NULL)) { 1191 if (XFS_FORCED_SHUTDOWN(mp))
1156 if (VN_DIRTY(vp)) { 1192 bhv_vop_toss_pages(vp, 0, -1, FI_REMAPF);
1157 /* We need to have dropped the lock here, 1193 else
1158 * so insert a marker if we have not already 1194 error = bhv_vop_flushinval_pages(vp, 0,
1159 * done so. 1195 -1, FI_REMAPF);
1160 */ 1196 } else if ((flags & SYNC_DELWRI) && VN_DIRTY(vp)) {
1161 if (mount_locked) {
1162 IPOINTER_INSERT(ip, mp);
1163 }
1164
1165 /*
1166 * Drop the inode lock since we can't hold it
1167 * across calls to the buffer cache.
1168 */
1169 xfs_iunlock(ip, XFS_ILOCK_SHARED);
1170 error = bhv_vop_flush_pages(vp, (xfs_off_t)0, 1197 error = bhv_vop_flush_pages(vp, (xfs_off_t)0,
1171 -1, fflag, FI_NONE); 1198 -1, fflag, FI_NONE);
1172 xfs_ilock(ip, XFS_ILOCK_SHARED);
1173 } 1199 }
1174 1200
1201 /*
1202 * When freezing, we need to wait for all I/O (including direct
1203 * I/O) to complete, to ensure that no further data modification can
1204 * take place after this point.
1205 */
1206 if (flags & SYNC_IOWAIT)
1207 vn_iowait(vp);
1208
1209 xfs_ilock(ip, XFS_ILOCK_SHARED);
1175 } 1210 }
1176 /*
1177 * When freezing, we need to wait ensure all I/O (including direct
1178 * I/O) is complete to ensure no further data modification can take
1179 * place after this point
1180 */
1181 if (flags & SYNC_IOWAIT)
1182 vn_iowait(vp);
1183 1211
1184 if (flags & SYNC_BDFLUSH) { 1212 if (flags & SYNC_BDFLUSH) {
1185 if ((flags & SYNC_ATTR) && 1213 if ((flags & SYNC_ATTR) &&
@@ -1514,6 +1542,15 @@ xfs_syncsub(
1514 } 1542 }
1515 1543
1516 /* 1544 /*
1545 * If asked, update the disk superblock with incore counter values if we
1546 * are using non-persistent counters so that they don't get too far out
1547 * of sync if we crash or get a forced shutdown. We don't want to force
1548 * this to disk, just get a transaction into the iclogs....
1549 */
1550 if (flags & SYNC_SUPER)
1551 xfs_log_sbcount(mp, 0);
1552
1553 /*
1517 * Now check to see if the log needs a "dummy" transaction. 1554 * Now check to see if the log needs a "dummy" transaction.
1518 */ 1555 */
1519 1556
@@ -1645,6 +1682,7 @@ xfs_vget(
1645 * in stat(). */ 1682 * in stat(). */
1646#define MNTOPT_ATTR2 "attr2" /* do use attr2 attribute format */ 1683#define MNTOPT_ATTR2 "attr2" /* do use attr2 attribute format */
1647#define MNTOPT_NOATTR2 "noattr2" /* do not use attr2 attribute format */ 1684#define MNTOPT_NOATTR2 "noattr2" /* do not use attr2 attribute format */
1685#define MNTOPT_FILESTREAM "filestreams" /* use filestreams allocator */
1648 1686
1649STATIC unsigned long 1687STATIC unsigned long
1650suffix_strtoul(char *s, char **endp, unsigned int base) 1688suffix_strtoul(char *s, char **endp, unsigned int base)
@@ -1831,6 +1869,8 @@ xfs_parseargs(
1831 args->flags |= XFSMNT_ATTR2; 1869 args->flags |= XFSMNT_ATTR2;
1832 } else if (!strcmp(this_char, MNTOPT_NOATTR2)) { 1870 } else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
1833 args->flags &= ~XFSMNT_ATTR2; 1871 args->flags &= ~XFSMNT_ATTR2;
1872 } else if (!strcmp(this_char, MNTOPT_FILESTREAM)) {
1873 args->flags2 |= XFSMNT2_FILESTREAMS;
1834 } else if (!strcmp(this_char, "osyncisdsync")) { 1874 } else if (!strcmp(this_char, "osyncisdsync")) {
1835 /* no-op, this is now the default */ 1875 /* no-op, this is now the default */
1836 cmn_err(CE_WARN, 1876 cmn_err(CE_WARN,
@@ -1959,9 +1999,9 @@ xfs_showargs(
1959} 1999}
1960 2000
1961/* 2001/*
1962 * Second stage of a freeze. The data is already frozen, now we have to take 2002 * Second stage of a freeze. The data is already frozen so we only
1963 * care of the metadata. New transactions are already blocked, so we need to 2003 * need to take care of the metadata. Once that's done, write a dummy
1964 * wait for any remaining transactions to drain out before proceding. 2004 * record to dirty the log in case of a crash while frozen.
1965 */ 2005 */
1966STATIC void 2006STATIC void
1967xfs_freeze( 2007xfs_freeze(
@@ -1969,18 +2009,7 @@ xfs_freeze(
 {
 	xfs_mount_t	*mp = XFS_BHVTOM(bdp);
 
-	/* wait for all modifications to complete */
-	while (atomic_read(&mp->m_active_trans) > 0)
-		delay(100);
-
-	/* flush inodes and push all remaining buffers out to disk */
-	xfs_quiesce_fs(mp);
-
-	ASSERT_ALWAYS(atomic_read(&mp->m_active_trans) == 0);
-
-	/* Push the superblock and write an unmount record */
-	xfs_log_unmount_write(mp);
-	xfs_unmountfs_writesb(mp);
+	xfs_attr_quiesce(mp);
 	xfs_fs_log_dummy(mp);
 }
 
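The removed lines above are collapsed into a single xfs_attr_quiesce() call, leaving only the dummy log record at the call site. As a hedged sketch only, reconstructed from those removed lines rather than taken from the real helper, the consolidated function would have to do roughly the following:

/* Sketch reconstructed from the removal above; not the actual helper source. */
void
xfs_attr_quiesce(
	xfs_mount_t	*mp)
{
	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	ASSERT_ALWAYS(atomic_read(&mp->m_active_trans) == 0);

	/* Push the superblock and write an unmount record */
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}
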
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 70bc82f65311..79b522779aa4 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -51,6 +51,7 @@
 #include "xfs_refcache.h"
 #include "xfs_trans_space.h"
 #include "xfs_log_priv.h"
+#include "xfs_filestream.h"
 
 STATIC int
 xfs_open(
@@ -77,36 +78,6 @@ xfs_open(
 	return 0;
 }
 
-STATIC int
-xfs_close(
-	bhv_desc_t	*bdp,
-	int		flags,
-	lastclose_t	lastclose,
-	cred_t		*credp)
-{
-	bhv_vnode_t	*vp = BHV_TO_VNODE(bdp);
-	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
-
-	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
-		return XFS_ERROR(EIO);
-
-	if (lastclose != L_TRUE || !VN_ISREG(vp))
-		return 0;
-
-	/*
-	 * If we previously truncated this file and removed old data in
-	 * the process, we want to initiate "early" writeout on the last
-	 * close. This is an attempt to combat the notorious NULL files
-	 * problem which is particularly noticable from a truncate down,
-	 * buffered (re-)write (delalloc), followed by a crash. What we
-	 * are effectively doing here is significantly reducing the time
-	 * window where we'd otherwise be exposed to that problem.
-	 */
-	if (VUNTRUNCATE(vp) && VN_DIRTY(vp) && ip->i_delayed_blks > 0)
-		return bhv_vop_flush_pages(vp, 0, -1, XFS_B_ASYNC, FI_NONE);
-	return 0;
-}
-
 /*
  * xfs_getattr
  */
@@ -183,9 +154,8 @@ xfs_getattr(
 			 * realtime extent size or the realtime volume's
 			 * extent size.
 			 */
-			vap->va_blocksize = ip->i_d.di_extsize ?
-				(ip->i_d.di_extsize << mp->m_sb.sb_blocklog) :
-				(mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog);
+			vap->va_blocksize =
+				xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog;
 		}
 		break;
 	}
@@ -814,6 +784,8 @@ xfs_setattr(
 				di_flags |= XFS_DIFLAG_PROJINHERIT;
 			if (vap->va_xflags & XFS_XFLAG_NODEFRAG)
 				di_flags |= XFS_DIFLAG_NODEFRAG;
+			if (vap->va_xflags & XFS_XFLAG_FILESTREAM)
+				di_flags |= XFS_DIFLAG_FILESTREAM;
 			if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
 				if (vap->va_xflags & XFS_XFLAG_RTINHERIT)
 					di_flags |= XFS_DIFLAG_RTINHERIT;
@@ -1201,13 +1173,15 @@ xfs_fsync(
 }
 
 /*
- * This is called by xfs_inactive to free any blocks beyond eof,
- * when the link count isn't zero.
+ * This is called by xfs_inactive to free any blocks beyond eof
+ * when the link count isn't zero and by xfs_dm_punch_hole() when
+ * punching a hole to EOF.
  */
-STATIC int
-xfs_inactive_free_eofblocks(
+int
+xfs_free_eofblocks(
 	xfs_mount_t	*mp,
-	xfs_inode_t	*ip)
+	xfs_inode_t	*ip,
+	int		flags)
 {
 	xfs_trans_t	*tp;
 	int		error;
@@ -1216,6 +1190,7 @@ xfs_inactive_free_eofblocks(
 	xfs_filblks_t	map_len;
 	int		nimaps;
 	xfs_bmbt_irec_t	imap;
+	int		use_iolock = (flags & XFS_FREE_EOF_LOCK);
 
 	/*
 	 * Figure out if there are any blocks beyond the end
@@ -1256,11 +1231,14 @@ xfs_inactive_free_eofblocks(
 		 * cache and we can't
 		 * do that within a transaction.
 		 */
-		xfs_ilock(ip, XFS_IOLOCK_EXCL);
+		if (use_iolock)
+			xfs_ilock(ip, XFS_IOLOCK_EXCL);
 		error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE,
 				    ip->i_size);
 		if (error) {
-			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+			xfs_trans_cancel(tp, 0);
+			if (use_iolock)
+				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 			return error;
 		}
 
@@ -1297,7 +1275,8 @@ xfs_inactive_free_eofblocks(
 			error = xfs_trans_commit(tp,
 						XFS_TRANS_RELEASE_LOG_RES);
 		}
-		xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+		xfs_iunlock(ip, (use_iolock ? (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)
+				: XFS_ILOCK_EXCL));
 	}
 	return error;
 }
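
xfs_free_eofblocks() now takes a flags argument so callers can say whether it should acquire and release the iolock itself (XFS_FREE_EOF_LOCK) or assume the caller already holds it. A hedged sketch of the two call patterns follows; example_caller() and the already-locked path are assumptions, only xfs_free_eofblocks() and XFS_FREE_EOF_LOCK come from this diff.

/* Sketch only: shows the intended call patterns, not real XFS call sites. */
STATIC int
example_caller(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	int		already_locked)
{
	if (already_locked)
		/* caller holds XFS_IOLOCK_EXCL; only the ilock is cycled inside */
		return xfs_free_eofblocks(mp, ip, 0);

	/* let xfs_free_eofblocks() take and drop XFS_IOLOCK_EXCL itself */
	return xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK);
}
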
@@ -1560,6 +1539,31 @@ xfs_release(
 	if (vp->v_vfsp->vfs_flag & VFS_RDONLY)
 		return 0;
 
+	if (!XFS_FORCED_SHUTDOWN(mp)) {
+		/*
+		 * If we are using filestreams, and we have an unlinked
+		 * file that we are processing the last close on, then nothing
+		 * will be able to reopen and write to this file. Purge this
+		 * inode from the filestreams cache so that it doesn't delay
+		 * teardown of the inode.
+		 */
+		if ((ip->i_d.di_nlink == 0) && xfs_inode_is_filestream(ip))
+			xfs_filestream_deassociate(ip);
+
+		/*
+		 * If we previously truncated this file and removed old data
+		 * in the process, we want to initiate "early" writeout on
+		 * the last close. This is an attempt to combat the notorious
+		 * NULL files problem which is particularly noticeable from a
+		 * truncate down, buffered (re-)write (delalloc), followed by
+		 * a crash. What we are effectively doing here is
+		 * significantly reducing the time window where we'd otherwise
+		 * be exposed to that problem.
+		 */
+		if (VUNTRUNCATE(vp) && VN_DIRTY(vp) && ip->i_delayed_blks > 0)
+			bhv_vop_flush_pages(vp, 0, -1, XFS_B_ASYNC, FI_NONE);
+	}
+
 #ifdef HAVE_REFCACHE
 	/* If we are in the NFS reference cache then don't do this now */
 	if (ip->i_refcache)
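
The block added above starts asynchronous writeout on the last close of a file that was truncated and then rewritten, shrinking the window in which a crash could leave a zero-length ("NULL") file. Below is a runnable user-space analogue of that idea, using Linux's sync_file_range() rather than any XFS interface; the file name and contents are placeholders.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("data.tmp", O_CREAT | O_RDWR | O_TRUNC, 0644);

	if (fd < 0)
		return 1;
	if (write(fd, "rewritten contents\n", 19) != 19)
		perror("write");

	/* Kick off writeback of all dirty pages of the file, but do not wait;
	 * this narrows the window in which a crash leaves an empty file. */
	if (sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WRITE) != 0)
		perror("sync_file_range");

	return close(fd);
}
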
@@ -1573,7 +1577,8 @@ xfs_release(
 	    (ip->i_df.if_flags & XFS_IFEXTENTS)) &&
 	    (!(ip->i_d.di_flags &
 		(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
-		if ((error = xfs_inactive_free_eofblocks(mp, ip)))
+		error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK);
+		if (error)
 			return error;
 		/* Update linux inode block count after free above */
 		vn_to_inode(vp)->i_blocks = XFS_FSB_TO_BB(mp,
@@ -1654,7 +1659,8 @@ xfs_inactive(
 	    (!(ip->i_d.di_flags &
 		(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
 	     (ip->i_delayed_blks != 0)))) {
-		if ((error = xfs_inactive_free_eofblocks(mp, ip)))
+		error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK);
+		if (error)
 			return VN_INACTIVE_CACHE;
 		/* Update linux inode block count after free above */
 		vn_to_inode(vp)->i_blocks = XFS_FSB_TO_BB(mp,
@@ -1680,6 +1686,7 @@ xfs_inactive(
 
 		error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, 0);
 		if (error) {
+			xfs_trans_cancel(tp, 0);
 			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 			return VN_INACTIVE_CACHE;
 		}
@@ -2217,9 +2224,9 @@ static inline int
 xfs_lock_inumorder(int lock_mode, int subclass)
 {
 	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
-		lock_mode |= (subclass + XFS_IOLOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
+		lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
 	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
-		lock_mode |= (subclass + XFS_ILOCK_INUMORDER) << XFS_ILOCK_SHIFT;
+		lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;
 
 	return lock_mode;
 }
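
The fix above uses a single XFS_LOCK_INUMORDER base when packing a lockdep subclass into the per-lock-type bit fields of lock_mode. The standalone example below demonstrates the packing technique only: the shift and base values are made up, not the kernel's, and the packing is done unconditionally for brevity.

#include <stdio.h>

#define IOLOCK_SHIFT	16	/* illustrative bit positions */
#define ILOCK_SHIFT	24
#define LOCK_INUMORDER	2	/* illustrative base: lower subclasses reserved */

/* Pack (base + subclass) into each lock type's field of the mode word. */
static unsigned int lock_inumorder(unsigned int lock_mode, unsigned int subclass)
{
	lock_mode |= (subclass + LOCK_INUMORDER) << IOLOCK_SHIFT;
	lock_mode |= (subclass + LOCK_INUMORDER) << ILOCK_SHIFT;
	return lock_mode;
}

int main(void)
{
	printf("lock_mode for subclass 1: %#x\n", lock_inumorder(0, 1));
	return 0;
}
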
@@ -2546,6 +2553,15 @@ xfs_remove(
 	 */
 	xfs_refcache_purge_ip(ip);
 
+	/*
+	 * If we are using filestreams, kill the stream association.
+	 * If the file is still open it may get a new one but that
+	 * will get killed on last close in xfs_release() so we don't
+	 * have to worry about that.
+	 */
+	if (link_zero && xfs_inode_is_filestream(ip))
+		xfs_filestream_deassociate(ip);
+
 	vn_trace_exit(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
 
 	/*
@@ -4047,22 +4063,16 @@ xfs_alloc_file_space(
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return XFS_ERROR(EIO);
 
-	rt = XFS_IS_REALTIME_INODE(ip);
-	if (unlikely(rt)) {
-		if (!(extsz = ip->i_d.di_extsize))
-			extsz = mp->m_sb.sb_rextsize;
-	} else {
-		extsz = ip->i_d.di_extsize;
-	}
-
 	if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
 		return error;
 
 	if (len <= 0)
 		return XFS_ERROR(EINVAL);
 
+	rt = XFS_IS_REALTIME_INODE(ip);
+	extsz = xfs_get_extsz_hint(ip);
+
 	count = len;
-	error = 0;
 	imapp = &imaps[0];
 	nimaps = 1;
 	bmapi_flag = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0);
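
The open-coded extent-size-hint logic removed above is replaced by xfs_get_extsz_hint(). Reconstructed from those removed lines, and hedged as an assumption rather than the real helper's source, the hint presumably amounts to:

/* Sketch reconstructed from the removed open-coded logic above. */
xfs_extlen_t
xfs_get_extsz_hint(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;

	/* realtime inodes fall back to the realtime volume's extent size */
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_d.di_extsize ? ip->i_d.di_extsize
					  : mp->m_sb.sb_rextsize;
	return ip->i_d.di_extsize;
}
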
@@ -4678,7 +4688,6 @@ xfs_change_file_space(
 bhv_vnodeops_t xfs_vnodeops = {
 	BHV_IDENTITY_INIT(VN_BHV_XFS,VNODE_POSITION_XFS),
 	.vop_open		= xfs_open,
-	.vop_close		= xfs_close,
 	.vop_read		= xfs_read,
 #ifdef HAVE_SPLICE
 	.vop_splice_read	= xfs_splice_read,
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 402eb4eb6b23..9927cca14cb7 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -151,6 +151,7 @@ int radix_tree_preload(gfp_t gfp_mask)
 out:
 	return ret;
 }
+EXPORT_SYMBOL(radix_tree_preload);
 
 static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
 		int offset)
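
Exporting radix_tree_preload() lets modular code (such as XFS here) use the standard preload-then-insert pattern. A hedged kernel-style sketch of that pattern follows; my_tree, my_lock, cache_insert(), index and item are placeholders, only the radix-tree and spinlock calls themselves are existing kernel API.

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static RADIX_TREE(my_tree, GFP_ATOMIC);	/* gfp mask stored in the tree root */
static DEFINE_SPINLOCK(my_lock);

static int cache_insert(unsigned long index, void *item)
{
	int error;

	/* Pre-allocate radix-tree nodes while sleeping is still allowed ... */
	error = radix_tree_preload(GFP_KERNEL);
	if (error)
		return error;

	/* ... then insert under the spinlock without needing to allocate. */
	spin_lock(&my_lock);
	error = radix_tree_insert(&my_tree, index, item);
	spin_unlock(&my_lock);
	radix_tree_preload_end();

	return error;
}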