author     Linus Torvalds <torvalds@g5.osdl.org>  2006-09-29 12:36:55 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-09-29 12:36:55 -0400
commit     c0341b0f47722fbe5ab45f436fc6ddc1c58c0a6f (patch)
tree       63279f95e7250a3c465eb061be97fff071d0903b
parent     ae1390d8c3e2142e5cf6d192951d6e2b1fa213c5 (diff)
parent     1b06e7926694178e146ff708b2c15a6da64c9765 (diff)
Merge git://oss.sgi.com:8090/xfs/xfs-2.6
* git://oss.sgi.com:8090/xfs/xfs-2.6: (49 commits)
  [XFS] Remove v1 dir trace macro - missed in a past commit.
  [XFS] 955947: Infinite loop in xfs_bulkstat() on formatter() error
  [XFS] pv 956241, author: nathans, rv: vapo - make ino validation checks
  [XFS] pv 956240, author: nathans, rv: vapo - Minor fixes in
  [XFS] Really fix use after free in xfs_iunpin.
  [XFS] Collapse sv_init and init_sv into just the one interface.
  [XFS] standardize on one sema init macro
  [XFS] Reduce endian flipping in alloc_btree, same as was done for
  [XFS] Minor cleanup from dio locking fix, remove an extra conditional.
  [XFS] Fix kmem_zalloc_greedy warnings on 64 bit platforms.
  [XFS] pv 955157, rv bnaujok - break the loop on EFAULT formatter() error
  [XFS] pv 955157, rv bnaujok - break the loop on formatter() error
  [XFS] Fixes the leak in reservation space because we weren't ungranting
  [XFS] Add lock annotations to xfs_trans_update_ail and
  [XFS] Fix a porting botch on the realtime subvol growfs code path.
  [XFS] Minor code rearranging and cleanup to prevent some coverity false
  [XFS] Remove a no-longer-correct debug assert from dio completion
  [XFS] Add a greedy allocation interface, allocating within a min/max size
  [XFS] Improve error handling for the zero-fsblock extent detection code.
  [XFS] Be more defensive with page flags (error/private) for metadata
  ...
-rw-r--r--  fs/xfs/Makefile-linux-2.6 | 1
-rw-r--r--  fs/xfs/linux-2.6/kmem.c | 29
-rw-r--r--  fs/xfs/linux-2.6/kmem.h | 6
-rw-r--r--  fs/xfs/linux-2.6/sema.h | 2
-rw-r--r--  fs/xfs/linux-2.6/sv.h | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 9
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 51
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.h | 7
-rw-r--r--  fs/xfs/linux-2.6/xfs_globals.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c | 19
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c | 25
-rw-r--r--  fs/xfs/linux-2.6/xfs_linux.h | 14
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.c | 10
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_vfs.h | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_vnode.h | 2
-rw-r--r--  fs/xfs/quota/xfs_dquot_item.c | 26
-rw-r--r--  fs/xfs/quota/xfs_qm.c | 14
-rw-r--r--  fs/xfs/quota/xfs_qm.h | 6
-rw-r--r--  fs/xfs/quota/xfs_quota_priv.h | 2
-rw-r--r--  fs/xfs/support/ktrace.c | 2
-rw-r--r--  fs/xfs/xfs_ag.h | 2
-rw-r--r--  fs/xfs/xfs_alloc.c | 10
-rw-r--r--  fs/xfs/xfs_alloc_btree.c | 132
-rw-r--r--  fs/xfs/xfs_attr.c | 181
-rw-r--r--  fs/xfs/xfs_attr.h | 8
-rw-r--r--  fs/xfs/xfs_attr_leaf.c | 351
-rw-r--r--  fs/xfs/xfs_attr_leaf.h | 41
-rw-r--r--  fs/xfs/xfs_behavior.c | 20
-rw-r--r--  fs/xfs/xfs_behavior.h | 2
-rw-r--r--  fs/xfs/xfs_bmap.c | 90
-rw-r--r--  fs/xfs/xfs_bmap_btree.c | 113
-rw-r--r--  fs/xfs/xfs_bmap_btree.h | 11
-rw-r--r--  fs/xfs/xfs_btree.c | 8
-rw-r--r--  fs/xfs/xfs_btree.h | 5
-rw-r--r--  fs/xfs/xfs_buf_item.c | 22
-rw-r--r--  fs/xfs/xfs_da_btree.c | 33
-rw-r--r--  fs/xfs/xfs_error.h | 9
-rw-r--r--  fs/xfs/xfs_extfree_item.c | 69
-rw-r--r--  fs/xfs/xfs_extfree_item.h | 50
-rw-r--r--  fs/xfs/xfs_fs.h | 8
-rw-r--r--  fs/xfs/xfs_ialloc.c | 11
-rw-r--r--  fs/xfs/xfs_ialloc_btree.c | 62
-rw-r--r--  fs/xfs/xfs_ialloc_btree.h | 19
-rw-r--r--  fs/xfs/xfs_iget.c | 44
-rw-r--r--  fs/xfs/xfs_inode.c | 30
-rw-r--r--  fs/xfs/xfs_inode.h | 12
-rw-r--r--  fs/xfs/xfs_inode_item.c | 16
-rw-r--r--  fs/xfs/xfs_inode_item.h | 66
-rw-r--r--  fs/xfs/xfs_iomap.c | 89
-rw-r--r--  fs/xfs/xfs_itable.c | 184
-rw-r--r--  fs/xfs/xfs_itable.h | 16
-rw-r--r--  fs/xfs/xfs_log.c | 19
-rw-r--r--  fs/xfs/xfs_log.h | 8
-rw-r--r--  fs/xfs/xfs_log_priv.h | 10
-rw-r--r--  fs/xfs/xfs_mount.h | 5
-rw-r--r--  fs/xfs/xfs_quota.h | 2
-rw-r--r--  fs/xfs/xfs_rtalloc.c | 38
-rw-r--r--  fs/xfs/xfs_sb.h | 22
-rw-r--r--  fs/xfs/xfs_trans.h | 2
-rw-r--r--  fs/xfs/xfs_trans_ail.c | 4
-rw-r--r--  fs/xfs/xfs_trans_priv.h | 12
-rw-r--r--  fs/xfs/xfs_vfsops.c | 2
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 26
64 files changed, 1060 insertions, 1037 deletions
diff --git a/fs/xfs/Makefile-linux-2.6 b/fs/xfs/Makefile-linux-2.6
index 9e7f85986d0d..291948d5085a 100644
--- a/fs/xfs/Makefile-linux-2.6
+++ b/fs/xfs/Makefile-linux-2.6
@@ -30,7 +30,6 @@ ifeq ($(CONFIG_XFS_TRACE),y)
30 EXTRA_CFLAGS += -DXFS_BLI_TRACE 30 EXTRA_CFLAGS += -DXFS_BLI_TRACE
31 EXTRA_CFLAGS += -DXFS_BMAP_TRACE 31 EXTRA_CFLAGS += -DXFS_BMAP_TRACE
32 EXTRA_CFLAGS += -DXFS_BMBT_TRACE 32 EXTRA_CFLAGS += -DXFS_BMBT_TRACE
33 EXTRA_CFLAGS += -DXFS_DIR_TRACE
34 EXTRA_CFLAGS += -DXFS_DIR2_TRACE 33 EXTRA_CFLAGS += -DXFS_DIR2_TRACE
35 EXTRA_CFLAGS += -DXFS_DQUOT_TRACE 34 EXTRA_CFLAGS += -DXFS_DQUOT_TRACE
36 EXTRA_CFLAGS += -DXFS_ILOCK_TRACE 35 EXTRA_CFLAGS += -DXFS_ILOCK_TRACE
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c
index aba7fcf881a2..d59737589815 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/linux-2.6/kmem.c
@@ -34,6 +34,14 @@ kmem_alloc(size_t size, unsigned int __nocast flags)
34 gfp_t lflags = kmem_flags_convert(flags); 34 gfp_t lflags = kmem_flags_convert(flags);
35 void *ptr; 35 void *ptr;
36 36
37#ifdef DEBUG
38 if (unlikely(!(flags & KM_LARGE) && (size > PAGE_SIZE))) {
39 printk(KERN_WARNING "Large %s attempt, size=%ld\n",
40 __FUNCTION__, (long)size);
41 dump_stack();
42 }
43#endif
44
37 do { 45 do {
38 if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS) 46 if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS)
39 ptr = kmalloc(size, lflags); 47 ptr = kmalloc(size, lflags);
@@ -60,6 +68,27 @@ kmem_zalloc(size_t size, unsigned int __nocast flags)
60 return ptr; 68 return ptr;
61} 69}
62 70
71void *
72kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize,
73 unsigned int __nocast flags)
74{
75 void *ptr;
76 size_t kmsize = maxsize;
77 unsigned int kmflags = (flags & ~KM_SLEEP) | KM_NOSLEEP;
78
79 while (!(ptr = kmem_zalloc(kmsize, kmflags))) {
80 if ((kmsize <= minsize) && (flags & KM_NOSLEEP))
81 break;
82 if ((kmsize >>= 1) <= minsize) {
83 kmsize = minsize;
84 kmflags = flags;
85 }
86 }
87 if (ptr)
88 *size = kmsize;
89 return ptr;
90}
91
63void 92void
64kmem_free(void *ptr, size_t size) 93kmem_free(void *ptr, size_t size)
65{ 94{
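
[Editor's note] The kmem.c hunk above introduces kmem_zalloc_greedy(): ask for maxsize first, halve the request on each allocation failure, and only settle at minsize. Below is a minimal userspace sketch of that back-off pattern, using calloc() in place of kmem_zalloc() and a made-up zalloc_greedy() name; it is an illustration of the pattern, not the kernel interface (the kernel version additionally juggles the KM_SLEEP/KM_NOSLEEP flags).

#include <stdlib.h>
#include <stddef.h>

/*
 * Userspace sketch of the kmem_zalloc_greedy() back-off: start at maxsize,
 * halve on allocation failure, and give up once even minsize fails.
 * zalloc_greedy() and the calloc() fallback are illustrative only.
 */
static void *zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
{
	size_t attempt = maxsize;
	void *ptr;

	while (!(ptr = calloc(1, attempt))) {	/* zeroed, like kmem_zalloc() */
		if (attempt <= minsize)
			return NULL;		/* even the minimum failed */
		attempt >>= 1;
		if (attempt < minsize)
			attempt = minsize;	/* final try at the floor */
	}
	*size = attempt;	/* report the size actually obtained */
	return ptr;
}

The quota changes later in this merge use the real helper exactly this way: xfs_Gqm_init() asks for a dquot hash table between XFS_QM_HASHSIZE_LOW and XFS_QM_HASHSIZE_HIGH, then divides the returned byte count back into hash-table entries.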
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index 0e8293c5a32f..9ebabdf7829c 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -30,6 +30,7 @@
30#define KM_NOSLEEP 0x0002u 30#define KM_NOSLEEP 0x0002u
31#define KM_NOFS 0x0004u 31#define KM_NOFS 0x0004u
32#define KM_MAYFAIL 0x0008u 32#define KM_MAYFAIL 0x0008u
33#define KM_LARGE 0x0010u
33 34
34/* 35/*
35 * We use a special process flag to avoid recursive callbacks into 36 * We use a special process flag to avoid recursive callbacks into
@@ -41,7 +42,7 @@ kmem_flags_convert(unsigned int __nocast flags)
41{ 42{
42 gfp_t lflags; 43 gfp_t lflags;
43 44
44 BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL)); 45 BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_LARGE));
45 46
46 if (flags & KM_NOSLEEP) { 47 if (flags & KM_NOSLEEP) {
47 lflags = GFP_ATOMIC | __GFP_NOWARN; 48 lflags = GFP_ATOMIC | __GFP_NOWARN;
@@ -54,8 +55,9 @@ kmem_flags_convert(unsigned int __nocast flags)
54} 55}
55 56
56extern void *kmem_alloc(size_t, unsigned int __nocast); 57extern void *kmem_alloc(size_t, unsigned int __nocast);
57extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
58extern void *kmem_zalloc(size_t, unsigned int __nocast); 58extern void *kmem_zalloc(size_t, unsigned int __nocast);
59extern void *kmem_zalloc_greedy(size_t *, size_t, size_t, unsigned int __nocast);
60extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
59extern void kmem_free(void *, size_t); 61extern void kmem_free(void *, size_t);
60 62
61/* 63/*
diff --git a/fs/xfs/linux-2.6/sema.h b/fs/xfs/linux-2.6/sema.h
index b25090094cca..2009e6d922ce 100644
--- a/fs/xfs/linux-2.6/sema.h
+++ b/fs/xfs/linux-2.6/sema.h
@@ -29,8 +29,6 @@
29 29
30typedef struct semaphore sema_t; 30typedef struct semaphore sema_t;
31 31
32#define init_sema(sp, val, c, d) sema_init(sp, val)
33#define initsema(sp, val) sema_init(sp, val)
34#define initnsema(sp, val, name) sema_init(sp, val) 32#define initnsema(sp, val, name) sema_init(sp, val)
35#define psema(sp, b) down(sp) 33#define psema(sp, b) down(sp)
36#define vsema(sp) up(sp) 34#define vsema(sp) up(sp)
diff --git a/fs/xfs/linux-2.6/sv.h b/fs/xfs/linux-2.6/sv.h
index 9a8ad481b008..351a8f454bd1 100644
--- a/fs/xfs/linux-2.6/sv.h
+++ b/fs/xfs/linux-2.6/sv.h
@@ -53,8 +53,6 @@ static inline void _sv_wait(sv_t *sv, spinlock_t *lock, int state,
53 remove_wait_queue(&sv->waiters, &wait); 53 remove_wait_queue(&sv->waiters, &wait);
54} 54}
55 55
56#define init_sv(sv,type,name,flag) \
57 init_waitqueue_head(&(sv)->waiters)
58#define sv_init(sv,flag,name) \ 56#define sv_init(sv,flag,name) \
59 init_waitqueue_head(&(sv)->waiters) 57 init_waitqueue_head(&(sv)->waiters)
60#define sv_destroy(sv) \ 58#define sv_destroy(sv) \
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 34dcb43a7837..09360cf1e1f2 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -71,7 +71,7 @@ xfs_page_trace(
71 int tag, 71 int tag,
72 struct inode *inode, 72 struct inode *inode,
73 struct page *page, 73 struct page *page,
74 int mask) 74 unsigned long pgoff)
75{ 75{
76 xfs_inode_t *ip; 76 xfs_inode_t *ip;
77 bhv_vnode_t *vp = vn_from_inode(inode); 77 bhv_vnode_t *vp = vn_from_inode(inode);
@@ -91,7 +91,7 @@ xfs_page_trace(
91 (void *)ip, 91 (void *)ip,
92 (void *)inode, 92 (void *)inode,
93 (void *)page, 93 (void *)page,
94 (void *)((unsigned long)mask), 94 (void *)pgoff,
95 (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)), 95 (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
96 (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)), 96 (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
97 (void *)((unsigned long)((isize >> 32) & 0xffffffff)), 97 (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
@@ -105,7 +105,7 @@ xfs_page_trace(
105 (void *)NULL); 105 (void *)NULL);
106} 106}
107#else 107#else
108#define xfs_page_trace(tag, inode, page, mask) 108#define xfs_page_trace(tag, inode, page, pgoff)
109#endif 109#endif
110 110
111/* 111/*
@@ -1197,7 +1197,7 @@ xfs_vm_releasepage(
1197 .nr_to_write = 1, 1197 .nr_to_write = 1,
1198 }; 1198 };
1199 1199
1200 xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask); 1200 xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);
1201 1201
1202 if (!page_has_buffers(page)) 1202 if (!page_has_buffers(page))
1203 return 0; 1203 return 0;
@@ -1356,7 +1356,6 @@ xfs_end_io_direct(
1356 ioend->io_size = size; 1356 ioend->io_size = size;
1357 xfs_finish_ioend(ioend); 1357 xfs_finish_ioend(ioend);
1358 } else { 1358 } else {
1359 ASSERT(size >= 0);
1360 xfs_destroy_ioend(ioend); 1359 xfs_destroy_ioend(ioend);
1361 } 1360 }
1362 1361
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 2af528dcfb04..9bbadafdcb00 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. 2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved. 3 * All Rights Reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -318,8 +318,12 @@ xfs_buf_free(
318 if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1)) 318 if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
319 free_address(bp->b_addr - bp->b_offset); 319 free_address(bp->b_addr - bp->b_offset);
320 320
321 for (i = 0; i < bp->b_page_count; i++) 321 for (i = 0; i < bp->b_page_count; i++) {
322 page_cache_release(bp->b_pages[i]); 322 struct page *page = bp->b_pages[i];
323
324 ASSERT(!PagePrivate(page));
325 page_cache_release(page);
326 }
323 _xfs_buf_free_pages(bp); 327 _xfs_buf_free_pages(bp);
324 } else if (bp->b_flags & _XBF_KMEM_ALLOC) { 328 } else if (bp->b_flags & _XBF_KMEM_ALLOC) {
325 /* 329 /*
@@ -400,6 +404,7 @@ _xfs_buf_lookup_pages(
400 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset); 404 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
401 size -= nbytes; 405 size -= nbytes;
402 406
407 ASSERT(!PagePrivate(page));
403 if (!PageUptodate(page)) { 408 if (!PageUptodate(page)) {
404 page_count--; 409 page_count--;
405 if (blocksize >= PAGE_CACHE_SIZE) { 410 if (blocksize >= PAGE_CACHE_SIZE) {
@@ -768,7 +773,7 @@ xfs_buf_get_noaddr(
768 _xfs_buf_initialize(bp, target, 0, len, 0); 773 _xfs_buf_initialize(bp, target, 0, len, 0);
769 774
770 try_again: 775 try_again:
771 data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL); 776 data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL | KM_LARGE);
772 if (unlikely(data == NULL)) 777 if (unlikely(data == NULL))
773 goto fail_free_buf; 778 goto fail_free_buf;
774 779
@@ -1117,10 +1122,10 @@ xfs_buf_bio_end_io(
1117 do { 1122 do {
1118 struct page *page = bvec->bv_page; 1123 struct page *page = bvec->bv_page;
1119 1124
1125 ASSERT(!PagePrivate(page));
1120 if (unlikely(bp->b_error)) { 1126 if (unlikely(bp->b_error)) {
1121 if (bp->b_flags & XBF_READ) 1127 if (bp->b_flags & XBF_READ)
1122 ClearPageUptodate(page); 1128 ClearPageUptodate(page);
1123 SetPageError(page);
1124 } else if (blocksize >= PAGE_CACHE_SIZE) { 1129 } else if (blocksize >= PAGE_CACHE_SIZE) {
1125 SetPageUptodate(page); 1130 SetPageUptodate(page);
1126 } else if (!PagePrivate(page) && 1131 } else if (!PagePrivate(page) &&
@@ -1156,16 +1161,16 @@ _xfs_buf_ioapply(
1156 total_nr_pages = bp->b_page_count; 1161 total_nr_pages = bp->b_page_count;
1157 map_i = 0; 1162 map_i = 0;
1158 1163
1159 if (bp->b_flags & _XBF_RUN_QUEUES) {
1160 bp->b_flags &= ~_XBF_RUN_QUEUES;
1161 rw = (bp->b_flags & XBF_READ) ? READ_SYNC : WRITE_SYNC;
1162 } else {
1163 rw = (bp->b_flags & XBF_READ) ? READ : WRITE;
1164 }
1165
1166 if (bp->b_flags & XBF_ORDERED) { 1164 if (bp->b_flags & XBF_ORDERED) {
1167 ASSERT(!(bp->b_flags & XBF_READ)); 1165 ASSERT(!(bp->b_flags & XBF_READ));
1168 rw = WRITE_BARRIER; 1166 rw = WRITE_BARRIER;
1167 } else if (bp->b_flags & _XBF_RUN_QUEUES) {
1168 ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
1169 bp->b_flags &= ~_XBF_RUN_QUEUES;
1170 rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
1171 } else {
1172 rw = (bp->b_flags & XBF_WRITE) ? WRITE :
1173 (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
1169 } 1174 }
1170 1175
1171 /* Special code path for reading a sub page size buffer in -- 1176 /* Special code path for reading a sub page size buffer in --
@@ -1681,6 +1686,7 @@ xfsbufd(
1681 xfs_buf_t *bp, *n; 1686 xfs_buf_t *bp, *n;
1682 struct list_head *dwq = &target->bt_delwrite_queue; 1687 struct list_head *dwq = &target->bt_delwrite_queue;
1683 spinlock_t *dwlk = &target->bt_delwrite_lock; 1688 spinlock_t *dwlk = &target->bt_delwrite_lock;
1689 int count;
1684 1690
1685 current->flags |= PF_MEMALLOC; 1691 current->flags |= PF_MEMALLOC;
1686 1692
@@ -1696,6 +1702,7 @@ xfsbufd(
1696 schedule_timeout_interruptible( 1702 schedule_timeout_interruptible(
1697 xfs_buf_timer_centisecs * msecs_to_jiffies(10)); 1703 xfs_buf_timer_centisecs * msecs_to_jiffies(10));
1698 1704
1705 count = 0;
1699 age = xfs_buf_age_centisecs * msecs_to_jiffies(10); 1706 age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
1700 spin_lock(dwlk); 1707 spin_lock(dwlk);
1701 list_for_each_entry_safe(bp, n, dwq, b_list) { 1708 list_for_each_entry_safe(bp, n, dwq, b_list) {
@@ -1711,9 +1718,11 @@ xfsbufd(
1711 break; 1718 break;
1712 } 1719 }
1713 1720
1714 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q); 1721 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
1722 _XBF_RUN_QUEUES);
1715 bp->b_flags |= XBF_WRITE; 1723 bp->b_flags |= XBF_WRITE;
1716 list_move(&bp->b_list, &tmp); 1724 list_move_tail(&bp->b_list, &tmp);
1725 count++;
1717 } 1726 }
1718 } 1727 }
1719 spin_unlock(dwlk); 1728 spin_unlock(dwlk);
@@ -1724,12 +1733,12 @@ xfsbufd(
1724 1733
1725 list_del_init(&bp->b_list); 1734 list_del_init(&bp->b_list);
1726 xfs_buf_iostrategy(bp); 1735 xfs_buf_iostrategy(bp);
1727
1728 blk_run_address_space(target->bt_mapping);
1729 } 1736 }
1730 1737
1731 if (as_list_len > 0) 1738 if (as_list_len > 0)
1732 purge_addresses(); 1739 purge_addresses();
1740 if (count)
1741 blk_run_address_space(target->bt_mapping);
1733 1742
1734 clear_bit(XBT_FORCE_FLUSH, &target->bt_flags); 1743 clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1735 } while (!kthread_should_stop()); 1744 } while (!kthread_should_stop());
@@ -1767,7 +1776,7 @@ xfs_flush_buftarg(
1767 continue; 1776 continue;
1768 } 1777 }
1769 1778
1770 list_move(&bp->b_list, &tmp); 1779 list_move_tail(&bp->b_list, &tmp);
1771 } 1780 }
1772 spin_unlock(dwlk); 1781 spin_unlock(dwlk);
1773 1782
@@ -1776,7 +1785,7 @@ xfs_flush_buftarg(
1776 */ 1785 */
1777 list_for_each_entry_safe(bp, n, &tmp, b_list) { 1786 list_for_each_entry_safe(bp, n, &tmp, b_list) {
1778 xfs_buf_lock(bp); 1787 xfs_buf_lock(bp);
1779 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q); 1788 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|_XBF_RUN_QUEUES);
1780 bp->b_flags |= XBF_WRITE; 1789 bp->b_flags |= XBF_WRITE;
1781 if (wait) 1790 if (wait)
1782 bp->b_flags &= ~XBF_ASYNC; 1791 bp->b_flags &= ~XBF_ASYNC;
@@ -1786,6 +1795,9 @@ xfs_flush_buftarg(
1786 xfs_buf_iostrategy(bp); 1795 xfs_buf_iostrategy(bp);
1787 } 1796 }
1788 1797
1798 if (wait)
1799 blk_run_address_space(target->bt_mapping);
1800
1789 /* 1801 /*
1790 * Remaining list items must be flushed before returning 1802 * Remaining list items must be flushed before returning
1791 */ 1803 */
@@ -1797,9 +1809,6 @@ xfs_flush_buftarg(
1797 xfs_buf_relse(bp); 1809 xfs_buf_relse(bp);
1798 } 1810 }
1799 1811
1800 if (wait)
1801 blk_run_address_space(target->bt_mapping);
1802
1803 return pincount; 1812 return pincount;
1804} 1813}
1805 1814
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 7858703ed84c..9dd235cb0107 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -298,11 +298,6 @@ extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
298#define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE) 298#define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE)
299#define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE) 299#define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE)
300 300
301#define XFS_BUF_ISUNINITIAL(bp) (0)
302#define XFS_BUF_UNUNINITIAL(bp) (0)
303
304#define XFS_BUF_BP_ISMAPPED(bp) (1)
305
306#define XFS_BUF_IODONE_FUNC(bp) ((bp)->b_iodone) 301#define XFS_BUF_IODONE_FUNC(bp) ((bp)->b_iodone)
307#define XFS_BUF_SET_IODONE_FUNC(bp, func) ((bp)->b_iodone = (func)) 302#define XFS_BUF_SET_IODONE_FUNC(bp, func) ((bp)->b_iodone = (func))
308#define XFS_BUF_CLR_IODONE_FUNC(bp) ((bp)->b_iodone = NULL) 303#define XFS_BUF_CLR_IODONE_FUNC(bp) ((bp)->b_iodone = NULL)
@@ -393,8 +388,6 @@ static inline int XFS_bwrite(xfs_buf_t *bp)
393 return error; 388 return error;
394} 389}
395 390
396#define XFS_bdwrite(bp) xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC)
397
398static inline int xfs_bdwrite(void *mp, xfs_buf_t *bp) 391static inline int xfs_bdwrite(void *mp, xfs_buf_t *bp)
399{ 392{
400 bp->b_strat = xfs_bdstrat_cb; 393 bp->b_strat = xfs_bdstrat_cb;
diff --git a/fs/xfs/linux-2.6/xfs_globals.c b/fs/xfs/linux-2.6/xfs_globals.c
index 6c162c3dde7e..ed3a5e1b4b67 100644
--- a/fs/xfs/linux-2.6/xfs_globals.c
+++ b/fs/xfs/linux-2.6/xfs_globals.c
@@ -34,7 +34,7 @@ xfs_param_t xfs_params = {
34 .restrict_chown = { 0, 1, 1 }, 34 .restrict_chown = { 0, 1, 1 },
35 .sgid_inherit = { 0, 0, 1 }, 35 .sgid_inherit = { 0, 0, 1 },
36 .symlink_mode = { 0, 0, 1 }, 36 .symlink_mode = { 0, 0, 1 },
37 .panic_mask = { 0, 0, 127 }, 37 .panic_mask = { 0, 0, 255 },
38 .error_level = { 0, 3, 11 }, 38 .error_level = { 0, 3, 11 },
39 .syncd_timer = { 1*100, 30*100, 7200*100}, 39 .syncd_timer = { 1*100, 30*100, 7200*100},
40 .stats_clear = { 0, 0, 1 }, 40 .stats_clear = { 0, 0, 1 },
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 6e52a5dd38d8..a74f854d91e6 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -653,7 +653,7 @@ xfs_attrmulti_by_handle(
653STATIC int 653STATIC int
654xfs_ioc_space( 654xfs_ioc_space(
655 bhv_desc_t *bdp, 655 bhv_desc_t *bdp,
656 bhv_vnode_t *vp, 656 struct inode *inode,
657 struct file *filp, 657 struct file *filp,
658 int flags, 658 int flags,
659 unsigned int cmd, 659 unsigned int cmd,
@@ -735,7 +735,7 @@ xfs_ioctl(
735 !capable(CAP_SYS_ADMIN)) 735 !capable(CAP_SYS_ADMIN))
736 return -EPERM; 736 return -EPERM;
737 737
738 return xfs_ioc_space(bdp, vp, filp, ioflags, cmd, arg); 738 return xfs_ioc_space(bdp, inode, filp, ioflags, cmd, arg);
739 739
740 case XFS_IOC_DIOINFO: { 740 case XFS_IOC_DIOINFO: {
741 struct dioattr da; 741 struct dioattr da;
@@ -763,6 +763,8 @@ xfs_ioctl(
763 return xfs_ioc_fsgeometry(mp, arg); 763 return xfs_ioc_fsgeometry(mp, arg);
764 764
765 case XFS_IOC_GETVERSION: 765 case XFS_IOC_GETVERSION:
766 return put_user(inode->i_generation, (int __user *)arg);
767
766 case XFS_IOC_GETXFLAGS: 768 case XFS_IOC_GETXFLAGS:
767 case XFS_IOC_SETXFLAGS: 769 case XFS_IOC_SETXFLAGS:
768 case XFS_IOC_FSGETXATTR: 770 case XFS_IOC_FSGETXATTR:
@@ -957,7 +959,7 @@ xfs_ioctl(
957STATIC int 959STATIC int
958xfs_ioc_space( 960xfs_ioc_space(
959 bhv_desc_t *bdp, 961 bhv_desc_t *bdp,
960 bhv_vnode_t *vp, 962 struct inode *inode,
961 struct file *filp, 963 struct file *filp,
962 int ioflags, 964 int ioflags,
963 unsigned int cmd, 965 unsigned int cmd,
@@ -967,13 +969,13 @@ xfs_ioc_space(
967 int attr_flags = 0; 969 int attr_flags = 0;
968 int error; 970 int error;
969 971
970 if (vp->v_inode.i_flags & (S_IMMUTABLE|S_APPEND)) 972 if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
971 return -XFS_ERROR(EPERM); 973 return -XFS_ERROR(EPERM);
972 974
973 if (!(filp->f_mode & FMODE_WRITE)) 975 if (!(filp->f_mode & FMODE_WRITE))
974 return -XFS_ERROR(EBADF); 976 return -XFS_ERROR(EBADF);
975 977
976 if (!VN_ISREG(vp)) 978 if (!S_ISREG(inode->i_mode))
977 return -XFS_ERROR(EINVAL); 979 return -XFS_ERROR(EINVAL);
978 980
979 if (copy_from_user(&bf, arg, sizeof(bf))) 981 if (copy_from_user(&bf, arg, sizeof(bf)))
@@ -1264,13 +1266,6 @@ xfs_ioc_xattr(
1264 break; 1266 break;
1265 } 1267 }
1266 1268
1267 case XFS_IOC_GETVERSION: {
1268 flags = vn_to_inode(vp)->i_generation;
1269 if (copy_to_user(arg, &flags, sizeof(flags)))
1270 error = -EFAULT;
1271 break;
1272 }
1273
1274 default: 1269 default:
1275 error = -ENOTTY; 1270 error = -ENOTTY;
1276 break; 1271 break;
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 22e3b714f629..3ba814ae3bba 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -623,12 +623,27 @@ xfs_vn_getattr(
623{ 623{
624 struct inode *inode = dentry->d_inode; 624 struct inode *inode = dentry->d_inode;
625 bhv_vnode_t *vp = vn_from_inode(inode); 625 bhv_vnode_t *vp = vn_from_inode(inode);
626 int error = 0; 626 bhv_vattr_t vattr = { .va_mask = XFS_AT_STAT };
627 int error;
627 628
628 if (unlikely(vp->v_flag & VMODIFIED)) 629 error = bhv_vop_getattr(vp, &vattr, ATTR_LAZY, NULL);
629 error = vn_revalidate(vp); 630 if (likely(!error)) {
630 if (!error) 631 stat->size = i_size_read(inode);
631 generic_fillattr(inode, stat); 632 stat->dev = inode->i_sb->s_dev;
633 stat->rdev = (vattr.va_rdev == 0) ? 0 :
634 MKDEV(sysv_major(vattr.va_rdev) & 0x1ff,
635 sysv_minor(vattr.va_rdev));
636 stat->mode = vattr.va_mode;
637 stat->nlink = vattr.va_nlink;
638 stat->uid = vattr.va_uid;
639 stat->gid = vattr.va_gid;
640 stat->ino = vattr.va_nodeid;
641 stat->atime = vattr.va_atime;
642 stat->mtime = vattr.va_mtime;
643 stat->ctime = vattr.va_ctime;
644 stat->blocks = vattr.va_nblocks;
645 stat->blksize = vattr.va_blocksize;
646 }
632 return -error; 647 return -error;
633} 648}
634 649
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index a13f75c1a936..2b0e0018738a 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -148,11 +148,7 @@ BUFFER_FNS(PrivateStart, unwritten);
148 (current->flags = ((current->flags & ~(f)) | (*(sp) & (f)))) 148 (current->flags = ((current->flags & ~(f)) | (*(sp) & (f))))
149 149
150#define NBPP PAGE_SIZE 150#define NBPP PAGE_SIZE
151#define DPPSHFT (PAGE_SHIFT - 9)
152#define NDPP (1 << (PAGE_SHIFT - 9)) 151#define NDPP (1 << (PAGE_SHIFT - 9))
153#define dtop(DD) (((DD) + NDPP - 1) >> DPPSHFT)
154#define dtopt(DD) ((DD) >> DPPSHFT)
155#define dpoff(DD) ((DD) & (NDPP-1))
156 152
157#define NBBY 8 /* number of bits per byte */ 153#define NBBY 8 /* number of bits per byte */
158#define NBPC PAGE_SIZE /* Number of bytes per click */ 154#define NBPC PAGE_SIZE /* Number of bytes per click */
@@ -172,8 +168,6 @@ BUFFER_FNS(PrivateStart, unwritten);
172#define btoct(x) ((__psunsigned_t)(x)>>BPCSHIFT) 168#define btoct(x) ((__psunsigned_t)(x)>>BPCSHIFT)
173#define btoc64(x) (((__uint64_t)(x)+(NBPC-1))>>BPCSHIFT) 169#define btoc64(x) (((__uint64_t)(x)+(NBPC-1))>>BPCSHIFT)
174#define btoct64(x) ((__uint64_t)(x)>>BPCSHIFT) 170#define btoct64(x) ((__uint64_t)(x)>>BPCSHIFT)
175#define io_btoc(x) (((__psunsigned_t)(x)+(IO_NBPC-1))>>IO_BPCSHIFT)
176#define io_btoct(x) ((__psunsigned_t)(x)>>IO_BPCSHIFT)
177 171
178/* off_t bytes to clicks */ 172/* off_t bytes to clicks */
179#define offtoc(x) (((__uint64_t)(x)+(NBPC-1))>>BPCSHIFT) 173#define offtoc(x) (((__uint64_t)(x)+(NBPC-1))>>BPCSHIFT)
@@ -186,7 +180,6 @@ BUFFER_FNS(PrivateStart, unwritten);
186#define ctob(x) ((__psunsigned_t)(x)<<BPCSHIFT) 180#define ctob(x) ((__psunsigned_t)(x)<<BPCSHIFT)
187#define btoct(x) ((__psunsigned_t)(x)>>BPCSHIFT) 181#define btoct(x) ((__psunsigned_t)(x)>>BPCSHIFT)
188#define ctob64(x) ((__uint64_t)(x)<<BPCSHIFT) 182#define ctob64(x) ((__uint64_t)(x)<<BPCSHIFT)
189#define io_ctob(x) ((__psunsigned_t)(x)<<IO_BPCSHIFT)
190 183
191/* bytes to clicks */ 184/* bytes to clicks */
192#define btoc(x) (((__psunsigned_t)(x)+(NBPC-1))>>BPCSHIFT) 185#define btoc(x) (((__psunsigned_t)(x)+(NBPC-1))>>BPCSHIFT)
@@ -339,4 +332,11 @@ static inline __uint64_t roundup_64(__uint64_t x, __uint32_t y)
339 return(x * y); 332 return(x * y);
340} 333}
341 334
335static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
336{
337 x += y - 1;
338 do_div(x, y);
339 return x;
340}
341
342#endif /* __XFS_LINUX__ */ 342#endif /* __XFS_LINUX__ */
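
[Editor's note] The howmany_64() helper added to xfs_linux.h above is 64-bit ceiling division, written with do_div() so it also works on 32-bit kernels where a plain 64-by-32 divide is unavailable. A standalone illustration of the same arithmetic follows; the howmany_u64() name is made up for this sketch, and in userspace an ordinary division suffices.

#include <stdint.h>
#include <assert.h>

/* Ceiling division, as howmany_64() computes it: round x up to whole units of y. */
static uint64_t howmany_u64(uint64_t x, uint32_t y)
{
	return (x + y - 1) / y;	/* the kernel helper performs the divide via do_div() */
}

int main(void)
{
	assert(howmany_u64(0, 512) == 0);
	assert(howmany_u64(1, 512) == 1);	/* a partial unit still costs one */
	assert(howmany_u64(512, 512) == 1);
	assert(howmany_u64(513, 512) == 2);
	return 0;
}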
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index ee788b1cb364..55992b40353c 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -270,12 +270,12 @@ xfs_read(
270 } 270 }
271 } 271 }
272 272
273 if (unlikely((ioflags & IO_ISDIRECT) && VN_CACHED(vp))) 273 if (unlikely(ioflags & IO_ISDIRECT)) {
274 bhv_vop_flushinval_pages(vp, ctooff(offtoct(*offset)), 274 if (VN_CACHED(vp))
275 -1, FI_REMAPF_LOCKED); 275 bhv_vop_flushinval_pages(vp, ctooff(offtoct(*offset)),
276 276 -1, FI_REMAPF_LOCKED);
277 if (unlikely(ioflags & IO_ISDIRECT))
278 mutex_unlock(&inode->i_mutex); 277 mutex_unlock(&inode->i_mutex);
278 }
279 279
280 xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore, 280 xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
281 (void *)iovp, segs, *offset, ioflags); 281 (void *)iovp, segs, *offset, ioflags);
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 9df9ed37d219..38c4d128a8c0 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -227,7 +227,9 @@ xfs_initialize_vnode(
227 xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip); 227 xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
228 xfs_set_inodeops(inode); 228 xfs_set_inodeops(inode);
229 229
230 spin_lock(&ip->i_flags_lock);
230 ip->i_flags &= ~XFS_INEW; 231 ip->i_flags &= ~XFS_INEW;
232 spin_unlock(&ip->i_flags_lock);
231 barrier(); 233 barrier();
232 234
233 unlock_new_inode(inode); 235 unlock_new_inode(inode);
diff --git a/fs/xfs/linux-2.6/xfs_vfs.h b/fs/xfs/linux-2.6/xfs_vfs.h
index 91fc2c4b3353..da255bdf5260 100644
--- a/fs/xfs/linux-2.6/xfs_vfs.h
+++ b/fs/xfs/linux-2.6/xfs_vfs.h
@@ -79,7 +79,7 @@ typedef enum {
79#define VFS_RDONLY 0x0001 /* read-only vfs */ 79#define VFS_RDONLY 0x0001 /* read-only vfs */
80#define VFS_GRPID 0x0002 /* group-ID assigned from directory */ 80#define VFS_GRPID 0x0002 /* group-ID assigned from directory */
81#define VFS_DMI 0x0004 /* filesystem has the DMI enabled */ 81#define VFS_DMI 0x0004 /* filesystem has the DMI enabled */
82#define VFS_UMOUNT 0x0008 /* unmount in progress */ 82/* ---- VFS_UMOUNT ---- 0x0008 -- unneeded, fixed via kthread APIs */
83#define VFS_32BITINODES 0x0010 /* do not use inums above 32 bits */ 83#define VFS_32BITINODES 0x0010 /* do not use inums above 32 bits */
84#define VFS_END 0x0010 /* max flag */ 84#define VFS_END 0x0010 /* max flag */
85 85
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index c42b3221b20c..515f5fdea57a 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -85,8 +85,6 @@ typedef enum {
85#define VN_BHV_HEAD(vp) ((bhv_head_t *)(&((vp)->v_bh))) 85#define VN_BHV_HEAD(vp) ((bhv_head_t *)(&((vp)->v_bh)))
86#define vn_bhv_head_init(bhp,name) bhv_head_init(bhp,name) 86#define vn_bhv_head_init(bhp,name) bhv_head_init(bhp,name)
87#define vn_bhv_remove(bhp,bdp) bhv_remove(bhp,bdp) 87#define vn_bhv_remove(bhp,bdp) bhv_remove(bhp,bdp)
88#define vn_bhv_lookup(bhp,ops) bhv_lookup(bhp,ops)
89#define vn_bhv_lookup_unlocked(bhp,ops) bhv_lookup_unlocked(bhp,ops)
90 88
91/* 89/*
92 * Vnode to Linux inode mapping. 90 * Vnode to Linux inode mapping.
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index 5b2dcc58b244..33ad5af386e0 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -382,18 +382,6 @@ xfs_qm_dquot_logitem_unlock(
382 382
383 383
384/* 384/*
385 * The transaction with the dquot locked has aborted. The dquot
386 * must not be dirty within the transaction. We simply unlock just
387 * as if the transaction had been cancelled.
388 */
389STATIC void
390xfs_qm_dquot_logitem_abort(
391 xfs_dq_logitem_t *ql)
392{
393 xfs_qm_dquot_logitem_unlock(ql);
394}
395
396/*
397 * this needs to stamp an lsn into the dquot, I think. 385 * this needs to stamp an lsn into the dquot, I think.
398 * rpc's that look at user dquot's would then have to 386 * rpc's that look at user dquot's would then have to
399 * push on the dependency recorded in the dquot 387 * push on the dependency recorded in the dquot
@@ -426,7 +414,6 @@ STATIC struct xfs_item_ops xfs_dquot_item_ops = {
426 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) 414 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
427 xfs_qm_dquot_logitem_committed, 415 xfs_qm_dquot_logitem_committed,
428 .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_push, 416 .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_push,
429 .iop_abort = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_abort,
430 .iop_pushbuf = (void(*)(xfs_log_item_t*)) 417 .iop_pushbuf = (void(*)(xfs_log_item_t*))
431 xfs_qm_dquot_logitem_pushbuf, 418 xfs_qm_dquot_logitem_pushbuf,
432 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) 419 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
@@ -559,17 +546,6 @@ xfs_qm_qoff_logitem_committed(xfs_qoff_logitem_t *qf, xfs_lsn_t lsn)
559} 546}
560 547
561/* 548/*
562 * The transaction of which this QUOTAOFF is a part has been aborted.
563 * Just clean up after ourselves.
564 * Shouldn't this never happen in the case of qoffend logitems? XXX
565 */
566STATIC void
567xfs_qm_qoff_logitem_abort(xfs_qoff_logitem_t *qf)
568{
569 kmem_free(qf, sizeof(xfs_qoff_logitem_t));
570}
571
572/*
573 * There isn't much you can do to push on an quotaoff item. It is simply 549 * There isn't much you can do to push on an quotaoff item. It is simply
574 * stuck waiting for the log to be flushed to disk. 550 * stuck waiting for the log to be flushed to disk.
575 */ 551 */
@@ -644,7 +620,6 @@ STATIC struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
644 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) 620 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
645 xfs_qm_qoffend_logitem_committed, 621 xfs_qm_qoffend_logitem_committed,
646 .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push, 622 .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push,
647 .iop_abort = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_abort,
648 .iop_pushbuf = NULL, 623 .iop_pushbuf = NULL,
649 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) 624 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
650 xfs_qm_qoffend_logitem_committing 625 xfs_qm_qoffend_logitem_committing
@@ -667,7 +642,6 @@ STATIC struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
667 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) 642 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
668 xfs_qm_qoff_logitem_committed, 643 xfs_qm_qoff_logitem_committed,
669 .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push, 644 .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push,
670 .iop_abort = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_abort,
671 .iop_pushbuf = NULL, 645 .iop_pushbuf = NULL,
672 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) 646 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
673 xfs_qm_qoff_logitem_committing 647 xfs_qm_qoff_logitem_committing
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index e23e45535c48..7c6a3a50379e 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -112,17 +112,17 @@ xfs_Gqm_init(void)
112{ 112{
113 xfs_dqhash_t *udqhash, *gdqhash; 113 xfs_dqhash_t *udqhash, *gdqhash;
114 xfs_qm_t *xqm; 114 xfs_qm_t *xqm;
115 uint i, hsize, flags = KM_SLEEP | KM_MAYFAIL; 115 size_t hsize;
116 uint i;
116 117
117 /* 118 /*
118 * Initialize the dquot hash tables. 119 * Initialize the dquot hash tables.
119 */ 120 */
120 hsize = XFS_QM_HASHSIZE_HIGH; 121 udqhash = kmem_zalloc_greedy(&hsize,
121 while (!(udqhash = kmem_zalloc(hsize * sizeof(xfs_dqhash_t), flags))) { 122 XFS_QM_HASHSIZE_LOW, XFS_QM_HASHSIZE_HIGH,
122 if ((hsize >>= 1) <= XFS_QM_HASHSIZE_LOW) 123 KM_SLEEP | KM_MAYFAIL | KM_LARGE);
123 flags = KM_SLEEP; 124 gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE);
124 } 125 hsize /= sizeof(xfs_dqhash_t);
125 gdqhash = kmem_zalloc(hsize * sizeof(xfs_dqhash_t), KM_SLEEP);
126 ndquot = hsize << 8; 126 ndquot = hsize << 8;
127 127
128 xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP); 128 xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
index 4568deb6da86..689407de0a20 100644
--- a/fs/xfs/quota/xfs_qm.h
+++ b/fs/xfs/quota/xfs_qm.h
@@ -56,12 +56,6 @@ extern kmem_zone_t *qm_dqtrxzone;
56#define XFS_QM_HASHSIZE_HIGH ((NBPP * 4) / sizeof(xfs_dqhash_t)) 56#define XFS_QM_HASHSIZE_HIGH ((NBPP * 4) / sizeof(xfs_dqhash_t))
57 57
58/* 58/*
59 * We output a cmn_err when quotachecking a quota file with more than
60 * this many fsbs.
61 */
62#define XFS_QM_BIG_QCHECK_NBLKS 500
63
64/*
65 * This defines the unit of allocation of dquots. 59 * This defines the unit of allocation of dquots.
66 * Currently, it is just one file system block, and a 4K blk contains 30 60 * Currently, it is just one file system block, and a 4K blk contains 30
67 * (136 * 30 = 4080) dquots. It's probably not worth trying to make 61 * (136 * 30 = 4080) dquots. It's probably not worth trying to make
diff --git a/fs/xfs/quota/xfs_quota_priv.h b/fs/xfs/quota/xfs_quota_priv.h
index b7ddd04aae32..a8b85e2be9d5 100644
--- a/fs/xfs/quota/xfs_quota_priv.h
+++ b/fs/xfs/quota/xfs_quota_priv.h
@@ -75,7 +75,6 @@ static inline int XQMISLCKD(struct xfs_dqhash *h)
75 75
76#define xfs_qm_freelist_lock(qm) XQMLCK(&((qm)->qm_dqfreelist)) 76#define xfs_qm_freelist_lock(qm) XQMLCK(&((qm)->qm_dqfreelist))
77#define xfs_qm_freelist_unlock(qm) XQMUNLCK(&((qm)->qm_dqfreelist)) 77#define xfs_qm_freelist_unlock(qm) XQMUNLCK(&((qm)->qm_dqfreelist))
78#define XFS_QM_IS_FREELIST_LOCKED(qm) XQMISLCKD(&((qm)->qm_dqfreelist))
79 78
80/* 79/*
81 * Hash into a bucket in the dquot hash table, based on <mp, id>. 80 * Hash into a bucket in the dquot hash table, based on <mp, id>.
@@ -170,6 +169,5 @@ for ((dqp) = (qlist)->qh_next; (dqp) != (xfs_dquot_t *)(qlist); \
170#define DQFLAGTO_TYPESTR(d) (((d)->dq_flags & XFS_DQ_USER) ? "USR" : \ 169#define DQFLAGTO_TYPESTR(d) (((d)->dq_flags & XFS_DQ_USER) ? "USR" : \
171 (((d)->dq_flags & XFS_DQ_GROUP) ? "GRP" : \ 170 (((d)->dq_flags & XFS_DQ_GROUP) ? "GRP" : \
172 (((d)->dq_flags & XFS_DQ_PROJ) ? "PRJ":"???"))) 171 (((d)->dq_flags & XFS_DQ_PROJ) ? "PRJ":"???")))
173#define DQFLAGTO_DIRTYSTR(d) (XFS_DQ_IS_DIRTY(d) ? "DIRTY" : "NOTDIRTY")
174 172
175#endif /* __XFS_QUOTA_PRIV_H__ */ 173#endif /* __XFS_QUOTA_PRIV_H__ */
diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c
index addf5a7ea06c..5cf2e86caa71 100644
--- a/fs/xfs/support/ktrace.c
+++ b/fs/xfs/support/ktrace.c
@@ -75,7 +75,7 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep)
75 sleep); 75 sleep);
76 } else { 76 } else {
77 ktep = (ktrace_entry_t*)kmem_zalloc((nentries * sizeof(*ktep)), 77 ktep = (ktrace_entry_t*)kmem_zalloc((nentries * sizeof(*ktep)),
78 sleep); 78 sleep | KM_LARGE);
79 } 79 }
80 80
81 if (ktep == NULL) { 81 if (ktep == NULL) {
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index dc2361dd740a..9ece7f87ec5b 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -150,7 +150,7 @@ typedef struct xfs_agi {
150#define XFS_BUF_TO_AGFL(bp) ((xfs_agfl_t *)XFS_BUF_PTR(bp)) 150#define XFS_BUF_TO_AGFL(bp) ((xfs_agfl_t *)XFS_BUF_PTR(bp))
151 151
152typedef struct xfs_agfl { 152typedef struct xfs_agfl {
153 xfs_agblock_t agfl_bno[1]; /* actually XFS_AGFL_SIZE(mp) */ 153 __be32 agfl_bno[1]; /* actually XFS_AGFL_SIZE(mp) */
154} xfs_agfl_t; 154} xfs_agfl_t;
155 155
156/* 156/*
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index d2bbcd882a69..e80dda3437d1 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -1477,8 +1477,10 @@ xfs_alloc_ag_vextent_small(
1477 /* 1477 /*
1478 * Can't allocate from the freelist for some reason. 1478 * Can't allocate from the freelist for some reason.
1479 */ 1479 */
1480 else 1480 else {
1481 fbno = NULLAGBLOCK;
1481 flen = 0; 1482 flen = 0;
1483 }
1482 /* 1484 /*
1483 * Can't do the allocation, give up. 1485 * Can't do the allocation, give up.
1484 */ 1486 */
@@ -2021,7 +2023,7 @@ xfs_alloc_get_freelist(
2021 /* 2023 /*
2022 * Get the block number and update the data structures. 2024 * Get the block number and update the data structures.
2023 */ 2025 */
2024 bno = INT_GET(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)], ARCH_CONVERT); 2026 bno = be32_to_cpu(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
2025 be32_add(&agf->agf_flfirst, 1); 2027 be32_add(&agf->agf_flfirst, 1);
2026 xfs_trans_brelse(tp, agflbp); 2028 xfs_trans_brelse(tp, agflbp);
2027 if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp)) 2029 if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
@@ -2108,7 +2110,7 @@ xfs_alloc_put_freelist(
2108{ 2110{
2109 xfs_agf_t *agf; /* a.g. freespace structure */ 2111 xfs_agf_t *agf; /* a.g. freespace structure */
2110 xfs_agfl_t *agfl; /* a.g. free block array */ 2112 xfs_agfl_t *agfl; /* a.g. free block array */
2111 xfs_agblock_t *blockp;/* pointer to array entry */ 2113 __be32 *blockp;/* pointer to array entry */
2112 int error; 2114 int error;
2113#ifdef XFS_ALLOC_TRACE 2115#ifdef XFS_ALLOC_TRACE
2114 static char fname[] = "xfs_alloc_put_freelist"; 2116 static char fname[] = "xfs_alloc_put_freelist";
@@ -2132,7 +2134,7 @@ xfs_alloc_put_freelist(
2132 pag->pagf_flcount++; 2134 pag->pagf_flcount++;
2133 ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)); 2135 ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
2134 blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)]; 2136 blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)];
2135 INT_SET(*blockp, ARCH_CONVERT, bno); 2137 *blockp = cpu_to_be32(bno);
2136 TRACE_MODAGF(NULL, agf, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT); 2138 TRACE_MODAGF(NULL, agf, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
2137 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT); 2139 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
2138 xfs_trans_log_buf(tp, agflbp, 2140 xfs_trans_log_buf(tp, agflbp,
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 7446556e8021..74cadf95d4e8 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -92,6 +92,7 @@ xfs_alloc_delrec(
92 xfs_alloc_key_t *rkp; /* right block key pointer */ 92 xfs_alloc_key_t *rkp; /* right block key pointer */
93 xfs_alloc_ptr_t *rpp; /* right block address pointer */ 93 xfs_alloc_ptr_t *rpp; /* right block address pointer */
94 int rrecs=0; /* number of records in right block */ 94 int rrecs=0; /* number of records in right block */
95 int numrecs;
95 xfs_alloc_rec_t *rrp; /* right block record pointer */ 96 xfs_alloc_rec_t *rrp; /* right block record pointer */
96 xfs_btree_cur_t *tcur; /* temporary btree cursor */ 97 xfs_btree_cur_t *tcur; /* temporary btree cursor */
97 98
@@ -115,7 +116,8 @@ xfs_alloc_delrec(
115 /* 116 /*
116 * Fail if we're off the end of the block. 117 * Fail if we're off the end of the block.
117 */ 118 */
118 if (ptr > be16_to_cpu(block->bb_numrecs)) { 119 numrecs = be16_to_cpu(block->bb_numrecs);
120 if (ptr > numrecs) {
119 *stat = 0; 121 *stat = 0;
120 return 0; 122 return 0;
121 } 123 }
@@ -129,18 +131,18 @@ xfs_alloc_delrec(
129 lkp = XFS_ALLOC_KEY_ADDR(block, 1, cur); 131 lkp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
130 lpp = XFS_ALLOC_PTR_ADDR(block, 1, cur); 132 lpp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
131#ifdef DEBUG 133#ifdef DEBUG
132 for (i = ptr; i < be16_to_cpu(block->bb_numrecs); i++) { 134 for (i = ptr; i < numrecs; i++) {
133 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level))) 135 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level)))
134 return error; 136 return error;
135 } 137 }
136#endif 138#endif
137 if (ptr < be16_to_cpu(block->bb_numrecs)) { 139 if (ptr < numrecs) {
138 memmove(&lkp[ptr - 1], &lkp[ptr], 140 memmove(&lkp[ptr - 1], &lkp[ptr],
139 (be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lkp)); 141 (numrecs - ptr) * sizeof(*lkp));
140 memmove(&lpp[ptr - 1], &lpp[ptr], 142 memmove(&lpp[ptr - 1], &lpp[ptr],
141 (be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lpp)); 143 (numrecs - ptr) * sizeof(*lpp));
142 xfs_alloc_log_ptrs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1); 144 xfs_alloc_log_ptrs(cur, bp, ptr, numrecs - 1);
143 xfs_alloc_log_keys(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1); 145 xfs_alloc_log_keys(cur, bp, ptr, numrecs - 1);
144 } 146 }
145 } 147 }
146 /* 148 /*
@@ -149,10 +151,10 @@ xfs_alloc_delrec(
149 */ 151 */
150 else { 152 else {
151 lrp = XFS_ALLOC_REC_ADDR(block, 1, cur); 153 lrp = XFS_ALLOC_REC_ADDR(block, 1, cur);
152 if (ptr < be16_to_cpu(block->bb_numrecs)) { 154 if (ptr < numrecs) {
153 memmove(&lrp[ptr - 1], &lrp[ptr], 155 memmove(&lrp[ptr - 1], &lrp[ptr],
154 (be16_to_cpu(block->bb_numrecs) - ptr) * sizeof(*lrp)); 156 (numrecs - ptr) * sizeof(*lrp));
155 xfs_alloc_log_recs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs) - 1); 157 xfs_alloc_log_recs(cur, bp, ptr, numrecs - 1);
156 } 158 }
157 /* 159 /*
158 * If it's the first record in the block, we'll need a key 160 * If it's the first record in the block, we'll need a key
@@ -167,7 +169,8 @@ xfs_alloc_delrec(
167 /* 169 /*
168 * Decrement and log the number of entries in the block. 170 * Decrement and log the number of entries in the block.
169 */ 171 */
170 be16_add(&block->bb_numrecs, -1); 172 numrecs--;
173 block->bb_numrecs = cpu_to_be16(numrecs);
171 xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); 174 xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS);
172 /* 175 /*
173 * See if the longest free extent in the allocation group was 176 * See if the longest free extent in the allocation group was
@@ -181,14 +184,14 @@ xfs_alloc_delrec(
181 if (level == 0 && 184 if (level == 0 &&
182 cur->bc_btnum == XFS_BTNUM_CNT && 185 cur->bc_btnum == XFS_BTNUM_CNT &&
183 be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK && 186 be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK &&
184 ptr > be16_to_cpu(block->bb_numrecs)) { 187 ptr > numrecs) {
185 ASSERT(ptr == be16_to_cpu(block->bb_numrecs) + 1); 188 ASSERT(ptr == numrecs + 1);
186 /* 189 /*
187 * There are still records in the block. Grab the size 190 * There are still records in the block. Grab the size
188 * from the last one. 191 * from the last one.
189 */ 192 */
190 if (be16_to_cpu(block->bb_numrecs)) { 193 if (numrecs) {
191 rrp = XFS_ALLOC_REC_ADDR(block, be16_to_cpu(block->bb_numrecs), cur); 194 rrp = XFS_ALLOC_REC_ADDR(block, numrecs, cur);
192 agf->agf_longest = rrp->ar_blockcount; 195 agf->agf_longest = rrp->ar_blockcount;
193 } 196 }
194 /* 197 /*
@@ -211,7 +214,7 @@ xfs_alloc_delrec(
211 * and it's NOT the leaf level, 214 * and it's NOT the leaf level,
212 * then we can get rid of this level. 215 * then we can get rid of this level.
213 */ 216 */
214 if (be16_to_cpu(block->bb_numrecs) == 1 && level > 0) { 217 if (numrecs == 1 && level > 0) {
215 /* 218 /*
216 * lpp is still set to the first pointer in the block. 219 * lpp is still set to the first pointer in the block.
217 * Make it the new root of the btree. 220 * Make it the new root of the btree.
@@ -267,7 +270,7 @@ xfs_alloc_delrec(
267 * If the number of records remaining in the block is at least 270 * If the number of records remaining in the block is at least
268 * the minimum, we're done. 271 * the minimum, we're done.
269 */ 272 */
270 if (be16_to_cpu(block->bb_numrecs) >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) { 273 if (numrecs >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
271 if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i))) 274 if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i)))
272 return error; 275 return error;
273 *stat = 1; 276 *stat = 1;
@@ -419,19 +422,21 @@ xfs_alloc_delrec(
419 * See if we can join with the left neighbor block. 422 * See if we can join with the left neighbor block.
420 */ 423 */
421 if (lbno != NULLAGBLOCK && 424 if (lbno != NULLAGBLOCK &&
422 lrecs + be16_to_cpu(block->bb_numrecs) <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { 425 lrecs + numrecs <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
423 /* 426 /*
424 * Set "right" to be the starting block, 427 * Set "right" to be the starting block,
425 * "left" to be the left neighbor. 428 * "left" to be the left neighbor.
426 */ 429 */
427 rbno = bno; 430 rbno = bno;
428 right = block; 431 right = block;
432 rrecs = be16_to_cpu(right->bb_numrecs);
429 rbp = bp; 433 rbp = bp;
430 if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, 434 if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
431 cur->bc_private.a.agno, lbno, 0, &lbp, 435 cur->bc_private.a.agno, lbno, 0, &lbp,
432 XFS_ALLOC_BTREE_REF))) 436 XFS_ALLOC_BTREE_REF)))
433 return error; 437 return error;
434 left = XFS_BUF_TO_ALLOC_BLOCK(lbp); 438 left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
439 lrecs = be16_to_cpu(left->bb_numrecs);
435 if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) 440 if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
436 return error; 441 return error;
437 } 442 }
@@ -439,20 +444,21 @@ xfs_alloc_delrec(
439 * If that won't work, see if we can join with the right neighbor block. 444 * If that won't work, see if we can join with the right neighbor block.
440 */ 445 */
441 else if (rbno != NULLAGBLOCK && 446 else if (rbno != NULLAGBLOCK &&
442 rrecs + be16_to_cpu(block->bb_numrecs) <= 447 rrecs + numrecs <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
443 XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
444 /* 448 /*
445 * Set "left" to be the starting block, 449 * Set "left" to be the starting block,
446 * "right" to be the right neighbor. 450 * "right" to be the right neighbor.
447 */ 451 */
448 lbno = bno; 452 lbno = bno;
449 left = block; 453 left = block;
454 lrecs = be16_to_cpu(left->bb_numrecs);
450 lbp = bp; 455 lbp = bp;
451 if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, 456 if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
452 cur->bc_private.a.agno, rbno, 0, &rbp, 457 cur->bc_private.a.agno, rbno, 0, &rbp,
453 XFS_ALLOC_BTREE_REF))) 458 XFS_ALLOC_BTREE_REF)))
454 return error; 459 return error;
455 right = XFS_BUF_TO_ALLOC_BLOCK(rbp); 460 right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
461 rrecs = be16_to_cpu(right->bb_numrecs);
456 if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) 462 if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
457 return error; 463 return error;
458 } 464 }
@@ -474,34 +480,28 @@ xfs_alloc_delrec(
474 /* 480 /*
475 * It's a non-leaf. Move keys and pointers. 481 * It's a non-leaf. Move keys and pointers.
476 */ 482 */
477 lkp = XFS_ALLOC_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur); 483 lkp = XFS_ALLOC_KEY_ADDR(left, lrecs + 1, cur);
478 lpp = XFS_ALLOC_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur); 484 lpp = XFS_ALLOC_PTR_ADDR(left, lrecs + 1, cur);
479 rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); 485 rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
480 rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); 486 rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
481#ifdef DEBUG 487#ifdef DEBUG
482 for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { 488 for (i = 0; i < rrecs; i++) {
483 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level))) 489 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level)))
484 return error; 490 return error;
485 } 491 }
486#endif 492#endif
487 memcpy(lkp, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*lkp)); 493 memcpy(lkp, rkp, rrecs * sizeof(*lkp));
488 memcpy(lpp, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*lpp)); 494 memcpy(lpp, rpp, rrecs * sizeof(*lpp));
489 xfs_alloc_log_keys(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1, 495 xfs_alloc_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
490 be16_to_cpu(left->bb_numrecs) + 496 xfs_alloc_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
491 be16_to_cpu(right->bb_numrecs));
492 xfs_alloc_log_ptrs(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1,
493 be16_to_cpu(left->bb_numrecs) +
494 be16_to_cpu(right->bb_numrecs));
495 } else { 497 } else {
496 /* 498 /*
497 * It's a leaf. Move records. 499 * It's a leaf. Move records.
498 */ 500 */
499 lrp = XFS_ALLOC_REC_ADDR(left, be16_to_cpu(left->bb_numrecs) + 1, cur); 501 lrp = XFS_ALLOC_REC_ADDR(left, lrecs + 1, cur);
500 rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); 502 rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
501 memcpy(lrp, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*lrp)); 503 memcpy(lrp, rrp, rrecs * sizeof(*lrp));
502 xfs_alloc_log_recs(cur, lbp, be16_to_cpu(left->bb_numrecs) + 1, 504 xfs_alloc_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
503 be16_to_cpu(left->bb_numrecs) +
504 be16_to_cpu(right->bb_numrecs));
505 } 505 }
506 /* 506 /*
507 * If we joined with the left neighbor, set the buffer in the 507 * If we joined with the left neighbor, set the buffer in the
@@ -509,7 +509,7 @@ xfs_alloc_delrec(
509 */ 509 */
510 if (bp != lbp) { 510 if (bp != lbp) {
511 xfs_btree_setbuf(cur, level, lbp); 511 xfs_btree_setbuf(cur, level, lbp);
512 cur->bc_ptrs[level] += be16_to_cpu(left->bb_numrecs); 512 cur->bc_ptrs[level] += lrecs;
513 } 513 }
514 /* 514 /*
515 * If we joined with the right neighbor and there's a level above 515 * If we joined with the right neighbor and there's a level above
@@ -521,7 +521,8 @@ xfs_alloc_delrec(
521 /* 521 /*
522 * Fix up the number of records in the surviving block. 522 * Fix up the number of records in the surviving block.
523 */ 523 */
524 be16_add(&left->bb_numrecs, be16_to_cpu(right->bb_numrecs)); 524 lrecs += rrecs;
525 left->bb_numrecs = cpu_to_be16(lrecs);
525 /* 526 /*
526 * Fix up the right block pointer in the surviving block, and log it. 527 * Fix up the right block pointer in the surviving block, and log it.
527 */ 528 */
@@ -608,6 +609,7 @@ xfs_alloc_insrec(
608 xfs_btree_cur_t *ncur; /* new cursor to be used at next lvl */ 609 xfs_btree_cur_t *ncur; /* new cursor to be used at next lvl */
609 xfs_alloc_key_t nkey; /* new key value, from split */ 610 xfs_alloc_key_t nkey; /* new key value, from split */
610 xfs_alloc_rec_t nrec; /* new record value, for caller */ 611 xfs_alloc_rec_t nrec; /* new record value, for caller */
612 int numrecs;
611 int optr; /* old ptr value */ 613 int optr; /* old ptr value */
612 xfs_alloc_ptr_t *pp; /* pointer to btree addresses */ 614 xfs_alloc_ptr_t *pp; /* pointer to btree addresses */
613 int ptr; /* index in btree block for this rec */ 615 int ptr; /* index in btree block for this rec */
@@ -653,13 +655,14 @@ xfs_alloc_insrec(
653 */ 655 */
654 bp = cur->bc_bufs[level]; 656 bp = cur->bc_bufs[level];
655 block = XFS_BUF_TO_ALLOC_BLOCK(bp); 657 block = XFS_BUF_TO_ALLOC_BLOCK(bp);
658 numrecs = be16_to_cpu(block->bb_numrecs);
656#ifdef DEBUG 659#ifdef DEBUG
657 if ((error = xfs_btree_check_sblock(cur, block, level, bp))) 660 if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
658 return error; 661 return error;
659 /* 662 /*
660 * Check that the new entry is being inserted in the right place. 663 * Check that the new entry is being inserted in the right place.
661 */ 664 */
662 if (ptr <= be16_to_cpu(block->bb_numrecs)) { 665 if (ptr <= numrecs) {
663 if (level == 0) { 666 if (level == 0) {
664 rp = XFS_ALLOC_REC_ADDR(block, ptr, cur); 667 rp = XFS_ALLOC_REC_ADDR(block, ptr, cur);
665 xfs_btree_check_rec(cur->bc_btnum, recp, rp); 668 xfs_btree_check_rec(cur->bc_btnum, recp, rp);
@@ -670,12 +673,12 @@ xfs_alloc_insrec(
670 } 673 }
671#endif 674#endif
672 nbno = NULLAGBLOCK; 675 nbno = NULLAGBLOCK;
673 ncur = (xfs_btree_cur_t *)0; 676 ncur = NULL;
674 /* 677 /*
675 * If the block is full, we can't insert the new entry until we 678 * If the block is full, we can't insert the new entry until we
676 * make the block un-full. 679 * make the block un-full.
677 */ 680 */
678 if (be16_to_cpu(block->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { 681 if (numrecs == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
679 /* 682 /*
680 * First, try shifting an entry to the right neighbor. 683 * First, try shifting an entry to the right neighbor.
681 */ 684 */
@@ -729,6 +732,7 @@ xfs_alloc_insrec(
729 * At this point we know there's room for our new entry in the block 732 * At this point we know there's room for our new entry in the block
730 * we're pointing at. 733 * we're pointing at.
731 */ 734 */
735 numrecs = be16_to_cpu(block->bb_numrecs);
732 if (level > 0) { 736 if (level > 0) {
733 /* 737 /*
734 * It's a non-leaf entry. Make a hole for the new data 738 * It's a non-leaf entry. Make a hole for the new data
@@ -737,15 +741,15 @@ xfs_alloc_insrec(
737 kp = XFS_ALLOC_KEY_ADDR(block, 1, cur); 741 kp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
738 pp = XFS_ALLOC_PTR_ADDR(block, 1, cur); 742 pp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
739#ifdef DEBUG 743#ifdef DEBUG
740 for (i = be16_to_cpu(block->bb_numrecs); i >= ptr; i--) { 744 for (i = numrecs; i >= ptr; i--) {
741 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i - 1]), level))) 745 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i - 1]), level)))
742 return error; 746 return error;
743 } 747 }
744#endif 748#endif
745 memmove(&kp[ptr], &kp[ptr - 1], 749 memmove(&kp[ptr], &kp[ptr - 1],
746 (be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*kp)); 750 (numrecs - ptr + 1) * sizeof(*kp));
747 memmove(&pp[ptr], &pp[ptr - 1], 751 memmove(&pp[ptr], &pp[ptr - 1],
748 (be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*pp)); 752 (numrecs - ptr + 1) * sizeof(*pp));
749#ifdef DEBUG 753#ifdef DEBUG
750 if ((error = xfs_btree_check_sptr(cur, *bnop, level))) 754 if ((error = xfs_btree_check_sptr(cur, *bnop, level)))
751 return error; 755 return error;
@@ -755,11 +759,12 @@ xfs_alloc_insrec(
755 */ 759 */
756 kp[ptr - 1] = key; 760 kp[ptr - 1] = key;
757 pp[ptr - 1] = cpu_to_be32(*bnop); 761 pp[ptr - 1] = cpu_to_be32(*bnop);
758 be16_add(&block->bb_numrecs, 1); 762 numrecs++;
759 xfs_alloc_log_keys(cur, bp, ptr, be16_to_cpu(block->bb_numrecs)); 763 block->bb_numrecs = cpu_to_be16(numrecs);
760 xfs_alloc_log_ptrs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs)); 764 xfs_alloc_log_keys(cur, bp, ptr, numrecs);
765 xfs_alloc_log_ptrs(cur, bp, ptr, numrecs);
761#ifdef DEBUG 766#ifdef DEBUG
762 if (ptr < be16_to_cpu(block->bb_numrecs)) 767 if (ptr < numrecs)
763 xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1, 768 xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1,
764 kp + ptr); 769 kp + ptr);
765#endif 770#endif
@@ -769,16 +774,17 @@ xfs_alloc_insrec(
769 */ 774 */
770 rp = XFS_ALLOC_REC_ADDR(block, 1, cur); 775 rp = XFS_ALLOC_REC_ADDR(block, 1, cur);
771 memmove(&rp[ptr], &rp[ptr - 1], 776 memmove(&rp[ptr], &rp[ptr - 1],
772 (be16_to_cpu(block->bb_numrecs) - ptr + 1) * sizeof(*rp)); 777 (numrecs - ptr + 1) * sizeof(*rp));
773 /* 778 /*
774 * Now stuff the new record in, bump numrecs 779 * Now stuff the new record in, bump numrecs
775 * and log the new data. 780 * and log the new data.
776 */ 781 */
777 rp[ptr - 1] = *recp; /* INT_: struct copy */ 782 rp[ptr - 1] = *recp;
778 be16_add(&block->bb_numrecs, 1); 783 numrecs++;
779 xfs_alloc_log_recs(cur, bp, ptr, be16_to_cpu(block->bb_numrecs)); 784 block->bb_numrecs = cpu_to_be16(numrecs);
785 xfs_alloc_log_recs(cur, bp, ptr, numrecs);
780#ifdef DEBUG 786#ifdef DEBUG
781 if (ptr < be16_to_cpu(block->bb_numrecs)) 787 if (ptr < numrecs)
782 xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1, 788 xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1,
783 rp + ptr); 789 rp + ptr);
784#endif 790#endif
@@ -819,8 +825,8 @@ xfs_alloc_insrec(
819 */ 825 */
820 *bnop = nbno; 826 *bnop = nbno;
821 if (nbno != NULLAGBLOCK) { 827 if (nbno != NULLAGBLOCK) {
822 *recp = nrec; /* INT_: struct copy */ 828 *recp = nrec;
823 *curp = ncur; /* INT_: struct copy */ 829 *curp = ncur;
824 } 830 }
825 *stat = 1; 831 *stat = 1;
826 return 0; 832 return 0;
@@ -981,7 +987,7 @@ xfs_alloc_lookup(
981 */ 987 */
982 bp = cur->bc_bufs[level]; 988 bp = cur->bc_bufs[level];
983 if (bp && XFS_BUF_ADDR(bp) != d) 989 if (bp && XFS_BUF_ADDR(bp) != d)
984 bp = (xfs_buf_t *)0; 990 bp = NULL;
985 if (!bp) { 991 if (!bp) {
986 /* 992 /*
987 * Need to get a new buffer. Read it, then 993 * Need to get a new buffer. Read it, then
@@ -1229,7 +1235,7 @@ xfs_alloc_lshift(
1229 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*rpp), level))) 1235 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*rpp), level)))
1230 return error; 1236 return error;
1231#endif 1237#endif
1232 *lpp = *rpp; /* INT_: copy */ 1238 *lpp = *rpp;
1233 xfs_alloc_log_ptrs(cur, lbp, nrec, nrec); 1239 xfs_alloc_log_ptrs(cur, lbp, nrec, nrec);
1234 xfs_btree_check_key(cur->bc_btnum, lkp - 1, lkp); 1240 xfs_btree_check_key(cur->bc_btnum, lkp - 1, lkp);
1235 } 1241 }
@@ -1406,8 +1412,8 @@ xfs_alloc_newroot(
1406 1412
1407 kp = XFS_ALLOC_KEY_ADDR(new, 1, cur); 1413 kp = XFS_ALLOC_KEY_ADDR(new, 1, cur);
1408 if (be16_to_cpu(left->bb_level) > 0) { 1414 if (be16_to_cpu(left->bb_level) > 0) {
1409 kp[0] = *XFS_ALLOC_KEY_ADDR(left, 1, cur); /* INT_: structure copy */ 1415 kp[0] = *XFS_ALLOC_KEY_ADDR(left, 1, cur);
1410 kp[1] = *XFS_ALLOC_KEY_ADDR(right, 1, cur);/* INT_: structure copy */ 1416 kp[1] = *XFS_ALLOC_KEY_ADDR(right, 1, cur);
1411 } else { 1417 } else {
1412 xfs_alloc_rec_t *rp; /* btree record pointer */ 1418 xfs_alloc_rec_t *rp; /* btree record pointer */
1413 1419
@@ -1527,8 +1533,8 @@ xfs_alloc_rshift(
1527 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*lpp), level))) 1533 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*lpp), level)))
1528 return error; 1534 return error;
1529#endif 1535#endif
1530 *rkp = *lkp; /* INT_: copy */ 1536 *rkp = *lkp;
1531 *rpp = *lpp; /* INT_: copy */ 1537 *rpp = *lpp;
1532 xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); 1538 xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
1533 xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); 1539 xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
1534 xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1); 1540 xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1);
@@ -2044,7 +2050,7 @@ xfs_alloc_insert(
2044 nbno = NULLAGBLOCK; 2050 nbno = NULLAGBLOCK;
2045 nrec.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock); 2051 nrec.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
2046 nrec.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount); 2052 nrec.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
2047 ncur = (xfs_btree_cur_t *)0; 2053 ncur = NULL;
2048 pcur = cur; 2054 pcur = cur;
2049 /* 2055 /*
2050 * Loop going up the tree, starting at the leaf level. 2056 * Loop going up the tree, starting at the leaf level.
@@ -2076,7 +2082,7 @@ xfs_alloc_insert(
2076 */ 2082 */
2077 if (ncur) { 2083 if (ncur) {
2078 pcur = ncur; 2084 pcur = ncur;
2079 ncur = (xfs_btree_cur_t *)0; 2085 ncur = NULL;
2080 } 2086 }
2081 } while (nbno != NULLAGBLOCK); 2087 } while (nbno != NULLAGBLOCK);
2082 *stat = i; 2088 *stat = i;
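
The alloc-btree hunks above all follow one pattern: read block->bb_numrecs into a local numrecs with be16_to_cpu() once, work on the CPU-order copy, and write it back with cpu_to_be16() once, instead of flipping endianness on every use. A minimal stand-alone sketch of that pattern follows; the struct fake_btree_block type, the fake_insrec() helper and the use of ntohs()/htons() in place of the kernel's be16_to_cpu()/cpu_to_be16() are illustrative assumptions, not code from the patch.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohs/htons stand in for be16_to_cpu/cpu_to_be16 */

struct fake_btree_block {	/* simplified stand-in for the on-disk block header */
	uint16_t bb_numrecs;	/* record count, stored big-endian on disk */
	uint32_t recs[16];	/* records, 1-based indexing as in the XFS code */
};

/* Insert 'rec' at 1-based position 'ptr'; caller ensures the block is not full. */
static void fake_insrec(struct fake_btree_block *block, int ptr, uint32_t rec)
{
	int numrecs = ntohs(block->bb_numrecs);		/* flip once on entry */

	/* shift entries ptr..numrecs up by one slot to make a hole */
	memmove(&block->recs[ptr], &block->recs[ptr - 1],
		(numrecs - ptr + 1) * sizeof(block->recs[0]));
	block->recs[ptr - 1] = rec;
	numrecs++;
	block->bb_numrecs = htons(numrecs);		/* flip once on exit */
}

int main(void)
{
	struct fake_btree_block b = { .bb_numrecs = htons(2), .recs = { 10, 30 } };

	fake_insrec(&b, 2, 20);		/* insert 20 between 10 and 30 */
	return ntohs(b.bb_numrecs) == 3 ? 0 : 1;
}
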
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index 1a2101043275..9ada7bdbae52 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -91,7 +91,6 @@ STATIC int xfs_attr_refillstate(xfs_da_state_t *state);
91/* 91/*
92 * Routines to manipulate out-of-line attribute values. 92 * Routines to manipulate out-of-line attribute values.
93 */ 93 */
94STATIC int xfs_attr_rmtval_get(xfs_da_args_t *args);
95STATIC int xfs_attr_rmtval_set(xfs_da_args_t *args); 94STATIC int xfs_attr_rmtval_set(xfs_da_args_t *args);
96STATIC int xfs_attr_rmtval_remove(xfs_da_args_t *args); 95STATIC int xfs_attr_rmtval_remove(xfs_da_args_t *args);
97 96
@@ -180,7 +179,7 @@ xfs_attr_get(bhv_desc_t *bdp, const char *name, char *value, int *valuelenp,
180 return(error); 179 return(error);
181} 180}
182 181
183STATIC int 182int
184xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen, 183xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen,
185 char *value, int valuelen, int flags) 184 char *value, int valuelen, int flags)
186{ 185{
@@ -440,7 +439,7 @@ xfs_attr_set(bhv_desc_t *bdp, const char *name, char *value, int valuelen, int f
440 * Generic handler routine to remove a name from an attribute list. 439 * Generic handler routine to remove a name from an attribute list.
441 * Transitions attribute list from Btree to shortform as necessary. 440 * Transitions attribute list from Btree to shortform as necessary.
442 */ 441 */
443STATIC int 442int
444xfs_attr_remove_int(xfs_inode_t *dp, const char *name, int namelen, int flags) 443xfs_attr_remove_int(xfs_inode_t *dp, const char *name, int namelen, int flags)
445{ 444{
446 xfs_da_args_t args; 445 xfs_da_args_t args;
@@ -591,6 +590,110 @@ xfs_attr_remove(bhv_desc_t *bdp, const char *name, int flags, struct cred *cred)
591 return xfs_attr_remove_int(dp, name, namelen, flags); 590 return xfs_attr_remove_int(dp, name, namelen, flags);
592} 591}
593 592
593int /* error */
594xfs_attr_list_int(xfs_attr_list_context_t *context)
595{
596 int error;
597 xfs_inode_t *dp = context->dp;
598
599 /*
600 * Decide on what work routines to call based on the inode size.
601 */
602 if (XFS_IFORK_Q(dp) == 0 ||
603 (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
604 dp->i_d.di_anextents == 0)) {
605 error = 0;
606 } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
607 error = xfs_attr_shortform_list(context);
608 } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
609 error = xfs_attr_leaf_list(context);
610 } else {
611 error = xfs_attr_node_list(context);
612 }
613 return error;
614}
615
616#define ATTR_ENTBASESIZE /* minimum bytes used by an attr */ \
617 (((struct attrlist_ent *) 0)->a_name - (char *) 0)
618#define ATTR_ENTSIZE(namelen) /* actual bytes used by an attr */ \
619 ((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(u_int32_t)-1) \
620 & ~(sizeof(u_int32_t)-1))
621
622/*
623 * Format an attribute and copy it out to the user's buffer.
624 * Take care to check values and protect against them changing later,
625 * we may be reading them directly out of a user buffer.
626 */
627/*ARGSUSED*/
628STATIC int
629xfs_attr_put_listent(xfs_attr_list_context_t *context, attrnames_t *namesp,
630 char *name, int namelen,
631 int valuelen, char *value)
632{
633 attrlist_ent_t *aep;
634 int arraytop;
635
636 ASSERT(!(context->flags & ATTR_KERNOVAL));
637 ASSERT(context->count >= 0);
638 ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
639 ASSERT(context->firstu >= sizeof(*context->alist));
640 ASSERT(context->firstu <= context->bufsize);
641
642 arraytop = sizeof(*context->alist) +
643 context->count * sizeof(context->alist->al_offset[0]);
644 context->firstu -= ATTR_ENTSIZE(namelen);
645 if (context->firstu < arraytop) {
646 xfs_attr_trace_l_c("buffer full", context);
647 context->alist->al_more = 1;
648 context->seen_enough = 1;
649 return 1;
650 }
651
652 aep = (attrlist_ent_t *)&(((char *)context->alist)[ context->firstu ]);
653 aep->a_valuelen = valuelen;
654 memcpy(aep->a_name, name, namelen);
655 aep->a_name[ namelen ] = 0;
656 context->alist->al_offset[ context->count++ ] = context->firstu;
657 context->alist->al_count = context->count;
658 xfs_attr_trace_l_c("add", context);
659 return 0;
660}
661
662STATIC int
663xfs_attr_kern_list(xfs_attr_list_context_t *context, attrnames_t *namesp,
664 char *name, int namelen,
665 int valuelen, char *value)
666{
667 char *offset;
668 int arraytop;
669
670 ASSERT(context->count >= 0);
671
672 arraytop = context->count + namesp->attr_namelen + namelen + 1;
673 if (arraytop > context->firstu) {
674 context->count = -1; /* insufficient space */
675 return 1;
676 }
677 offset = (char *)context->alist + context->count;
678 strncpy(offset, namesp->attr_name, namesp->attr_namelen);
679 offset += namesp->attr_namelen;
680 strncpy(offset, name, namelen); /* real name */
681 offset += namelen;
682 *offset = '\0';
683 context->count += namesp->attr_namelen + namelen + 1;
684 return 0;
685}
686
687/*ARGSUSED*/
688STATIC int
689xfs_attr_kern_list_sizes(xfs_attr_list_context_t *context, attrnames_t *namesp,
690 char *name, int namelen,
691 int valuelen, char *value)
692{
693 context->count += namesp->attr_namelen + namelen + 1;
694 return 0;
695}
696
594/* 697/*
595 * Generate a list of extended attribute names and optionally 698 * Generate a list of extended attribute names and optionally
596 * also value lengths. Positive return value follows the XFS 699 * also value lengths. Positive return value follows the XFS
@@ -615,13 +718,13 @@ xfs_attr_list(bhv_desc_t *bdp, char *buffer, int bufsize, int flags,
615 return(XFS_ERROR(EINVAL)); 718 return(XFS_ERROR(EINVAL));
616 if ((cursor->initted == 0) && 719 if ((cursor->initted == 0) &&
617 (cursor->hashval || cursor->blkno || cursor->offset)) 720 (cursor->hashval || cursor->blkno || cursor->offset))
618 return(XFS_ERROR(EINVAL)); 721 return XFS_ERROR(EINVAL);
619 722
620 /* 723 /*
621 * Check for a properly aligned buffer. 724 * Check for a properly aligned buffer.
622 */ 725 */
623 if (((long)buffer) & (sizeof(int)-1)) 726 if (((long)buffer) & (sizeof(int)-1))
624 return(XFS_ERROR(EFAULT)); 727 return XFS_ERROR(EFAULT);
625 if (flags & ATTR_KERNOVAL) 728 if (flags & ATTR_KERNOVAL)
626 bufsize = 0; 729 bufsize = 0;
627 730
@@ -634,53 +737,47 @@ xfs_attr_list(bhv_desc_t *bdp, char *buffer, int bufsize, int flags,
634 context.dupcnt = 0; 737 context.dupcnt = 0;
635 context.resynch = 1; 738 context.resynch = 1;
636 context.flags = flags; 739 context.flags = flags;
637 if (!(flags & ATTR_KERNAMELS)) { 740 context.seen_enough = 0;
741 context.alist = (attrlist_t *)buffer;
742 context.put_value = 0;
743
744 if (flags & ATTR_KERNAMELS) {
745 context.bufsize = bufsize;
746 context.firstu = context.bufsize;
747 if (flags & ATTR_KERNOVAL)
748 context.put_listent = xfs_attr_kern_list_sizes;
749 else
750 context.put_listent = xfs_attr_kern_list;
751 } else {
638 context.bufsize = (bufsize & ~(sizeof(int)-1)); /* align */ 752 context.bufsize = (bufsize & ~(sizeof(int)-1)); /* align */
639 context.firstu = context.bufsize; 753 context.firstu = context.bufsize;
640 context.alist = (attrlist_t *)buffer;
641 context.alist->al_count = 0; 754 context.alist->al_count = 0;
642 context.alist->al_more = 0; 755 context.alist->al_more = 0;
643 context.alist->al_offset[0] = context.bufsize; 756 context.alist->al_offset[0] = context.bufsize;
644 } 757 context.put_listent = xfs_attr_put_listent;
645 else {
646 context.bufsize = bufsize;
647 context.firstu = context.bufsize;
648 context.alist = (attrlist_t *)buffer;
649 } 758 }
650 759
651 if (XFS_FORCED_SHUTDOWN(dp->i_mount)) 760 if (XFS_FORCED_SHUTDOWN(dp->i_mount))
652 return (EIO); 761 return EIO;
653 762
654 xfs_ilock(dp, XFS_ILOCK_SHARED); 763 xfs_ilock(dp, XFS_ILOCK_SHARED);
655 /*
656 * Decide on what work routines to call based on the inode size.
657 */
658 xfs_attr_trace_l_c("syscall start", &context); 764 xfs_attr_trace_l_c("syscall start", &context);
659 if (XFS_IFORK_Q(dp) == 0 || 765
660 (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && 766 error = xfs_attr_list_int(&context);
661 dp->i_d.di_anextents == 0)) { 767
662 error = 0;
663 } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
664 error = xfs_attr_shortform_list(&context);
665 } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
666 error = xfs_attr_leaf_list(&context);
667 } else {
668 error = xfs_attr_node_list(&context);
669 }
670 xfs_iunlock(dp, XFS_ILOCK_SHARED); 768 xfs_iunlock(dp, XFS_ILOCK_SHARED);
671 xfs_attr_trace_l_c("syscall end", &context); 769 xfs_attr_trace_l_c("syscall end", &context);
672 770
673 if (!(context.flags & (ATTR_KERNOVAL|ATTR_KERNAMELS))) { 771 if (context.flags & (ATTR_KERNOVAL|ATTR_KERNAMELS)) {
674 ASSERT(error >= 0); 772 /* must return negated buffer size or the error */
675 }
676 else { /* must return negated buffer size or the error */
677 if (context.count < 0) 773 if (context.count < 0)
678 error = XFS_ERROR(ERANGE); 774 error = XFS_ERROR(ERANGE);
679 else 775 else
680 error = -context.count; 776 error = -context.count;
681 } 777 } else
778 ASSERT(error >= 0);
682 779
683 return(error); 780 return error;
684} 781}
685 782
686int /* error */ 783int /* error */
@@ -1122,19 +1219,19 @@ xfs_attr_leaf_list(xfs_attr_list_context_t *context)
1122 context->cursor->blkno = 0; 1219 context->cursor->blkno = 0;
1123 error = xfs_da_read_buf(NULL, context->dp, 0, -1, &bp, XFS_ATTR_FORK); 1220 error = xfs_da_read_buf(NULL, context->dp, 0, -1, &bp, XFS_ATTR_FORK);
1124 if (error) 1221 if (error)
1125 return(error); 1222 return XFS_ERROR(error);
1126 ASSERT(bp != NULL); 1223 ASSERT(bp != NULL);
1127 leaf = bp->data; 1224 leaf = bp->data;
1128 if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_ATTR_LEAF_MAGIC)) { 1225 if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_ATTR_LEAF_MAGIC)) {
1129 XFS_CORRUPTION_ERROR("xfs_attr_leaf_list", XFS_ERRLEVEL_LOW, 1226 XFS_CORRUPTION_ERROR("xfs_attr_leaf_list", XFS_ERRLEVEL_LOW,
1130 context->dp->i_mount, leaf); 1227 context->dp->i_mount, leaf);
1131 xfs_da_brelse(NULL, bp); 1228 xfs_da_brelse(NULL, bp);
1132 return(XFS_ERROR(EFSCORRUPTED)); 1229 return XFS_ERROR(EFSCORRUPTED);
1133 } 1230 }
1134 1231
1135 (void)xfs_attr_leaf_list_int(bp, context); 1232 error = xfs_attr_leaf_list_int(bp, context);
1136 xfs_da_brelse(NULL, bp); 1233 xfs_da_brelse(NULL, bp);
1137 return(0); 1234 return XFS_ERROR(error);
1138} 1235}
1139 1236
1140 1237
@@ -1858,8 +1955,12 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
1858 return(XFS_ERROR(EFSCORRUPTED)); 1955 return(XFS_ERROR(EFSCORRUPTED));
1859 } 1956 }
1860 error = xfs_attr_leaf_list_int(bp, context); 1957 error = xfs_attr_leaf_list_int(bp, context);
1861 if (error || !leaf->hdr.info.forw) 1958 if (error) {
1862 break; /* not really an error, buffer full or EOF */ 1959 xfs_da_brelse(NULL, bp);
1960 return error;
1961 }
1962 if (context->seen_enough || leaf->hdr.info.forw == 0)
1963 break;
1863 cursor->blkno = be32_to_cpu(leaf->hdr.info.forw); 1964 cursor->blkno = be32_to_cpu(leaf->hdr.info.forw);
1864 xfs_da_brelse(NULL, bp); 1965 xfs_da_brelse(NULL, bp);
1865 error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1, 1966 error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1,
@@ -1886,7 +1987,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
1886 * Read the value associated with an attribute from the out-of-line buffer 1987 * Read the value associated with an attribute from the out-of-line buffer
1887 * that we stored it in. 1988 * that we stored it in.
1888 */ 1989 */
1889STATIC int 1990int
1890xfs_attr_rmtval_get(xfs_da_args_t *args) 1991xfs_attr_rmtval_get(xfs_da_args_t *args)
1891{ 1992{
1892 xfs_bmbt_irec_t map[ATTR_RMTVALUE_MAPSIZE]; 1993 xfs_bmbt_irec_t map[ATTR_RMTVALUE_MAPSIZE];
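
Stepping back from the individual hunks: xfs_attr_list() now chooses a put_listent callback up front (xfs_attr_put_listent, xfs_attr_kern_list or xfs_attr_kern_list_sizes), and the shared walker xfs_attr_list_int() feeds every attribute to whichever callback is installed, stopping when the callback sets seen_enough. The stand-alone sketch below shows only the shape of that dispatch; the names list_ctx, emit_fn, emit_names_only and walk_attrs, and the hard-coded attribute names, are invented for illustration and are not the kernel interfaces.

#include <stdio.h>
#include <string.h>

struct list_ctx;
typedef int (*emit_fn)(struct list_ctx *ctx, const char *name,
		       int namelen, int valuelen);

struct list_ctx {
	emit_fn	put_listent;	/* output formatter, chosen once by the caller */
	int	count;		/* entries emitted so far */
	int	seen_enough;	/* formatter sets this to end the walk early */
};

/* One possible formatter: names only, like a names-without-values listing. */
static int emit_names_only(struct list_ctx *ctx, const char *name,
			   int namelen, int valuelen)
{
	(void)valuelen;
	printf("%.*s\n", namelen, name);
	ctx->count++;
	return 0;
}

/* The walker does not care where output goes; it just calls put_listent. */
static int walk_attrs(struct list_ctx *ctx)
{
	static const char *names[] = { "user.comment", "user.mime_type" };
	unsigned i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		int error = ctx->put_listent(ctx, names[i],
					     (int)strlen(names[i]), 0);
		if (error || ctx->seen_enough)
			return error;
	}
	return 0;
}

int main(void)
{
	struct list_ctx ctx = { .put_listent = emit_names_only };
	return walk_attrs(&ctx);
}
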
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
index 981633f6c077..783977d3ea71 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/xfs_attr.h
@@ -37,6 +37,7 @@
37 37
38struct cred; 38struct cred;
39struct bhv_vnode; 39struct bhv_vnode;
40struct xfs_attr_list_context;
40 41
41typedef int (*attrset_t)(struct bhv_vnode *, char *, void *, size_t, int); 42typedef int (*attrset_t)(struct bhv_vnode *, char *, void *, size_t, int);
42typedef int (*attrget_t)(struct bhv_vnode *, char *, void *, size_t, int); 43typedef int (*attrget_t)(struct bhv_vnode *, char *, void *, size_t, int);
@@ -160,13 +161,16 @@ struct xfs_da_args;
160 */ 161 */
161int xfs_attr_get(bhv_desc_t *, const char *, char *, int *, int, struct cred *); 162int xfs_attr_get(bhv_desc_t *, const char *, char *, int *, int, struct cred *);
162int xfs_attr_set(bhv_desc_t *, const char *, char *, int, int, struct cred *); 163int xfs_attr_set(bhv_desc_t *, const char *, char *, int, int, struct cred *);
164int xfs_attr_set_int(struct xfs_inode *, const char *, int, char *, int, int);
163int xfs_attr_remove(bhv_desc_t *, const char *, int, struct cred *); 165int xfs_attr_remove(bhv_desc_t *, const char *, int, struct cred *);
164int xfs_attr_list(bhv_desc_t *, char *, int, int, 166int xfs_attr_remove_int(struct xfs_inode *, const char *, int, int);
165 struct attrlist_cursor_kern *, struct cred *); 167int xfs_attr_list(bhv_desc_t *, char *, int, int, struct attrlist_cursor_kern *, struct cred *);
168int xfs_attr_list_int(struct xfs_attr_list_context *);
166int xfs_attr_inactive(struct xfs_inode *dp); 169int xfs_attr_inactive(struct xfs_inode *dp);
167 170
168int xfs_attr_shortform_getvalue(struct xfs_da_args *); 171int xfs_attr_shortform_getvalue(struct xfs_da_args *);
169int xfs_attr_fetch(struct xfs_inode *, const char *, int, 172int xfs_attr_fetch(struct xfs_inode *, const char *, int,
170 char *, int *, int, struct cred *); 173 char *, int *, int, struct cred *);
174int xfs_attr_rmtval_get(struct xfs_da_args *args);
171 175
172#endif /* __XFS_ATTR_H__ */ 176#endif /* __XFS_ATTR_H__ */
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 9455051f0120..9719bbef122c 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -89,9 +89,46 @@ STATIC void xfs_attr_leaf_moveents(xfs_attr_leafblock_t *src_leaf,
89 int dst_start, int move_count, 89 int dst_start, int move_count,
90 xfs_mount_t *mp); 90 xfs_mount_t *mp);
91STATIC int xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index); 91STATIC int xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index);
92STATIC int xfs_attr_put_listent(xfs_attr_list_context_t *context, 92
93 attrnames_t *, char *name, int namelen, 93/*========================================================================
94 int valuelen); 94 * Namespace helper routines
95 *========================================================================*/
96
97STATIC inline attrnames_t *
98xfs_attr_flags_namesp(int flags)
99{
100 return ((flags & XFS_ATTR_SECURE) ? &attr_secure:
101 ((flags & XFS_ATTR_ROOT) ? &attr_trusted : &attr_user));
102}
103
104/*
105 * If namespace bits don't match return 0.
106 * If all match then return 1.
107 */
108STATIC inline int
109xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
110{
111 return XFS_ATTR_NSP_ONDISK(ondisk_flags) == XFS_ATTR_NSP_ARGS_TO_ONDISK(arg_flags);
112}
113
114/*
115 * If namespace bits don't match and we don't have an override for it
116 * then return 0.
117 * If all match or are overridable then return 1.
118 */
119STATIC inline int
120xfs_attr_namesp_match_overrides(int arg_flags, int ondisk_flags)
121{
122 if (((arg_flags & ATTR_SECURE) == 0) !=
123 ((ondisk_flags & XFS_ATTR_SECURE) == 0) &&
124 !(arg_flags & ATTR_KERNORMALS))
125 return 0;
126 if (((arg_flags & ATTR_ROOT) == 0) !=
127 ((ondisk_flags & XFS_ATTR_ROOT) == 0) &&
128 !(arg_flags & ATTR_KERNROOTLS))
129 return 0;
130 return 1;
131}
95 132
96 133
97/*======================================================================== 134/*========================================================================
@@ -228,11 +265,7 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
228 continue; 265 continue;
229 if (memcmp(args->name, sfe->nameval, args->namelen) != 0) 266 if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
230 continue; 267 continue;
231 if (((args->flags & ATTR_SECURE) != 0) != 268 if (!xfs_attr_namesp_match(args->flags, sfe->flags))
232 ((sfe->flags & XFS_ATTR_SECURE) != 0))
233 continue;
234 if (((args->flags & ATTR_ROOT) != 0) !=
235 ((sfe->flags & XFS_ATTR_ROOT) != 0))
236 continue; 269 continue;
237 ASSERT(0); 270 ASSERT(0);
238#endif 271#endif
@@ -246,8 +279,7 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
246 279
247 sfe->namelen = args->namelen; 280 sfe->namelen = args->namelen;
248 sfe->valuelen = args->valuelen; 281 sfe->valuelen = args->valuelen;
249 sfe->flags = (args->flags & ATTR_SECURE) ? XFS_ATTR_SECURE : 282 sfe->flags = XFS_ATTR_NSP_ARGS_TO_ONDISK(args->flags);
250 ((args->flags & ATTR_ROOT) ? XFS_ATTR_ROOT : 0);
251 memcpy(sfe->nameval, args->name, args->namelen); 283 memcpy(sfe->nameval, args->name, args->namelen);
252 memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen); 284 memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen);
253 sf->hdr.count++; 285 sf->hdr.count++;
@@ -282,11 +314,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
282 continue; 314 continue;
283 if (memcmp(sfe->nameval, args->name, args->namelen) != 0) 315 if (memcmp(sfe->nameval, args->name, args->namelen) != 0)
284 continue; 316 continue;
285 if (((args->flags & ATTR_SECURE) != 0) != 317 if (!xfs_attr_namesp_match(args->flags, sfe->flags))
286 ((sfe->flags & XFS_ATTR_SECURE) != 0))
287 continue;
288 if (((args->flags & ATTR_ROOT) != 0) !=
289 ((sfe->flags & XFS_ATTR_ROOT) != 0))
290 continue; 318 continue;
291 break; 319 break;
292 } 320 }
@@ -363,11 +391,7 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args)
363 continue; 391 continue;
364 if (memcmp(args->name, sfe->nameval, args->namelen) != 0) 392 if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
365 continue; 393 continue;
366 if (((args->flags & ATTR_SECURE) != 0) != 394 if (!xfs_attr_namesp_match(args->flags, sfe->flags))
367 ((sfe->flags & XFS_ATTR_SECURE) != 0))
368 continue;
369 if (((args->flags & ATTR_ROOT) != 0) !=
370 ((sfe->flags & XFS_ATTR_ROOT) != 0))
371 continue; 395 continue;
372 return(XFS_ERROR(EEXIST)); 396 return(XFS_ERROR(EEXIST));
373 } 397 }
@@ -394,11 +418,7 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args)
394 continue; 418 continue;
395 if (memcmp(args->name, sfe->nameval, args->namelen) != 0) 419 if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
396 continue; 420 continue;
397 if (((args->flags & ATTR_SECURE) != 0) != 421 if (!xfs_attr_namesp_match(args->flags, sfe->flags))
398 ((sfe->flags & XFS_ATTR_SECURE) != 0))
399 continue;
400 if (((args->flags & ATTR_ROOT) != 0) !=
401 ((sfe->flags & XFS_ATTR_ROOT) != 0))
402 continue; 422 continue;
403 if (args->flags & ATTR_KERNOVAL) { 423 if (args->flags & ATTR_KERNOVAL) {
404 args->valuelen = sfe->valuelen; 424 args->valuelen = sfe->valuelen;
@@ -485,8 +505,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
485 nargs.valuelen = sfe->valuelen; 505 nargs.valuelen = sfe->valuelen;
486 nargs.hashval = xfs_da_hashname((char *)sfe->nameval, 506 nargs.hashval = xfs_da_hashname((char *)sfe->nameval,
487 sfe->namelen); 507 sfe->namelen);
488 nargs.flags = (sfe->flags & XFS_ATTR_SECURE) ? ATTR_SECURE : 508 nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(sfe->flags);
489 ((sfe->flags & XFS_ATTR_ROOT) ? ATTR_ROOT : 0);
490 error = xfs_attr_leaf_lookup_int(bp, &nargs); /* set a->index */ 509 error = xfs_attr_leaf_lookup_int(bp, &nargs); /* set a->index */
491 ASSERT(error == ENOATTR); 510 ASSERT(error == ENOATTR);
492 error = xfs_attr_leaf_add(bp, &nargs); 511 error = xfs_attr_leaf_add(bp, &nargs);
@@ -520,6 +539,10 @@ xfs_attr_shortform_compare(const void *a, const void *b)
520 } 539 }
521} 540}
522 541
542
543#define XFS_ISRESET_CURSOR(cursor) \
544 (!((cursor)->initted) && !((cursor)->hashval) && \
545 !((cursor)->blkno) && !((cursor)->offset))
523/* 546/*
524 * Copy out entries of shortform attribute lists for attr_list(). 547 * Copy out entries of shortform attribute lists for attr_list().
525 * Shortform attribute lists are not stored in hashval sorted order. 548 * Shortform attribute lists are not stored in hashval sorted order.
@@ -537,6 +560,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
537 xfs_attr_sf_entry_t *sfe; 560 xfs_attr_sf_entry_t *sfe;
538 xfs_inode_t *dp; 561 xfs_inode_t *dp;
539 int sbsize, nsbuf, count, i; 562 int sbsize, nsbuf, count, i;
563 int error;
540 564
541 ASSERT(context != NULL); 565 ASSERT(context != NULL);
542 dp = context->dp; 566 dp = context->dp;
@@ -552,46 +576,51 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
552 xfs_attr_trace_l_c("sf start", context); 576 xfs_attr_trace_l_c("sf start", context);
553 577
554 /* 578 /*
555 * If the buffer is large enough, do not bother with sorting. 579 * If the buffer is large enough and the cursor is at the start,
580 * do not bother with sorting since we will return everything in
581 * one buffer and another call using the cursor won't need to be
582 * made.
556 * Note the generous fudge factor of 16 overhead bytes per entry. 583 * Note the generous fudge factor of 16 overhead bytes per entry.
584 * If bufsize is zero then put_listent must be a search function
585 * and can just scan through what we have.
557 */ 586 */
558 if ((dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize) { 587 if (context->bufsize == 0 ||
588 (XFS_ISRESET_CURSOR(cursor) &&
589 (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
559 for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) { 590 for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
560 attrnames_t *namesp; 591 attrnames_t *namesp;
561 592
562 if (((context->flags & ATTR_SECURE) != 0) != 593 if (!xfs_attr_namesp_match_overrides(context->flags, sfe->flags)) {
563 ((sfe->flags & XFS_ATTR_SECURE) != 0) &&
564 !(context->flags & ATTR_KERNORMALS)) {
565 sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
566 continue;
567 }
568 if (((context->flags & ATTR_ROOT) != 0) !=
569 ((sfe->flags & XFS_ATTR_ROOT) != 0) &&
570 !(context->flags & ATTR_KERNROOTLS)) {
571 sfe = XFS_ATTR_SF_NEXTENTRY(sfe); 594 sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
572 continue; 595 continue;
573 } 596 }
574 namesp = (sfe->flags & XFS_ATTR_SECURE) ? &attr_secure: 597 namesp = xfs_attr_flags_namesp(sfe->flags);
575 ((sfe->flags & XFS_ATTR_ROOT) ? &attr_trusted : 598 error = context->put_listent(context,
576 &attr_user); 599 namesp,
577 if (context->flags & ATTR_KERNOVAL) { 600 (char *)sfe->nameval,
578 ASSERT(context->flags & ATTR_KERNAMELS); 601 (int)sfe->namelen,
579 context->count += namesp->attr_namelen + 602 (int)sfe->valuelen,
580 sfe->namelen + 1; 603 (char*)&sfe->nameval[sfe->namelen]);
581 } 604
582 else { 605 /*
583 if (xfs_attr_put_listent(context, namesp, 606 * Either search callback finished early or
584 (char *)sfe->nameval, 607 * didn't fit it all in the buffer after all.
585 (int)sfe->namelen, 608 */
586 (int)sfe->valuelen)) 609 if (context->seen_enough)
587 break; 610 break;
588 } 611
612 if (error)
613 return error;
589 sfe = XFS_ATTR_SF_NEXTENTRY(sfe); 614 sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
590 } 615 }
591 xfs_attr_trace_l_c("sf big-gulp", context); 616 xfs_attr_trace_l_c("sf big-gulp", context);
592 return(0); 617 return(0);
593 } 618 }
594 619
620 /* do no more for a search callback */
621 if (context->bufsize == 0)
622 return 0;
623
595 /* 624 /*
596 * It didn't all fit, so we have to sort everything on hashval. 625 * It didn't all fit, so we have to sort everything on hashval.
597 */ 626 */
@@ -614,15 +643,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
614 kmem_free(sbuf, sbsize); 643 kmem_free(sbuf, sbsize);
615 return XFS_ERROR(EFSCORRUPTED); 644 return XFS_ERROR(EFSCORRUPTED);
616 } 645 }
617 if (((context->flags & ATTR_SECURE) != 0) != 646 if (!xfs_attr_namesp_match_overrides(context->flags, sfe->flags)) {
618 ((sfe->flags & XFS_ATTR_SECURE) != 0) &&
619 !(context->flags & ATTR_KERNORMALS)) {
620 sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
621 continue;
622 }
623 if (((context->flags & ATTR_ROOT) != 0) !=
624 ((sfe->flags & XFS_ATTR_ROOT) != 0) &&
625 !(context->flags & ATTR_KERNROOTLS)) {
626 sfe = XFS_ATTR_SF_NEXTENTRY(sfe); 647 sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
627 continue; 648 continue;
628 } 649 }
@@ -671,24 +692,22 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
671 for ( ; i < nsbuf; i++, sbp++) { 692 for ( ; i < nsbuf; i++, sbp++) {
672 attrnames_t *namesp; 693 attrnames_t *namesp;
673 694
674 namesp = (sbp->flags & XFS_ATTR_SECURE) ? &attr_secure : 695 namesp = xfs_attr_flags_namesp(sbp->flags);
675 ((sbp->flags & XFS_ATTR_ROOT) ? &attr_trusted :
676 &attr_user);
677 696
678 if (cursor->hashval != sbp->hash) { 697 if (cursor->hashval != sbp->hash) {
679 cursor->hashval = sbp->hash; 698 cursor->hashval = sbp->hash;
680 cursor->offset = 0; 699 cursor->offset = 0;
681 } 700 }
682 if (context->flags & ATTR_KERNOVAL) { 701 error = context->put_listent(context,
683 ASSERT(context->flags & ATTR_KERNAMELS); 702 namesp,
684 context->count += namesp->attr_namelen + 703 sbp->name,
685 sbp->namelen + 1; 704 sbp->namelen,
686 } else { 705 sbp->valuelen,
687 if (xfs_attr_put_listent(context, namesp, 706 &sbp->name[sbp->namelen]);
688 sbp->name, sbp->namelen, 707 if (error)
689 sbp->valuelen)) 708 return error;
690 break; 709 if (context->seen_enough)
691 } 710 break;
692 cursor->offset++; 711 cursor->offset++;
693 } 712 }
694 713
@@ -810,8 +829,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
810 nargs.value = (char *)&name_loc->nameval[nargs.namelen]; 829 nargs.value = (char *)&name_loc->nameval[nargs.namelen];
811 nargs.valuelen = be16_to_cpu(name_loc->valuelen); 830 nargs.valuelen = be16_to_cpu(name_loc->valuelen);
812 nargs.hashval = be32_to_cpu(entry->hashval); 831 nargs.hashval = be32_to_cpu(entry->hashval);
813 nargs.flags = (entry->flags & XFS_ATTR_SECURE) ? ATTR_SECURE : 832 nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(entry->flags);
814 ((entry->flags & XFS_ATTR_ROOT) ? ATTR_ROOT : 0);
815 xfs_attr_shortform_add(&nargs, forkoff); 833 xfs_attr_shortform_add(&nargs, forkoff);
816 } 834 }
817 error = 0; 835 error = 0;
@@ -1098,8 +1116,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
1098 be16_to_cpu(map->size)); 1116 be16_to_cpu(map->size));
1099 entry->hashval = cpu_to_be32(args->hashval); 1117 entry->hashval = cpu_to_be32(args->hashval);
1100 entry->flags = tmp ? XFS_ATTR_LOCAL : 0; 1118 entry->flags = tmp ? XFS_ATTR_LOCAL : 0;
1101 entry->flags |= (args->flags & ATTR_SECURE) ? XFS_ATTR_SECURE : 1119 entry->flags |= XFS_ATTR_NSP_ARGS_TO_ONDISK(args->flags);
1102 ((args->flags & ATTR_ROOT) ? XFS_ATTR_ROOT : 0);
1103 if (args->rename) { 1120 if (args->rename) {
1104 entry->flags |= XFS_ATTR_INCOMPLETE; 1121 entry->flags |= XFS_ATTR_INCOMPLETE;
1105 if ((args->blkno2 == args->blkno) && 1122 if ((args->blkno2 == args->blkno) &&
@@ -1926,7 +1943,7 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
1926 else 1943 else
1927 break; 1944 break;
1928 } 1945 }
1929 ASSERT((probe >= 0) && 1946 ASSERT((probe >= 0) &&
1930 (!leaf->hdr.count 1947 (!leaf->hdr.count
1931 || (probe < be16_to_cpu(leaf->hdr.count)))); 1948 || (probe < be16_to_cpu(leaf->hdr.count))));
1932 ASSERT((span <= 4) || (be32_to_cpu(entry->hashval) == hashval)); 1949 ASSERT((span <= 4) || (be32_to_cpu(entry->hashval) == hashval));
@@ -1971,14 +1988,9 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
1971 name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, probe); 1988 name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, probe);
1972 if (name_loc->namelen != args->namelen) 1989 if (name_loc->namelen != args->namelen)
1973 continue; 1990 continue;
1974 if (memcmp(args->name, (char *)name_loc->nameval, 1991 if (memcmp(args->name, (char *)name_loc->nameval, args->namelen) != 0)
1975 args->namelen) != 0)
1976 continue; 1992 continue;
1977 if (((args->flags & ATTR_SECURE) != 0) != 1993 if (!xfs_attr_namesp_match(args->flags, entry->flags))
1978 ((entry->flags & XFS_ATTR_SECURE) != 0))
1979 continue;
1980 if (((args->flags & ATTR_ROOT) != 0) !=
1981 ((entry->flags & XFS_ATTR_ROOT) != 0))
1982 continue; 1994 continue;
1983 args->index = probe; 1995 args->index = probe;
1984 return(XFS_ERROR(EEXIST)); 1996 return(XFS_ERROR(EEXIST));
@@ -1989,11 +2001,7 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
1989 if (memcmp(args->name, (char *)name_rmt->name, 2001 if (memcmp(args->name, (char *)name_rmt->name,
1990 args->namelen) != 0) 2002 args->namelen) != 0)
1991 continue; 2003 continue;
1992 if (((args->flags & ATTR_SECURE) != 0) != 2004 if (!xfs_attr_namesp_match(args->flags, entry->flags))
1993 ((entry->flags & XFS_ATTR_SECURE) != 0))
1994 continue;
1995 if (((args->flags & ATTR_ROOT) != 0) !=
1996 ((entry->flags & XFS_ATTR_ROOT) != 0))
1997 continue; 2005 continue;
1998 args->index = probe; 2006 args->index = probe;
1999 args->rmtblkno = be32_to_cpu(name_rmt->valueblk); 2007 args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
@@ -2312,8 +2320,6 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
2312 attrlist_cursor_kern_t *cursor; 2320 attrlist_cursor_kern_t *cursor;
2313 xfs_attr_leafblock_t *leaf; 2321 xfs_attr_leafblock_t *leaf;
2314 xfs_attr_leaf_entry_t *entry; 2322 xfs_attr_leaf_entry_t *entry;
2315 xfs_attr_leaf_name_local_t *name_loc;
2316 xfs_attr_leaf_name_remote_t *name_rmt;
2317 int retval, i; 2323 int retval, i;
2318 2324
2319 ASSERT(bp != NULL); 2325 ASSERT(bp != NULL);
@@ -2355,9 +2361,8 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
2355 * We have found our place, start copying out the new attributes. 2361 * We have found our place, start copying out the new attributes.
2356 */ 2362 */
2357 retval = 0; 2363 retval = 0;
2358 for ( ; (i < be16_to_cpu(leaf->hdr.count)) 2364 for ( ; (i < be16_to_cpu(leaf->hdr.count)); entry++, i++) {
2359 && (retval == 0); entry++, i++) { 2365 attrnames_t *namesp;
2360 attrnames_t *namesp;
2361 2366
2362 if (be32_to_cpu(entry->hashval) != cursor->hashval) { 2367 if (be32_to_cpu(entry->hashval) != cursor->hashval) {
2363 cursor->hashval = be32_to_cpu(entry->hashval); 2368 cursor->hashval = be32_to_cpu(entry->hashval);
@@ -2366,115 +2371,69 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
2366 2371
2367 if (entry->flags & XFS_ATTR_INCOMPLETE) 2372 if (entry->flags & XFS_ATTR_INCOMPLETE)
2368 continue; /* skip incomplete entries */ 2373 continue; /* skip incomplete entries */
2369 if (((context->flags & ATTR_SECURE) != 0) != 2374 if (!xfs_attr_namesp_match_overrides(context->flags, entry->flags))
2370 ((entry->flags & XFS_ATTR_SECURE) != 0) && 2375 continue;
2371 !(context->flags & ATTR_KERNORMALS)) 2376
2372 continue; /* skip non-matching entries */ 2377 namesp = xfs_attr_flags_namesp(entry->flags);
2373 if (((context->flags & ATTR_ROOT) != 0) !=
2374 ((entry->flags & XFS_ATTR_ROOT) != 0) &&
2375 !(context->flags & ATTR_KERNROOTLS))
2376 continue; /* skip non-matching entries */
2377
2378 namesp = (entry->flags & XFS_ATTR_SECURE) ? &attr_secure :
2379 ((entry->flags & XFS_ATTR_ROOT) ? &attr_trusted :
2380 &attr_user);
2381 2378
2382 if (entry->flags & XFS_ATTR_LOCAL) { 2379 if (entry->flags & XFS_ATTR_LOCAL) {
2383 name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i); 2380 xfs_attr_leaf_name_local_t *name_loc =
2384 if (context->flags & ATTR_KERNOVAL) { 2381 XFS_ATTR_LEAF_NAME_LOCAL(leaf, i);
2385 ASSERT(context->flags & ATTR_KERNAMELS); 2382
2386 context->count += namesp->attr_namelen + 2383 retval = context->put_listent(context,
2387 (int)name_loc->namelen + 1; 2384 namesp,
2388 } else { 2385 (char *)name_loc->nameval,
2389 retval = xfs_attr_put_listent(context, namesp, 2386 (int)name_loc->namelen,
2390 (char *)name_loc->nameval, 2387 be16_to_cpu(name_loc->valuelen),
2391 (int)name_loc->namelen, 2388 (char *)&name_loc->nameval[name_loc->namelen]);
2392 be16_to_cpu(name_loc->valuelen)); 2389 if (retval)
2393 } 2390 return retval;
2394 } else { 2391 } else {
2395 name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); 2392 xfs_attr_leaf_name_remote_t *name_rmt =
2396 if (context->flags & ATTR_KERNOVAL) { 2393 XFS_ATTR_LEAF_NAME_REMOTE(leaf, i);
2397 ASSERT(context->flags & ATTR_KERNAMELS); 2394
2398 context->count += namesp->attr_namelen + 2395 int valuelen = be32_to_cpu(name_rmt->valuelen);
2399 (int)name_rmt->namelen + 1; 2396
2400 } else { 2397 if (context->put_value) {
2401 retval = xfs_attr_put_listent(context, namesp, 2398 xfs_da_args_t args;
2402 (char *)name_rmt->name, 2399
2403 (int)name_rmt->namelen, 2400 memset((char *)&args, 0, sizeof(args));
2404 be32_to_cpu(name_rmt->valuelen)); 2401 args.dp = context->dp;
2402 args.whichfork = XFS_ATTR_FORK;
2403 args.valuelen = valuelen;
2404 args.value = kmem_alloc(valuelen, KM_SLEEP);
2405 args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
2406 args.rmtblkcnt = XFS_B_TO_FSB(args.dp->i_mount, valuelen);
2407 retval = xfs_attr_rmtval_get(&args);
2408 if (retval)
2409 return retval;
2410 retval = context->put_listent(context,
2411 namesp,
2412 (char *)name_rmt->name,
2413 (int)name_rmt->namelen,
2414 valuelen,
2415 (char*)args.value);
2416 kmem_free(args.value, valuelen);
2405 } 2417 }
2418 else {
2419 retval = context->put_listent(context,
2420 namesp,
2421 (char *)name_rmt->name,
2422 (int)name_rmt->namelen,
2423 valuelen,
2424 NULL);
2425 }
2426 if (retval)
2427 return retval;
2406 } 2428 }
2407 if (retval == 0) { 2429 if (context->seen_enough)
2408 cursor->offset++; 2430 break;
2409 } 2431 cursor->offset++;
2410 } 2432 }
2411 xfs_attr_trace_l_cl("blk end", context, leaf); 2433 xfs_attr_trace_l_cl("blk end", context, leaf);
2412 return(retval); 2434 return(retval);
2413} 2435}
2414 2436
2415#define ATTR_ENTBASESIZE /* minimum bytes used by an attr */ \
2416 (((struct attrlist_ent *) 0)->a_name - (char *) 0)
2417#define ATTR_ENTSIZE(namelen) /* actual bytes used by an attr */ \
2418 ((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(u_int32_t)-1) \
2419 & ~(sizeof(u_int32_t)-1))
2420
2421/*
2422 * Format an attribute and copy it out to the user's buffer.
2423 * Take care to check values and protect against them changing later,
2424 * we may be reading them directly out of a user buffer.
2425 */
2426/*ARGSUSED*/
2427STATIC int
2428xfs_attr_put_listent(xfs_attr_list_context_t *context,
2429 attrnames_t *namesp, char *name, int namelen, int valuelen)
2430{
2431 attrlist_ent_t *aep;
2432 int arraytop;
2433
2434 ASSERT(!(context->flags & ATTR_KERNOVAL));
2435 if (context->flags & ATTR_KERNAMELS) {
2436 char *offset;
2437
2438 ASSERT(context->count >= 0);
2439
2440 arraytop = context->count + namesp->attr_namelen + namelen + 1;
2441 if (arraytop > context->firstu) {
2442 context->count = -1; /* insufficient space */
2443 return(1);
2444 }
2445 offset = (char *)context->alist + context->count;
2446 strncpy(offset, namesp->attr_name, namesp->attr_namelen);
2447 offset += namesp->attr_namelen;
2448 strncpy(offset, name, namelen); /* real name */
2449 offset += namelen;
2450 *offset = '\0';
2451 context->count += namesp->attr_namelen + namelen + 1;
2452 return(0);
2453 }
2454
2455 ASSERT(context->count >= 0);
2456 ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
2457 ASSERT(context->firstu >= sizeof(*context->alist));
2458 ASSERT(context->firstu <= context->bufsize);
2459
2460 arraytop = sizeof(*context->alist) +
2461 context->count * sizeof(context->alist->al_offset[0]);
2462 context->firstu -= ATTR_ENTSIZE(namelen);
2463 if (context->firstu < arraytop) {
2464 xfs_attr_trace_l_c("buffer full", context);
2465 context->alist->al_more = 1;
2466 return(1);
2467 }
2468
2469 aep = (attrlist_ent_t *)&(((char *)context->alist)[ context->firstu ]);
2470 aep->a_valuelen = valuelen;
2471 memcpy(aep->a_name, name, namelen);
2472 aep->a_name[ namelen ] = 0;
2473 context->alist->al_offset[ context->count++ ] = context->firstu;
2474 context->alist->al_count = context->count;
2475 xfs_attr_trace_l_c("add", context);
2476 return(0);
2477}
2478 2437
2479/*======================================================================== 2438/*========================================================================
2480 * Manage the INCOMPLETE flag in a leaf entry 2439 * Manage the INCOMPLETE flag in a leaf entry
diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h
index 51c3ee156b2f..040f732ce1e2 100644
--- a/fs/xfs/xfs_attr_leaf.h
+++ b/fs/xfs/xfs_attr_leaf.h
@@ -130,6 +130,19 @@ typedef struct xfs_attr_leafblock {
130#define XFS_ATTR_INCOMPLETE (1 << XFS_ATTR_INCOMPLETE_BIT) 130#define XFS_ATTR_INCOMPLETE (1 << XFS_ATTR_INCOMPLETE_BIT)
131 131
132/* 132/*
133 * Conversion macros for converting namespace bits from argument flags
134 * to ondisk flags.
135 */
136#define XFS_ATTR_NSP_ARGS_MASK (ATTR_ROOT | ATTR_SECURE)
137#define XFS_ATTR_NSP_ONDISK_MASK (XFS_ATTR_ROOT | XFS_ATTR_SECURE)
138#define XFS_ATTR_NSP_ONDISK(flags) ((flags) & XFS_ATTR_NSP_ONDISK_MASK)
139#define XFS_ATTR_NSP_ARGS(flags) ((flags) & XFS_ATTR_NSP_ARGS_MASK)
140#define XFS_ATTR_NSP_ARGS_TO_ONDISK(x) (((x) & ATTR_ROOT ? XFS_ATTR_ROOT : 0) |\
141 ((x) & ATTR_SECURE ? XFS_ATTR_SECURE : 0))
142#define XFS_ATTR_NSP_ONDISK_TO_ARGS(x) (((x) & XFS_ATTR_ROOT ? ATTR_ROOT : 0) |\
143 ((x) & XFS_ATTR_SECURE ? ATTR_SECURE : 0))
144
145/*
133 * Alignment for namelist and valuelist entries (since they are mixed 146 * Alignment for namelist and valuelist entries (since they are mixed
134 * there can be only one alignment value) 147 * there can be only one alignment value)
135 */ 148 */
@@ -196,16 +209,26 @@ static inline int xfs_attr_leaf_entsize_local_max(int bsize)
196 * Structure used to pass context around among the routines. 209 * Structure used to pass context around among the routines.
197 *========================================================================*/ 210 *========================================================================*/
198 211
212
213struct xfs_attr_list_context;
214
215typedef int (*put_listent_func_t)(struct xfs_attr_list_context *, struct attrnames *,
216 char *, int, int, char *);
217
199typedef struct xfs_attr_list_context { 218typedef struct xfs_attr_list_context {
200 struct xfs_inode *dp; /* inode */ 219 struct xfs_inode *dp; /* inode */
201 struct attrlist_cursor_kern *cursor;/* position in list */ 220 struct attrlist_cursor_kern *cursor; /* position in list */
202 struct attrlist *alist; /* output buffer */ 221 struct attrlist *alist; /* output buffer */
203 int count; /* num used entries */ 222 int seen_enough; /* T/F: seen enough of list? */
204 int dupcnt; /* count dup hashvals seen */ 223 int count; /* num used entries */
205 int bufsize;/* total buffer size */ 224 int dupcnt; /* count dup hashvals seen */
206 int firstu; /* first used byte in buffer */ 225 int bufsize; /* total buffer size */
207 int flags; /* from VOP call */ 226 int firstu; /* first used byte in buffer */
208 int resynch;/* T/F: resynch with cursor */ 227 int flags; /* from VOP call */
228 int resynch; /* T/F: resynch with cursor */
229 int put_value; /* T/F: need value for listent */
230 put_listent_func_t put_listent; /* list output fmt function */
231 int index; /* index into output buffer */
209} xfs_attr_list_context_t; 232} xfs_attr_list_context_t;
210 233
211/* 234/*
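
The conversion macros added above are what let the repeated ATTR_SECURE/ATTR_ROOT comparisons elsewhere in the series collapse into a single mask comparison (xfs_attr_namesp_match() in xfs_attr_leaf.c). The sketch below exercises that comparison in isolation; the numeric flag values and the namesp_match() wrapper are assumptions made for a self-contained example, and only the macro shapes are taken from the hunk above.

#include <assert.h>

#define ATTR_ROOT	0x0002		/* assumed argument-flag values */
#define ATTR_SECURE	0x0008
#define XFS_ATTR_ROOT	(1 << 1)	/* assumed on-disk bit positions */
#define XFS_ATTR_SECURE	(1 << 2)

#define XFS_ATTR_NSP_ONDISK_MASK	(XFS_ATTR_ROOT | XFS_ATTR_SECURE)
#define XFS_ATTR_NSP_ONDISK(flags)	((flags) & XFS_ATTR_NSP_ONDISK_MASK)
#define XFS_ATTR_NSP_ARGS_TO_ONDISK(x)	(((x) & ATTR_ROOT ? XFS_ATTR_ROOT : 0) | \
					 ((x) & ATTR_SECURE ? XFS_ATTR_SECURE : 0))

/* Same shape as the new xfs_attr_namesp_match(): namespaces must agree exactly. */
static int namesp_match(int arg_flags, int ondisk_flags)
{
	return XFS_ATTR_NSP_ONDISK(ondisk_flags) ==
	       XFS_ATTR_NSP_ARGS_TO_ONDISK(arg_flags);
}

int main(void)
{
	assert(namesp_match(ATTR_SECURE, XFS_ATTR_SECURE));	/* both "secure" */
	assert(!namesp_match(ATTR_ROOT, XFS_ATTR_SECURE));	/* namespaces differ */
	assert(namesp_match(0, 0));				/* both "user" */
	return 0;
}
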
diff --git a/fs/xfs/xfs_behavior.c b/fs/xfs/xfs_behavior.c
index f4fe3715a803..0dc17219d412 100644
--- a/fs/xfs/xfs_behavior.c
+++ b/fs/xfs/xfs_behavior.c
@@ -110,26 +110,6 @@ bhv_remove_not_first(bhv_head_t *bhp, bhv_desc_t *bdp)
110} 110}
111 111
112/* 112/*
113 * Look for a specific ops vector on the specified behavior chain.
114 * Return the associated behavior descriptor. Or NULL, if not found.
115 */
116bhv_desc_t *
117bhv_lookup(bhv_head_t *bhp, void *ops)
118{
119 bhv_desc_t *curdesc;
120
121 for (curdesc = bhp->bh_first;
122 curdesc != NULL;
123 curdesc = curdesc->bd_next) {
124
125 if (curdesc->bd_ops == ops)
126 return curdesc;
127 }
128
129 return NULL;
130}
131
132/*
133 * Looks for the first behavior within a specified range of positions. 113 * Looks for the first behavior within a specified range of positions.
134 * Return the associated behavior descriptor. Or NULL, if none found. 114 * Return the associated behavior descriptor. Or NULL, if none found.
135 */ 115 */
diff --git a/fs/xfs/xfs_behavior.h b/fs/xfs/xfs_behavior.h
index 6e6e56fb352d..e7ca1fed955a 100644
--- a/fs/xfs/xfs_behavior.h
+++ b/fs/xfs/xfs_behavior.h
@@ -176,12 +176,10 @@ extern void bhv_insert_initial(bhv_head_t *, bhv_desc_t *);
176 * Behavior module prototypes. 176 * Behavior module prototypes.
177 */ 177 */
178extern void bhv_remove_not_first(bhv_head_t *bhp, bhv_desc_t *bdp); 178extern void bhv_remove_not_first(bhv_head_t *bhp, bhv_desc_t *bdp);
179extern bhv_desc_t * bhv_lookup(bhv_head_t *bhp, void *ops);
180extern bhv_desc_t * bhv_lookup_range(bhv_head_t *bhp, int low, int high); 179extern bhv_desc_t * bhv_lookup_range(bhv_head_t *bhp, int low, int high);
181extern bhv_desc_t * bhv_base(bhv_head_t *bhp); 180extern bhv_desc_t * bhv_base(bhv_head_t *bhp);
182 181
183/* No bhv locking on Linux */ 182/* No bhv locking on Linux */
184#define bhv_lookup_unlocked bhv_lookup
185#define bhv_base_unlocked bhv_base 183#define bhv_base_unlocked bhv_base
186 184
187#endif /* __XFS_BEHAVIOR_H__ */ 185#endif /* __XFS_BEHAVIOR_H__ */
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index bf46fae303af..5b050c06795f 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -2999,7 +2999,7 @@ xfs_bmap_btree_to_extents(
2999 int error; /* error return value */ 2999 int error; /* error return value */
3000 xfs_ifork_t *ifp; /* inode fork data */ 3000 xfs_ifork_t *ifp; /* inode fork data */
3001 xfs_mount_t *mp; /* mount point structure */ 3001 xfs_mount_t *mp; /* mount point structure */
3002 xfs_bmbt_ptr_t *pp; /* ptr to block address */ 3002 __be64 *pp; /* ptr to block address */
3003 xfs_bmbt_block_t *rblock;/* root btree block */ 3003 xfs_bmbt_block_t *rblock;/* root btree block */
3004 3004
3005 ifp = XFS_IFORK_PTR(ip, whichfork); 3005 ifp = XFS_IFORK_PTR(ip, whichfork);
@@ -3011,12 +3011,12 @@ xfs_bmap_btree_to_extents(
3011 ASSERT(XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes) == 1); 3011 ASSERT(XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes) == 1);
3012 mp = ip->i_mount; 3012 mp = ip->i_mount;
3013 pp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, ifp->if_broot_bytes); 3013 pp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, ifp->if_broot_bytes);
3014 cbno = be64_to_cpu(*pp);
3014 *logflagsp = 0; 3015 *logflagsp = 0;
3015#ifdef DEBUG 3016#ifdef DEBUG
3016 if ((error = xfs_btree_check_lptr(cur, INT_GET(*pp, ARCH_CONVERT), 1))) 3017 if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
3017 return error; 3018 return error;
3018#endif 3019#endif
3019 cbno = INT_GET(*pp, ARCH_CONVERT);
3020 if ((error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, 3020 if ((error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp,
3021 XFS_BMAP_BTREE_REF))) 3021 XFS_BMAP_BTREE_REF)))
3022 return error; 3022 return error;
@@ -3512,9 +3512,9 @@ xfs_bmap_extents_to_btree(
3512 */ 3512 */
3513 kp = XFS_BMAP_KEY_IADDR(block, 1, cur); 3513 kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
3514 arp = XFS_BMAP_REC_IADDR(ablock, 1, cur); 3514 arp = XFS_BMAP_REC_IADDR(ablock, 1, cur);
3515 INT_SET(kp->br_startoff, ARCH_CONVERT, xfs_bmbt_disk_get_startoff(arp)); 3515 kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
3516 pp = XFS_BMAP_PTR_IADDR(block, 1, cur); 3516 pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
3517 INT_SET(*pp, ARCH_CONVERT, args.fsbno); 3517 *pp = cpu_to_be64(args.fsbno);
3518 /* 3518 /*
3519 * Do all this logging at the end so that 3519 * Do all this logging at the end so that
3520 * the root is at the right level. 3520 * the root is at the right level.
@@ -3705,7 +3705,7 @@ STATIC xfs_bmbt_rec_t * /* pointer to found extent entry */
3705xfs_bmap_search_extents( 3705xfs_bmap_search_extents(
3706 xfs_inode_t *ip, /* incore inode pointer */ 3706 xfs_inode_t *ip, /* incore inode pointer */
3707 xfs_fileoff_t bno, /* block number searched for */ 3707 xfs_fileoff_t bno, /* block number searched for */
3708 int whichfork, /* data or attr fork */ 3708 int fork, /* data or attr fork */
3709 int *eofp, /* out: end of file found */ 3709 int *eofp, /* out: end of file found */
3710 xfs_extnum_t *lastxp, /* out: last extent index */ 3710 xfs_extnum_t *lastxp, /* out: last extent index */
3711 xfs_bmbt_irec_t *gotp, /* out: extent entry found */ 3711 xfs_bmbt_irec_t *gotp, /* out: extent entry found */
@@ -3713,25 +3713,28 @@ xfs_bmap_search_extents(
3713{ 3713{
3714 xfs_ifork_t *ifp; /* inode fork pointer */ 3714 xfs_ifork_t *ifp; /* inode fork pointer */
3715 xfs_bmbt_rec_t *ep; /* extent record pointer */ 3715 xfs_bmbt_rec_t *ep; /* extent record pointer */
3716 int rt; /* realtime flag */
3717 3716
3718 XFS_STATS_INC(xs_look_exlist); 3717 XFS_STATS_INC(xs_look_exlist);
3719 ifp = XFS_IFORK_PTR(ip, whichfork); 3718 ifp = XFS_IFORK_PTR(ip, fork);
3720 3719
3721 ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp); 3720 ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp);
3722 3721
3723 rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 3722 if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) &&
3724 if (unlikely(!rt && !gotp->br_startblock && (*lastxp != NULLEXTNUM))) { 3723 !(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) {
3725 cmn_err(CE_PANIC,"Access to block zero: fs: <%s> inode: %lld " 3724 xfs_cmn_err(XFS_PTAG_FSBLOCK_ZERO, CE_ALERT, ip->i_mount,
3726 "start_block : %llx start_off : %llx blkcnt : %llx " 3725 "Access to block zero in inode %llu "
3727 "extent-state : %x \n", 3726 "start_block: %llx start_off: %llx "
3728 (ip->i_mount)->m_fsname, (long long)ip->i_ino, 3727 "blkcnt: %llx extent-state: %x lastx: %x\n",
3728 (unsigned long long)ip->i_ino,
3729 (unsigned long long)gotp->br_startblock, 3729 (unsigned long long)gotp->br_startblock,
3730 (unsigned long long)gotp->br_startoff, 3730 (unsigned long long)gotp->br_startoff,
3731 (unsigned long long)gotp->br_blockcount, 3731 (unsigned long long)gotp->br_blockcount,
3732 gotp->br_state); 3732 gotp->br_state, *lastxp);
3733 } 3733 *lastxp = NULLEXTNUM;
3734 return ep; 3734 *eofp = 1;
3735 return NULL;
3736 }
3737 return ep;
3735} 3738}
3736 3739
3737 3740
@@ -4494,7 +4497,7 @@ xfs_bmap_read_extents(
4494 xfs_ifork_t *ifp; /* fork structure */ 4497 xfs_ifork_t *ifp; /* fork structure */
4495 int level; /* btree level, for checking */ 4498 int level; /* btree level, for checking */
4496 xfs_mount_t *mp; /* file system mount structure */ 4499 xfs_mount_t *mp; /* file system mount structure */
4497 xfs_bmbt_ptr_t *pp; /* pointer to block address */ 4500 __be64 *pp; /* pointer to block address */
4498 /* REFERENCED */ 4501 /* REFERENCED */
4499 xfs_extnum_t room; /* number of entries there's room for */ 4502 xfs_extnum_t room; /* number of entries there's room for */
4500 4503
@@ -4510,10 +4513,10 @@ xfs_bmap_read_extents(
4510 level = be16_to_cpu(block->bb_level); 4513 level = be16_to_cpu(block->bb_level);
4511 ASSERT(level > 0); 4514 ASSERT(level > 0);
4512 pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes); 4515 pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
4513 ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO); 4516 bno = be64_to_cpu(*pp);
4514 ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount); 4517 ASSERT(bno != NULLDFSBNO);
4515 ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks); 4518 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
4516 bno = INT_GET(*pp, ARCH_CONVERT); 4519 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
4517 /* 4520 /*
4518 * Go down the tree until leaf level is reached, following the first 4521 * Go down the tree until leaf level is reached, following the first
4519 * pointer (leftmost) at each level. 4522 * pointer (leftmost) at each level.
@@ -4530,10 +4533,8 @@ xfs_bmap_read_extents(
4530 break; 4533 break;
4531 pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block, 4534 pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block,
4532 1, mp->m_bmap_dmxr[1]); 4535 1, mp->m_bmap_dmxr[1]);
4533 XFS_WANT_CORRUPTED_GOTO( 4536 bno = be64_to_cpu(*pp);
4534 XFS_FSB_SANITY_CHECK(mp, INT_GET(*pp, ARCH_CONVERT)), 4537 XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
4535 error0);
4536 bno = INT_GET(*pp, ARCH_CONVERT);
4537 xfs_trans_brelse(tp, bp); 4538 xfs_trans_brelse(tp, bp);
4538 } 4539 }
4539 /* 4540 /*
@@ -6141,7 +6142,7 @@ xfs_check_block(
6141 short sz) 6142 short sz)
6142{ 6143{
6143 int i, j, dmxr; 6144 int i, j, dmxr;
6144 xfs_bmbt_ptr_t *pp, *thispa; /* pointer to block address */ 6145 __be64 *pp, *thispa; /* pointer to block address */
6145 xfs_bmbt_key_t *prevp, *keyp; 6146 xfs_bmbt_key_t *prevp, *keyp;
6146 6147
6147 ASSERT(be16_to_cpu(block->bb_level) > 0); 6148 ASSERT(be16_to_cpu(block->bb_level) > 0);
@@ -6179,11 +6180,10 @@ xfs_check_block(
6179 thispa = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, 6180 thispa = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
6180 xfs_bmbt, block, j, dmxr); 6181 xfs_bmbt, block, j, dmxr);
6181 } 6182 }
6182 if (INT_GET(*thispa, ARCH_CONVERT) == 6183 if (*thispa == *pp) {
6183 INT_GET(*pp, ARCH_CONVERT)) {
6184 cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld", 6184 cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld",
6185 __FUNCTION__, j, i, 6185 __FUNCTION__, j, i,
6186 INT_GET(*thispa, ARCH_CONVERT)); 6186 (unsigned long long)be64_to_cpu(*thispa));
6187 panic("%s: ptrs are equal in node\n", 6187 panic("%s: ptrs are equal in node\n",
6188 __FUNCTION__); 6188 __FUNCTION__);
6189 } 6189 }
@@ -6210,7 +6210,7 @@ xfs_bmap_check_leaf_extents(
6210 xfs_ifork_t *ifp; /* fork structure */ 6210 xfs_ifork_t *ifp; /* fork structure */
6211 int level; /* btree level, for checking */ 6211 int level; /* btree level, for checking */
6212 xfs_mount_t *mp; /* file system mount structure */ 6212 xfs_mount_t *mp; /* file system mount structure */
6213 xfs_bmbt_ptr_t *pp; /* pointer to block address */ 6213 __be64 *pp; /* pointer to block address */
6214 xfs_bmbt_rec_t *ep; /* pointer to current extent */ 6214 xfs_bmbt_rec_t *ep; /* pointer to current extent */
6215 xfs_bmbt_rec_t *lastp; /* pointer to previous extent */ 6215 xfs_bmbt_rec_t *lastp; /* pointer to previous extent */
6216 xfs_bmbt_rec_t *nextp; /* pointer to next extent */ 6216 xfs_bmbt_rec_t *nextp; /* pointer to next extent */
@@ -6231,10 +6231,12 @@ xfs_bmap_check_leaf_extents(
6231 ASSERT(level > 0); 6231 ASSERT(level > 0);
6232 xfs_check_block(block, mp, 1, ifp->if_broot_bytes); 6232 xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
6233 pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes); 6233 pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
6234 ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO); 6234 bno = be64_to_cpu(*pp);
6235 ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount); 6235
6236 ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks); 6236 ASSERT(bno != NULLDFSBNO);
6237 bno = INT_GET(*pp, ARCH_CONVERT); 6237 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
6238 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
6239
6238 /* 6240 /*
6239 * Go down the tree until leaf level is reached, following the first 6241 * Go down the tree until leaf level is reached, following the first
6240 * pointer (leftmost) at each level. 6242 * pointer (leftmost) at each level.
@@ -6265,8 +6267,8 @@ xfs_bmap_check_leaf_extents(
6265 xfs_check_block(block, mp, 0, 0); 6267 xfs_check_block(block, mp, 0, 0);
6266 pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block, 6268 pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block,
6267 1, mp->m_bmap_dmxr[1]); 6269 1, mp->m_bmap_dmxr[1]);
6268 XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, INT_GET(*pp, ARCH_CONVERT)), error0); 6270 bno = be64_to_cpu(*pp);
6269 bno = INT_GET(*pp, ARCH_CONVERT); 6271 XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
6270 if (bp_release) { 6272 if (bp_release) {
6271 bp_release = 0; 6273 bp_release = 0;
6272 xfs_trans_brelse(NULL, bp); 6274 xfs_trans_brelse(NULL, bp);
@@ -6372,7 +6374,7 @@ xfs_bmap_count_blocks(
6372 xfs_ifork_t *ifp; /* fork structure */ 6374 xfs_ifork_t *ifp; /* fork structure */
6373 int level; /* btree level, for checking */ 6375 int level; /* btree level, for checking */
6374 xfs_mount_t *mp; /* file system mount structure */ 6376 xfs_mount_t *mp; /* file system mount structure */
6375 xfs_bmbt_ptr_t *pp; /* pointer to block address */ 6377 __be64 *pp; /* pointer to block address */
6376 6378
6377 bno = NULLFSBLOCK; 6379 bno = NULLFSBLOCK;
6378 mp = ip->i_mount; 6380 mp = ip->i_mount;
@@ -6395,10 +6397,10 @@ xfs_bmap_count_blocks(
6395 level = be16_to_cpu(block->bb_level); 6397 level = be16_to_cpu(block->bb_level);
6396 ASSERT(level > 0); 6398 ASSERT(level > 0);
6397 pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes); 6399 pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
6398 ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO); 6400 bno = be64_to_cpu(*pp);
6399 ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount); 6401 ASSERT(bno != NULLDFSBNO);
6400 ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks); 6402 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
6401 bno = INT_GET(*pp, ARCH_CONVERT); 6403 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
6402 6404
6403 if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) { 6405 if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
6404 XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW, 6406 XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
@@ -6425,7 +6427,7 @@ xfs_bmap_count_tree(
6425 int error; 6427 int error;
6426 xfs_buf_t *bp, *nbp; 6428 xfs_buf_t *bp, *nbp;
6427 int level = levelin; 6429 int level = levelin;
6428 xfs_bmbt_ptr_t *pp; 6430 __be64 *pp;
6429 xfs_fsblock_t bno = blockno; 6431 xfs_fsblock_t bno = blockno;
6430 xfs_fsblock_t nextbno; 6432 xfs_fsblock_t nextbno;
6431 xfs_bmbt_block_t *block, *nextblock; 6433 xfs_bmbt_block_t *block, *nextblock;
@@ -6452,7 +6454,7 @@ xfs_bmap_count_tree(
6452 /* Dive to the next level */ 6454 /* Dive to the next level */
6453 pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, 6455 pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
6454 xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]); 6456 xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
6455 bno = INT_GET(*pp, ARCH_CONVERT); 6457 bno = be64_to_cpu(*pp);
6456 if (unlikely((error = 6458 if (unlikely((error =
6457 xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) { 6459 xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
6458 xfs_trans_brelse(tp, bp); 6460 xfs_trans_brelse(tp, bp);
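
The xfs_bmap.c hunks above consistently replace repeated INT_GET(*pp, ARCH_CONVERT) reads with a single be64_to_cpu(*pp) decode into a local bno that the subsequent ASSERTs and sanity checks reuse. A minimal user-space sketch of that decode-once pattern, with a hand-rolled helper standing in for the kernel's be64_to_cpu() (the _sketch names and the fake on-disk bytes are assumptions, not kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint64_t be64_sketch;			/* raw big-endian value, as stored on disk */

#define NULLDFSBNO_SKETCH	((uint64_t)-1)	/* stand-in for NULLDFSBNO */

/* Stand-in for the kernel's be64_to_cpu(): most significant byte first. */
static uint64_t be64_to_cpu_sketch(be64_sketch x)
{
	unsigned char p[8];
	uint64_t v = 0;
	int i;

	memcpy(p, &x, sizeof(p));
	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	/* Pretend *pp points at a big-endian block pointer inside a btree block. */
	unsigned char raw[8] = { 0, 0, 0, 0, 0, 0, 0x12, 0x34 };
	be64_sketch pp;
	uint64_t bno;

	memcpy(&pp, raw, sizeof(pp));

	/* As in the new code: decode once, then run every check on the local copy. */
	bno = be64_to_cpu_sketch(pp);
	assert(bno != NULLDFSBNO_SKETCH);
	printf("block number 0x%llx passed the checks\n", (unsigned long long)bno);
	return 0;
}

Decoding once also keeps disk-endian and host-endian values from being mixed in the later comparisons.
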
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 18fb7385d719..a7b835bf870a 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -58,7 +58,7 @@ STATIC void xfs_bmbt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
58STATIC int xfs_bmbt_lshift(xfs_btree_cur_t *, int, int *); 58STATIC int xfs_bmbt_lshift(xfs_btree_cur_t *, int, int *);
59STATIC int xfs_bmbt_rshift(xfs_btree_cur_t *, int, int *); 59STATIC int xfs_bmbt_rshift(xfs_btree_cur_t *, int, int *);
60STATIC int xfs_bmbt_split(xfs_btree_cur_t *, int, xfs_fsblock_t *, 60STATIC int xfs_bmbt_split(xfs_btree_cur_t *, int, xfs_fsblock_t *,
61 xfs_bmbt_key_t *, xfs_btree_cur_t **, int *); 61 __uint64_t *, xfs_btree_cur_t **, int *);
62STATIC int xfs_bmbt_updkey(xfs_btree_cur_t *, xfs_bmbt_key_t *, int); 62STATIC int xfs_bmbt_updkey(xfs_btree_cur_t *, xfs_bmbt_key_t *, int);
63 63
64 64
@@ -192,16 +192,11 @@ xfs_bmbt_trace_argifk(
192 xfs_btree_cur_t *cur, 192 xfs_btree_cur_t *cur,
193 int i, 193 int i,
194 xfs_fsblock_t f, 194 xfs_fsblock_t f,
195 xfs_bmbt_key_t *k, 195 xfs_dfiloff_t o,
196 int line) 196 int line)
197{ 197{
198 xfs_dfsbno_t d;
199 xfs_dfiloff_t o;
200
201 d = (xfs_dfsbno_t)f;
202 o = INT_GET(k->br_startoff, ARCH_CONVERT);
203 xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFK, line, 198 xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFK, line,
204 i, d >> 32, (int)d, o >> 32, 199 i, (xfs_dfsbno_t)f >> 32, (int)f, o >> 32,
205 (int)o, 0, 0, 0, 200 (int)o, 0, 0, 0,
206 0, 0, 0); 201 0, 0, 0);
207} 202}
@@ -248,7 +243,7 @@ xfs_bmbt_trace_argik(
248{ 243{
249 xfs_dfiloff_t o; 244 xfs_dfiloff_t o;
250 245
251 o = INT_GET(k->br_startoff, ARCH_CONVERT); 246 o = be64_to_cpu(k->br_startoff);
252 xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFK, line, 247 xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFK, line,
253 i, o >> 32, (int)o, 0, 248 i, o >> 32, (int)o, 0,
254 0, 0, 0, 0, 249 0, 0, 0, 0,
@@ -286,8 +281,8 @@ xfs_bmbt_trace_cursor(
286 xfs_bmbt_trace_argfffi(fname, c, o, b, i, j, __LINE__) 281 xfs_bmbt_trace_argfffi(fname, c, o, b, i, j, __LINE__)
287#define XFS_BMBT_TRACE_ARGI(c,i) \ 282#define XFS_BMBT_TRACE_ARGI(c,i) \
288 xfs_bmbt_trace_argi(fname, c, i, __LINE__) 283 xfs_bmbt_trace_argi(fname, c, i, __LINE__)
289#define XFS_BMBT_TRACE_ARGIFK(c,i,f,k) \ 284#define XFS_BMBT_TRACE_ARGIFK(c,i,f,s) \
290 xfs_bmbt_trace_argifk(fname, c, i, f, k, __LINE__) 285 xfs_bmbt_trace_argifk(fname, c, i, f, s, __LINE__)
291#define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) \ 286#define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) \
292 xfs_bmbt_trace_argifr(fname, c, i, f, r, __LINE__) 287 xfs_bmbt_trace_argifr(fname, c, i, f, r, __LINE__)
293#define XFS_BMBT_TRACE_ARGIK(c,i,k) \ 288#define XFS_BMBT_TRACE_ARGIK(c,i,k) \
@@ -299,7 +294,7 @@ xfs_bmbt_trace_cursor(
299#define XFS_BMBT_TRACE_ARGBII(c,b,i,j) 294#define XFS_BMBT_TRACE_ARGBII(c,b,i,j)
300#define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j) 295#define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j)
301#define XFS_BMBT_TRACE_ARGI(c,i) 296#define XFS_BMBT_TRACE_ARGI(c,i)
302#define XFS_BMBT_TRACE_ARGIFK(c,i,f,k) 297#define XFS_BMBT_TRACE_ARGIFK(c,i,f,s)
303#define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) 298#define XFS_BMBT_TRACE_ARGIFR(c,i,f,r)
304#define XFS_BMBT_TRACE_ARGIK(c,i,k) 299#define XFS_BMBT_TRACE_ARGIK(c,i,k)
305#define XFS_BMBT_TRACE_CURSOR(c,s) 300#define XFS_BMBT_TRACE_CURSOR(c,s)
@@ -357,7 +352,7 @@ xfs_bmbt_delrec(
357 XFS_BMBT_TRACE_CURSOR(cur, ENTRY); 352 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
358 XFS_BMBT_TRACE_ARGI(cur, level); 353 XFS_BMBT_TRACE_ARGI(cur, level);
359 ptr = cur->bc_ptrs[level]; 354 ptr = cur->bc_ptrs[level];
360 tcur = (xfs_btree_cur_t *)0; 355 tcur = NULL;
361 if (ptr == 0) { 356 if (ptr == 0) {
362 XFS_BMBT_TRACE_CURSOR(cur, EXIT); 357 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
363 *stat = 0; 358 *stat = 0;
@@ -382,7 +377,7 @@ xfs_bmbt_delrec(
382 pp = XFS_BMAP_PTR_IADDR(block, 1, cur); 377 pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
383#ifdef DEBUG 378#ifdef DEBUG
384 for (i = ptr; i < numrecs; i++) { 379 for (i = ptr; i < numrecs; i++) {
385 if ((error = xfs_btree_check_lptr(cur, INT_GET(pp[i], ARCH_CONVERT), level))) { 380 if ((error = xfs_btree_check_lptr_disk(cur, pp[i], level))) {
386 XFS_BMBT_TRACE_CURSOR(cur, ERROR); 381 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
387 goto error0; 382 goto error0;
388 } 383 }
@@ -404,7 +399,8 @@ xfs_bmbt_delrec(
404 xfs_bmbt_log_recs(cur, bp, ptr, numrecs - 1); 399 xfs_bmbt_log_recs(cur, bp, ptr, numrecs - 1);
405 } 400 }
406 if (ptr == 1) { 401 if (ptr == 1) {
407 INT_SET(key.br_startoff, ARCH_CONVERT, xfs_bmbt_disk_get_startoff(rp)); 402 key.br_startoff =
403 cpu_to_be64(xfs_bmbt_disk_get_startoff(rp));
408 kp = &key; 404 kp = &key;
409 } 405 }
410 } 406 }
@@ -621,7 +617,7 @@ xfs_bmbt_delrec(
621 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); 617 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
622#ifdef DEBUG 618#ifdef DEBUG
623 for (i = 0; i < numrrecs; i++) { 619 for (i = 0; i < numrrecs; i++) {
624 if ((error = xfs_btree_check_lptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) { 620 if ((error = xfs_btree_check_lptr_disk(cur, rpp[i], level))) {
625 XFS_BMBT_TRACE_CURSOR(cur, ERROR); 621 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
626 goto error0; 622 goto error0;
627 } 623 }
@@ -748,7 +744,7 @@ xfs_bmbt_insrec(
748 int logflags; /* inode logging flags */ 744 int logflags; /* inode logging flags */
749 xfs_fsblock_t nbno; /* new block number */ 745 xfs_fsblock_t nbno; /* new block number */
750 struct xfs_btree_cur *ncur; /* new btree cursor */ 746 struct xfs_btree_cur *ncur; /* new btree cursor */
751 xfs_bmbt_key_t nkey; /* new btree key value */ 747 __uint64_t startoff; /* new btree key value */
752 xfs_bmbt_rec_t nrec; /* new record count */ 748 xfs_bmbt_rec_t nrec; /* new record count */
753 int optr; /* old key/record index */ 749 int optr; /* old key/record index */
754 xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */ 750 xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */
@@ -759,9 +755,8 @@ xfs_bmbt_insrec(
759 ASSERT(level < cur->bc_nlevels); 755 ASSERT(level < cur->bc_nlevels);
760 XFS_BMBT_TRACE_CURSOR(cur, ENTRY); 756 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
761 XFS_BMBT_TRACE_ARGIFR(cur, level, *bnop, recp); 757 XFS_BMBT_TRACE_ARGIFR(cur, level, *bnop, recp);
762 ncur = (xfs_btree_cur_t *)0; 758 ncur = NULL;
763 INT_SET(key.br_startoff, ARCH_CONVERT, 759 key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(recp));
764 xfs_bmbt_disk_get_startoff(recp));
765 optr = ptr = cur->bc_ptrs[level]; 760 optr = ptr = cur->bc_ptrs[level];
766 if (ptr == 0) { 761 if (ptr == 0) {
767 XFS_BMBT_TRACE_CURSOR(cur, EXIT); 762 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
@@ -820,7 +815,7 @@ xfs_bmbt_insrec(
820 optr = ptr = cur->bc_ptrs[level]; 815 optr = ptr = cur->bc_ptrs[level];
821 } else { 816 } else {
822 if ((error = xfs_bmbt_split(cur, level, 817 if ((error = xfs_bmbt_split(cur, level,
823 &nbno, &nkey, &ncur, 818 &nbno, &startoff, &ncur,
824 &i))) { 819 &i))) {
825 XFS_BMBT_TRACE_CURSOR(cur, 820 XFS_BMBT_TRACE_CURSOR(cur,
826 ERROR); 821 ERROR);
@@ -840,7 +835,7 @@ xfs_bmbt_insrec(
840#endif 835#endif
841 ptr = cur->bc_ptrs[level]; 836 ptr = cur->bc_ptrs[level];
842 xfs_bmbt_disk_set_allf(&nrec, 837 xfs_bmbt_disk_set_allf(&nrec,
843 nkey.br_startoff, 0, 0, 838 startoff, 0, 0,
844 XFS_EXT_NORM); 839 XFS_EXT_NORM);
845 } else { 840 } else {
846 XFS_BMBT_TRACE_CURSOR(cur, 841 XFS_BMBT_TRACE_CURSOR(cur,
@@ -858,7 +853,7 @@ xfs_bmbt_insrec(
858 pp = XFS_BMAP_PTR_IADDR(block, 1, cur); 853 pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
859#ifdef DEBUG 854#ifdef DEBUG
860 for (i = numrecs; i >= ptr; i--) { 855 for (i = numrecs; i >= ptr; i--) {
861 if ((error = xfs_btree_check_lptr(cur, INT_GET(pp[i - 1], ARCH_CONVERT), 856 if ((error = xfs_btree_check_lptr_disk(cur, pp[i - 1],
862 level))) { 857 level))) {
863 XFS_BMBT_TRACE_CURSOR(cur, ERROR); 858 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
864 return error; 859 return error;
@@ -870,14 +865,13 @@ xfs_bmbt_insrec(
870 memmove(&pp[ptr], &pp[ptr - 1], /* INT_: direct copy */ 865 memmove(&pp[ptr], &pp[ptr - 1], /* INT_: direct copy */
871 (numrecs - ptr + 1) * sizeof(*pp)); 866 (numrecs - ptr + 1) * sizeof(*pp));
872#ifdef DEBUG 867#ifdef DEBUG
873 if ((error = xfs_btree_check_lptr(cur, (xfs_bmbt_ptr_t)*bnop, 868 if ((error = xfs_btree_check_lptr(cur, *bnop, level))) {
874 level))) {
875 XFS_BMBT_TRACE_CURSOR(cur, ERROR); 869 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
876 return error; 870 return error;
877 } 871 }
878#endif 872#endif
879 kp[ptr - 1] = key; 873 kp[ptr - 1] = key;
880 INT_SET(pp[ptr - 1], ARCH_CONVERT, *bnop); 874 pp[ptr - 1] = cpu_to_be64(*bnop);
881 numrecs++; 875 numrecs++;
882 block->bb_numrecs = cpu_to_be16(numrecs); 876 block->bb_numrecs = cpu_to_be16(numrecs);
883 xfs_bmbt_log_keys(cur, bp, ptr, numrecs); 877 xfs_bmbt_log_keys(cur, bp, ptr, numrecs);
@@ -988,7 +982,7 @@ xfs_bmbt_killroot(
988 cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur); 982 cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur);
989#ifdef DEBUG 983#ifdef DEBUG
990 for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) { 984 for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
991 if ((error = xfs_btree_check_lptr(cur, INT_GET(cpp[i], ARCH_CONVERT), level - 1))) { 985 if ((error = xfs_btree_check_lptr_disk(cur, cpp[i], level - 1))) {
992 XFS_BMBT_TRACE_CURSOR(cur, ERROR); 986 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
993 return error; 987 return error;
994 } 988 }
@@ -1132,7 +1126,7 @@ xfs_bmbt_lookup(
1132 d = XFS_FSB_TO_DADDR(mp, fsbno); 1126 d = XFS_FSB_TO_DADDR(mp, fsbno);
1133 bp = cur->bc_bufs[level]; 1127 bp = cur->bc_bufs[level];
1134 if (bp && XFS_BUF_ADDR(bp) != d) 1128 if (bp && XFS_BUF_ADDR(bp) != d)
1135 bp = (xfs_buf_t *)0; 1129 bp = NULL;
1136 if (!bp) { 1130 if (!bp) {
1137 if ((error = xfs_btree_read_bufl(mp, tp, fsbno, 1131 if ((error = xfs_btree_read_bufl(mp, tp, fsbno,
1138 0, &bp, XFS_BMAP_BTREE_REF))) { 1132 0, &bp, XFS_BMAP_BTREE_REF))) {
@@ -1170,7 +1164,7 @@ xfs_bmbt_lookup(
1170 keyno = (low + high) >> 1; 1164 keyno = (low + high) >> 1;
1171 if (level > 0) { 1165 if (level > 0) {
1172 kkp = kkbase + keyno - 1; 1166 kkp = kkbase + keyno - 1;
1173 startoff = INT_GET(kkp->br_startoff, ARCH_CONVERT); 1167 startoff = be64_to_cpu(kkp->br_startoff);
1174 } else { 1168 } else {
1175 krp = krbase + keyno - 1; 1169 krp = krbase + keyno - 1;
1176 startoff = xfs_bmbt_disk_get_startoff(krp); 1170 startoff = xfs_bmbt_disk_get_startoff(krp);
@@ -1189,13 +1183,13 @@ xfs_bmbt_lookup(
1189 if (diff > 0 && --keyno < 1) 1183 if (diff > 0 && --keyno < 1)
1190 keyno = 1; 1184 keyno = 1;
1191 pp = XFS_BMAP_PTR_IADDR(block, keyno, cur); 1185 pp = XFS_BMAP_PTR_IADDR(block, keyno, cur);
1186 fsbno = be64_to_cpu(*pp);
1192#ifdef DEBUG 1187#ifdef DEBUG
1193 if ((error = xfs_btree_check_lptr(cur, INT_GET(*pp, ARCH_CONVERT), level))) { 1188 if ((error = xfs_btree_check_lptr(cur, fsbno, level))) {
1194 XFS_BMBT_TRACE_CURSOR(cur, ERROR); 1189 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1195 return error; 1190 return error;
1196 } 1191 }
1197#endif 1192#endif
1198 fsbno = INT_GET(*pp, ARCH_CONVERT);
1199 cur->bc_ptrs[level] = keyno; 1193 cur->bc_ptrs[level] = keyno;
1200 } 1194 }
1201 } 1195 }
@@ -1313,7 +1307,7 @@ xfs_bmbt_lshift(
1313 lpp = XFS_BMAP_PTR_IADDR(left, lrecs, cur); 1307 lpp = XFS_BMAP_PTR_IADDR(left, lrecs, cur);
1314 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); 1308 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
1315#ifdef DEBUG 1309#ifdef DEBUG
1316 if ((error = xfs_btree_check_lptr(cur, INT_GET(*rpp, ARCH_CONVERT), level))) { 1310 if ((error = xfs_btree_check_lptr_disk(cur, *rpp, level))) {
1317 XFS_BMBT_TRACE_CURSOR(cur, ERROR); 1311 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1318 return error; 1312 return error;
1319 } 1313 }
@@ -1340,7 +1334,7 @@ xfs_bmbt_lshift(
1340 if (level > 0) { 1334 if (level > 0) {
1341#ifdef DEBUG 1335#ifdef DEBUG
1342 for (i = 0; i < rrecs; i++) { 1336 for (i = 0; i < rrecs; i++) {
1343 if ((error = xfs_btree_check_lptr(cur, INT_GET(rpp[i + 1], ARCH_CONVERT), 1337 if ((error = xfs_btree_check_lptr_disk(cur, rpp[i + 1],
1344 level))) { 1338 level))) {
1345 XFS_BMBT_TRACE_CURSOR(cur, ERROR); 1339 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1346 return error; 1340 return error;
@@ -1354,8 +1348,7 @@ xfs_bmbt_lshift(
1354 } else { 1348 } else {
1355 memmove(rrp, rrp + 1, rrecs * sizeof(*rrp)); 1349 memmove(rrp, rrp + 1, rrecs * sizeof(*rrp));
1356 xfs_bmbt_log_recs(cur, rbp, 1, rrecs); 1350 xfs_bmbt_log_recs(cur, rbp, 1, rrecs);
1357 INT_SET(key.br_startoff, ARCH_CONVERT, 1351 key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(rrp));
1358 xfs_bmbt_disk_get_startoff(rrp));
1359 rkp = &key; 1352 rkp = &key;
1360 } 1353 }
1361 if ((error = xfs_bmbt_updkey(cur, rkp, level + 1))) { 1354 if ((error = xfs_bmbt_updkey(cur, rkp, level + 1))) {
@@ -1445,7 +1438,7 @@ xfs_bmbt_rshift(
1445 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); 1438 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
1446#ifdef DEBUG 1439#ifdef DEBUG
1447 for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) { 1440 for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) {
1448 if ((error = xfs_btree_check_lptr(cur, INT_GET(rpp[i], ARCH_CONVERT), level))) { 1441 if ((error = xfs_btree_check_lptr_disk(cur, rpp[i], level))) {
1449 XFS_BMBT_TRACE_CURSOR(cur, ERROR); 1442 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1450 return error; 1443 return error;
1451 } 1444 }
@@ -1454,7 +1447,7 @@ xfs_bmbt_rshift(
1454 memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); 1447 memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
1455 memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); 1448 memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
1456#ifdef DEBUG 1449#ifdef DEBUG
1457 if ((error = xfs_btree_check_lptr(cur, INT_GET(*lpp, ARCH_CONVERT), level))) { 1450 if ((error = xfs_btree_check_lptr_disk(cur, *lpp, level))) {
1458 XFS_BMBT_TRACE_CURSOR(cur, ERROR); 1451 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1459 return error; 1452 return error;
1460 } 1453 }
@@ -1469,8 +1462,7 @@ xfs_bmbt_rshift(
1469 memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); 1462 memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
1470 *rrp = *lrp; 1463 *rrp = *lrp;
1471 xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); 1464 xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
1472 INT_SET(key.br_startoff, ARCH_CONVERT, 1465 key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(rrp));
1473 xfs_bmbt_disk_get_startoff(rrp));
1474 rkp = &key; 1466 rkp = &key;
1475 } 1467 }
1476 be16_add(&left->bb_numrecs, -1); 1468 be16_add(&left->bb_numrecs, -1);
@@ -1535,7 +1527,7 @@ xfs_bmbt_split(
1535 xfs_btree_cur_t *cur, 1527 xfs_btree_cur_t *cur,
1536 int level, 1528 int level,
1537 xfs_fsblock_t *bnop, 1529 xfs_fsblock_t *bnop,
1538 xfs_bmbt_key_t *keyp, 1530 __uint64_t *startoff,
1539 xfs_btree_cur_t **curp, 1531 xfs_btree_cur_t **curp,
1540 int *stat) /* success/failure */ 1532 int *stat) /* success/failure */
1541{ 1533{
@@ -1560,7 +1552,7 @@ xfs_bmbt_split(
1560 xfs_bmbt_rec_t *rrp; /* right record pointer */ 1552 xfs_bmbt_rec_t *rrp; /* right record pointer */
1561 1553
1562 XFS_BMBT_TRACE_CURSOR(cur, ENTRY); 1554 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1563 XFS_BMBT_TRACE_ARGIFK(cur, level, *bnop, keyp); 1555 XFS_BMBT_TRACE_ARGIFK(cur, level, *bnop, *startoff);
1564 args.tp = cur->bc_tp; 1556 args.tp = cur->bc_tp;
1565 args.mp = cur->bc_mp; 1557 args.mp = cur->bc_mp;
1566 lbp = cur->bc_bufs[level]; 1558 lbp = cur->bc_bufs[level];
@@ -1619,7 +1611,7 @@ xfs_bmbt_split(
1619 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); 1611 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
1620#ifdef DEBUG 1612#ifdef DEBUG
1621 for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { 1613 for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
1622 if ((error = xfs_btree_check_lptr(cur, INT_GET(lpp[i], ARCH_CONVERT), level))) { 1614 if ((error = xfs_btree_check_lptr_disk(cur, lpp[i], level))) {
1623 XFS_BMBT_TRACE_CURSOR(cur, ERROR); 1615 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1624 return error; 1616 return error;
1625 } 1617 }
@@ -1629,13 +1621,13 @@ xfs_bmbt_split(
1629 memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); 1621 memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
1630 xfs_bmbt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); 1622 xfs_bmbt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1631 xfs_bmbt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); 1623 xfs_bmbt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1632 keyp->br_startoff = INT_GET(rkp->br_startoff, ARCH_CONVERT); 1624 *startoff = be64_to_cpu(rkp->br_startoff);
1633 } else { 1625 } else {
1634 lrp = XFS_BMAP_REC_IADDR(left, i, cur); 1626 lrp = XFS_BMAP_REC_IADDR(left, i, cur);
1635 rrp = XFS_BMAP_REC_IADDR(right, 1, cur); 1627 rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
1636 memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); 1628 memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
1637 xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); 1629 xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1638 keyp->br_startoff = xfs_bmbt_disk_get_startoff(rrp); 1630 *startoff = xfs_bmbt_disk_get_startoff(rrp);
1639 } 1631 }
1640 be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs))); 1632 be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
1641 right->bb_rightsib = left->bb_rightsib; 1633 right->bb_rightsib = left->bb_rightsib;
@@ -1728,9 +1720,9 @@ xfs_bmdr_to_bmbt(
1728{ 1720{
1729 int dmxr; 1721 int dmxr;
1730 xfs_bmbt_key_t *fkp; 1722 xfs_bmbt_key_t *fkp;
1731 xfs_bmbt_ptr_t *fpp; 1723 __be64 *fpp;
1732 xfs_bmbt_key_t *tkp; 1724 xfs_bmbt_key_t *tkp;
1733 xfs_bmbt_ptr_t *tpp; 1725 __be64 *tpp;
1734 1726
1735 rblock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC); 1727 rblock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
1736 rblock->bb_level = dblock->bb_level; 1728 rblock->bb_level = dblock->bb_level;
@@ -1745,7 +1737,7 @@ xfs_bmdr_to_bmbt(
1745 tpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen); 1737 tpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
1746 dmxr = be16_to_cpu(dblock->bb_numrecs); 1738 dmxr = be16_to_cpu(dblock->bb_numrecs);
1747 memcpy(tkp, fkp, sizeof(*fkp) * dmxr); 1739 memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
1748 memcpy(tpp, fpp, sizeof(*fpp) * dmxr); /* INT_: direct copy */ 1740 memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
1749} 1741}
1750 1742
1751/* 1743/*
@@ -1805,7 +1797,7 @@ xfs_bmbt_decrement(
1805 tp = cur->bc_tp; 1797 tp = cur->bc_tp;
1806 mp = cur->bc_mp; 1798 mp = cur->bc_mp;
1807 for (block = xfs_bmbt_get_block(cur, lev, &bp); lev > level; ) { 1799 for (block = xfs_bmbt_get_block(cur, lev, &bp); lev > level; ) {
1808 fsbno = INT_GET(*XFS_BMAP_PTR_IADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); 1800 fsbno = be64_to_cpu(*XFS_BMAP_PTR_IADDR(block, cur->bc_ptrs[lev], cur));
1809 if ((error = xfs_btree_read_bufl(mp, tp, fsbno, 0, &bp, 1801 if ((error = xfs_btree_read_bufl(mp, tp, fsbno, 0, &bp,
1810 XFS_BMAP_BTREE_REF))) { 1802 XFS_BMAP_BTREE_REF))) {
1811 XFS_BMBT_TRACE_CURSOR(cur, ERROR); 1803 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
@@ -2135,7 +2127,7 @@ xfs_bmbt_increment(
2135 tp = cur->bc_tp; 2127 tp = cur->bc_tp;
2136 mp = cur->bc_mp; 2128 mp = cur->bc_mp;
2137 for (block = xfs_bmbt_get_block(cur, lev, &bp); lev > level; ) { 2129 for (block = xfs_bmbt_get_block(cur, lev, &bp); lev > level; ) {
2138 fsbno = INT_GET(*XFS_BMAP_PTR_IADDR(block, cur->bc_ptrs[lev], cur), ARCH_CONVERT); 2130 fsbno = be64_to_cpu(*XFS_BMAP_PTR_IADDR(block, cur->bc_ptrs[lev], cur));
2139 if ((error = xfs_btree_read_bufl(mp, tp, fsbno, 0, &bp, 2131 if ((error = xfs_btree_read_bufl(mp, tp, fsbno, 0, &bp,
2140 XFS_BMAP_BTREE_REF))) { 2132 XFS_BMAP_BTREE_REF))) {
2141 XFS_BMBT_TRACE_CURSOR(cur, ERROR); 2133 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
@@ -2178,7 +2170,7 @@ xfs_bmbt_insert(
2178 level = 0; 2170 level = 0;
2179 nbno = NULLFSBLOCK; 2171 nbno = NULLFSBLOCK;
2180 xfs_bmbt_disk_set_all(&nrec, &cur->bc_rec.b); 2172 xfs_bmbt_disk_set_all(&nrec, &cur->bc_rec.b);
2181 ncur = (xfs_btree_cur_t *)0; 2173 ncur = NULL;
2182 pcur = cur; 2174 pcur = cur;
2183 do { 2175 do {
2184 if ((error = xfs_bmbt_insrec(pcur, level++, &nbno, &nrec, &ncur, 2176 if ((error = xfs_bmbt_insrec(pcur, level++, &nbno, &nrec, &ncur,
@@ -2205,7 +2197,7 @@ xfs_bmbt_insert(
2205 } 2197 }
2206 if (ncur) { 2198 if (ncur) {
2207 pcur = ncur; 2199 pcur = ncur;
2208 ncur = (xfs_btree_cur_t *)0; 2200 ncur = NULL;
2209 } 2201 }
2210 } while (nbno != NULLFSBLOCK); 2202 } while (nbno != NULLFSBLOCK);
2211 XFS_BMBT_TRACE_CURSOR(cur, EXIT); 2203 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
@@ -2356,12 +2348,12 @@ xfs_bmbt_newroot(
2356 args.firstblock = args.fsbno; 2348 args.firstblock = args.fsbno;
2357 if (args.fsbno == NULLFSBLOCK) { 2349 if (args.fsbno == NULLFSBLOCK) {
2358#ifdef DEBUG 2350#ifdef DEBUG
2359 if ((error = xfs_btree_check_lptr(cur, INT_GET(*pp, ARCH_CONVERT), level))) { 2351 if ((error = xfs_btree_check_lptr_disk(cur, *pp, level))) {
2360 XFS_BMBT_TRACE_CURSOR(cur, ERROR); 2352 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
2361 return error; 2353 return error;
2362 } 2354 }
2363#endif 2355#endif
2364 args.fsbno = INT_GET(*pp, ARCH_CONVERT); 2356 args.fsbno = be64_to_cpu(*pp);
2365 args.type = XFS_ALLOCTYPE_START_BNO; 2357 args.type = XFS_ALLOCTYPE_START_BNO;
2366 } else 2358 } else
2367 args.type = XFS_ALLOCTYPE_NEAR_BNO; 2359 args.type = XFS_ALLOCTYPE_NEAR_BNO;
@@ -2393,7 +2385,7 @@ xfs_bmbt_newroot(
2393 cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur); 2385 cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur);
2394#ifdef DEBUG 2386#ifdef DEBUG
2395 for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) { 2387 for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
2396 if ((error = xfs_btree_check_lptr(cur, INT_GET(pp[i], ARCH_CONVERT), level))) { 2388 if ((error = xfs_btree_check_lptr_disk(cur, pp[i], level))) {
2397 XFS_BMBT_TRACE_CURSOR(cur, ERROR); 2389 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
2398 return error; 2390 return error;
2399 } 2391 }
@@ -2401,13 +2393,12 @@ xfs_bmbt_newroot(
2401#endif 2393#endif
2402 memcpy(cpp, pp, be16_to_cpu(cblock->bb_numrecs) * sizeof(*pp)); 2394 memcpy(cpp, pp, be16_to_cpu(cblock->bb_numrecs) * sizeof(*pp));
2403#ifdef DEBUG 2395#ifdef DEBUG
2404 if ((error = xfs_btree_check_lptr(cur, (xfs_bmbt_ptr_t)args.fsbno, 2396 if ((error = xfs_btree_check_lptr(cur, args.fsbno, level))) {
2405 level))) {
2406 XFS_BMBT_TRACE_CURSOR(cur, ERROR); 2397 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
2407 return error; 2398 return error;
2408 } 2399 }
2409#endif 2400#endif
2410 INT_SET(*pp, ARCH_CONVERT, args.fsbno); 2401 *pp = cpu_to_be64(args.fsbno);
2411 xfs_iroot_realloc(cur->bc_private.b.ip, 1 - be16_to_cpu(cblock->bb_numrecs), 2402 xfs_iroot_realloc(cur->bc_private.b.ip, 1 - be16_to_cpu(cblock->bb_numrecs),
2412 cur->bc_private.b.whichfork); 2403 cur->bc_private.b.whichfork);
2413 xfs_btree_setbuf(cur, level, bp); 2404 xfs_btree_setbuf(cur, level, bp);
@@ -2681,9 +2672,9 @@ xfs_bmbt_to_bmdr(
2681{ 2672{
2682 int dmxr; 2673 int dmxr;
2683 xfs_bmbt_key_t *fkp; 2674 xfs_bmbt_key_t *fkp;
2684 xfs_bmbt_ptr_t *fpp; 2675 __be64 *fpp;
2685 xfs_bmbt_key_t *tkp; 2676 xfs_bmbt_key_t *tkp;
2686 xfs_bmbt_ptr_t *tpp; 2677 __be64 *tpp;
2687 2678
2688 ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC); 2679 ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC);
2689 ASSERT(be64_to_cpu(rblock->bb_leftsib) == NULLDFSBNO); 2680 ASSERT(be64_to_cpu(rblock->bb_leftsib) == NULLDFSBNO);
@@ -2698,7 +2689,7 @@ xfs_bmbt_to_bmdr(
2698 tpp = XFS_BTREE_PTR_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr); 2689 tpp = XFS_BTREE_PTR_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr);
2699 dmxr = be16_to_cpu(dblock->bb_numrecs); 2690 dmxr = be16_to_cpu(dblock->bb_numrecs);
2700 memcpy(tkp, fkp, sizeof(*fkp) * dmxr); 2691 memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
2701 memcpy(tpp, fpp, sizeof(*fpp) * dmxr); /* INT_: direct copy */ 2692 memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
2702} 2693}
2703 2694
2704/* 2695/*
@@ -2740,7 +2731,7 @@ xfs_bmbt_update(
2740 XFS_BMBT_TRACE_CURSOR(cur, EXIT); 2731 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
2741 return 0; 2732 return 0;
2742 } 2733 }
2743 INT_SET(key.br_startoff, ARCH_CONVERT, off); 2734 key.br_startoff = cpu_to_be64(off);
2744 if ((error = xfs_bmbt_updkey(cur, &key, 1))) { 2735 if ((error = xfs_bmbt_updkey(cur, &key, 1))) {
2745 XFS_BMBT_TRACE_CURSOR(cur, ERROR); 2736 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
2746 return error; 2737 return error;
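
xfs_bmbt_trace_argifk() above now receives the startoff value directly and, like the surrounding trace helpers, splits each 64-bit argument into a high and a low 32-bit half (the "o >> 32, (int)o" pairs) for the fixed-width trace slots. A small stand-alone sketch of that packing, assuming unsigned 32-bit slots (the kernel casts the low half to int, but the idea is the same):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Pack a 64-bit trace argument into two 32-bit slots, high half first. */
static void pack64_sketch(uint64_t v, uint32_t slot[2])
{
	slot[0] = (uint32_t)(v >> 32);
	slot[1] = (uint32_t)v;
}

static uint64_t unpack64_sketch(const uint32_t slot[2])
{
	return ((uint64_t)slot[0] << 32) | slot[1];
}

int main(void)
{
	uint32_t slot[2];
	uint64_t startoff = 0x123456789abcdef0ULL;

	pack64_sketch(startoff, slot);
	printf("round trip: 0x%" PRIx64 "\n", unpack64_sketch(slot));
	return 0;
}
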
diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h
index 6478cfa0e539..49539de9525b 100644
--- a/fs/xfs/xfs_bmap_btree.h
+++ b/fs/xfs/xfs_bmap_btree.h
@@ -163,13 +163,14 @@ typedef struct xfs_bmbt_irec
163/* 163/*
164 * Key structure for non-leaf levels of the tree. 164 * Key structure for non-leaf levels of the tree.
165 */ 165 */
166typedef struct xfs_bmbt_key 166typedef struct xfs_bmbt_key {
167{ 167 __be64 br_startoff; /* starting file offset */
168 xfs_dfiloff_t br_startoff; /* starting file offset */
169} xfs_bmbt_key_t, xfs_bmdr_key_t; 168} xfs_bmbt_key_t, xfs_bmdr_key_t;
170 169
171typedef xfs_dfsbno_t xfs_bmbt_ptr_t, xfs_bmdr_ptr_t; /* btree pointer type */ 170/* btree pointer type */
172 /* btree block header type */ 171typedef __be64 xfs_bmbt_ptr_t, xfs_bmdr_ptr_t;
172
173/* btree block header type */
173typedef struct xfs_btree_lblock xfs_bmbt_block_t; 174typedef struct xfs_btree_lblock xfs_bmbt_block_t;
174 175
175#define XFS_BUF_TO_BMBT_BLOCK(bp) ((xfs_bmbt_block_t *)XFS_BUF_PTR(bp)) 176#define XFS_BUF_TO_BMBT_BLOCK(bp) ((xfs_bmbt_block_t *)XFS_BUF_PTR(bp))
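
With br_startoff and xfs_bmbt_ptr_t declared as __be64 in the header above, keys and pointers stay in disk byte order everywhere in memory, so copying them between the incore root and the on-disk form is a plain memcpy() with no per-field conversion, which is why the old "/* INT_: direct copy */" reminders disappear from xfs_bmdr_to_bmbt() and xfs_bmbt_to_bmdr(). A tiny sketch of that idea (the struct layout here is a simplified assumption):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Keys are kept big-endian everywhere in memory; only readers convert. */
struct demo_bmbt_key {
	uint64_t br_startoff_be;	/* big-endian, exactly as on disk */
};

int main(void)
{
	struct demo_bmbt_key disk_keys[4] = {
		{ 0x0100000000000000ULL }, { 0x0200000000000000ULL },
		{ 0x0300000000000000ULL }, { 0x0400000000000000ULL },
	};
	struct demo_bmbt_key incore_root[4];

	/* Same representation on both sides, so a plain memcpy is enough. */
	memcpy(incore_root, disk_keys, sizeof(disk_keys));

	printf("copied %zu keys with no per-field byte swapping\n",
	       sizeof(disk_keys) / sizeof(disk_keys[0]));
	return 0;
}
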
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index ee2255bd6562..aeb87ca69fcc 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -161,7 +161,7 @@ xfs_btree_check_key(
161 161
162 k1 = ak1; 162 k1 = ak1;
163 k2 = ak2; 163 k2 = ak2;
164 ASSERT(INT_GET(k1->br_startoff, ARCH_CONVERT) < INT_GET(k2->br_startoff, ARCH_CONVERT)); 164 ASSERT(be64_to_cpu(k1->br_startoff) < be64_to_cpu(k2->br_startoff));
165 break; 165 break;
166 } 166 }
167 case XFS_BTNUM_INO: { 167 case XFS_BTNUM_INO: {
@@ -170,7 +170,7 @@ xfs_btree_check_key(
170 170
171 k1 = ak1; 171 k1 = ak1;
172 k2 = ak2; 172 k2 = ak2;
173 ASSERT(INT_GET(k1->ir_startino, ARCH_CONVERT) < INT_GET(k2->ir_startino, ARCH_CONVERT)); 173 ASSERT(be32_to_cpu(k1->ir_startino) < be32_to_cpu(k2->ir_startino));
174 break; 174 break;
175 } 175 }
176 default: 176 default:
@@ -285,8 +285,8 @@ xfs_btree_check_rec(
285 285
286 r1 = ar1; 286 r1 = ar1;
287 r2 = ar2; 287 r2 = ar2;
288 ASSERT(INT_GET(r1->ir_startino, ARCH_CONVERT) + XFS_INODES_PER_CHUNK <= 288 ASSERT(be32_to_cpu(r1->ir_startino) + XFS_INODES_PER_CHUNK <=
289 INT_GET(r2->ir_startino, ARCH_CONVERT)); 289 be32_to_cpu(r2->ir_startino));
290 break; 290 break;
291 } 291 }
292 default: 292 default:
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h
index 44f1bd98064a..892b06c54263 100644
--- a/fs/xfs/xfs_btree.h
+++ b/fs/xfs/xfs_btree.h
@@ -145,7 +145,7 @@ typedef struct xfs_btree_cur
145 union { 145 union {
146 xfs_alloc_rec_incore_t a; 146 xfs_alloc_rec_incore_t a;
147 xfs_bmbt_irec_t b; 147 xfs_bmbt_irec_t b;
148 xfs_inobt_rec_t i; 148 xfs_inobt_rec_incore_t i;
149 } bc_rec; /* current insert/search record value */ 149 } bc_rec; /* current insert/search record value */
150 struct xfs_buf *bc_bufs[XFS_BTREE_MAXLEVELS]; /* buf ptr per level */ 150 struct xfs_buf *bc_bufs[XFS_BTREE_MAXLEVELS]; /* buf ptr per level */
151 int bc_ptrs[XFS_BTREE_MAXLEVELS]; /* key/record # */ 151 int bc_ptrs[XFS_BTREE_MAXLEVELS]; /* key/record # */
@@ -243,6 +243,9 @@ xfs_btree_check_lptr(
243 xfs_dfsbno_t ptr, /* btree block disk address */ 243 xfs_dfsbno_t ptr, /* btree block disk address */
244 int level); /* btree block level */ 244 int level); /* btree block level */
245 245
246#define xfs_btree_check_lptr_disk(cur, ptr, level) \
247 xfs_btree_check_lptr(cur, be64_to_cpu(ptr), level)
248
246/* 249/*
247 * Checking routine: check that short form block header is ok. 250 * Checking routine: check that short form block header is ok.
248 */ 251 */
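
The new xfs_btree_check_lptr_disk() macro above folds the be64_to_cpu() conversion into the existing long-form pointer check, so the DEBUG loops in the bmap-btree code can pass raw disk-endian pointers. A hedged sketch of the same wrapper shape in isolation (the validity check is faked and __builtin_bswap64 is a GCC/Clang stand-in byte swap; only the macro structure mirrors the diff):

#include <stdint.h>
#include <stdio.h>

/* Faked host-endian validity check standing in for xfs_btree_check_lptr(). */
static int check_lptr_sketch(uint64_t ptr, int level)
{
	return (ptr != 0 && ptr != (uint64_t)-1 && level >= 0) ? 0 : 22 /* EINVAL */;
}

/* GCC/Clang byte swap standing in for be64_to_cpu() on a little-endian host. */
static uint64_t swab64_sketch(uint64_t x)
{
	return __builtin_bswap64(x);
}

/* Same shape as the new macro: convert the disk value, reuse the old check. */
#define check_lptr_disk_sketch(ptr, level) \
	check_lptr_sketch(swab64_sketch(ptr), (level))

int main(void)
{
	uint64_t disk_ptr = swab64_sketch(0x1234);	/* pretend on-disk pointer */

	printf("check returned %d\n", check_lptr_disk_sketch(disk_ptr, 1));
	return 0;
}
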
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index a4aa53974f76..7a55c248ea70 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -234,7 +234,6 @@ xfs_buf_item_format(
234 ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || 234 ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
235 (bip->bli_flags & XFS_BLI_STALE)); 235 (bip->bli_flags & XFS_BLI_STALE));
236 bp = bip->bli_buf; 236 bp = bip->bli_buf;
237 ASSERT(XFS_BUF_BP_ISMAPPED(bp));
238 vecp = log_vector; 237 vecp = log_vector;
239 238
240 /* 239 /*
@@ -628,25 +627,6 @@ xfs_buf_item_committed(
628} 627}
629 628
630/* 629/*
631 * This is called when the transaction holding the buffer is aborted.
632 * Just behave as if the transaction had been cancelled. If we're shutting down
633 * and have aborted this transaction, we'll trap this buffer when it tries to
634 * get written out.
635 */
636STATIC void
637xfs_buf_item_abort(
638 xfs_buf_log_item_t *bip)
639{
640 xfs_buf_t *bp;
641
642 bp = bip->bli_buf;
643 xfs_buftrace("XFS_ABORT", bp);
644 XFS_BUF_SUPER_STALE(bp);
645 xfs_buf_item_unlock(bip);
646 return;
647}
648
649/*
650 * This is called to asynchronously write the buffer associated with this 630 * This is called to asynchronously write the buffer associated with this
651 * buf log item out to disk. The buffer will already have been locked by 631 * buf log item out to disk. The buffer will already have been locked by
652 * a successful call to xfs_buf_item_trylock(). If the buffer still has 632 * a successful call to xfs_buf_item_trylock(). If the buffer still has
@@ -693,7 +673,6 @@ STATIC struct xfs_item_ops xfs_buf_item_ops = {
693 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) 673 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
694 xfs_buf_item_committed, 674 xfs_buf_item_committed,
695 .iop_push = (void(*)(xfs_log_item_t*))xfs_buf_item_push, 675 .iop_push = (void(*)(xfs_log_item_t*))xfs_buf_item_push,
696 .iop_abort = (void(*)(xfs_log_item_t*))xfs_buf_item_abort,
697 .iop_pushbuf = NULL, 676 .iop_pushbuf = NULL,
698 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) 677 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
699 xfs_buf_item_committing 678 xfs_buf_item_committing
@@ -901,7 +880,6 @@ xfs_buf_item_relse(
901 XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list); 880 XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list);
902 if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) && 881 if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) &&
903 (XFS_BUF_IODONE_FUNC(bp) != NULL)) { 882 (XFS_BUF_IODONE_FUNC(bp) != NULL)) {
904 ASSERT((XFS_BUF_ISUNINITIAL(bp)) == 0);
905 XFS_BUF_CLR_IODONE_FUNC(bp); 883 XFS_BUF_CLR_IODONE_FUNC(bp);
906 } 884 }
907 885
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 32ab61d17ace..a68bc1f1a313 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -1054,7 +1054,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
1054 xfs_da_node_entry_t *btree; 1054 xfs_da_node_entry_t *btree;
1055 xfs_dablk_t blkno; 1055 xfs_dablk_t blkno;
1056 int probe, span, max, error, retval; 1056 int probe, span, max, error, retval;
1057 xfs_dahash_t hashval; 1057 xfs_dahash_t hashval, btreehashval;
1058 xfs_da_args_t *args; 1058 xfs_da_args_t *args;
1059 1059
1060 args = state->args; 1060 args = state->args;
@@ -1079,30 +1079,32 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
1079 return(error); 1079 return(error);
1080 } 1080 }
1081 curr = blk->bp->data; 1081 curr = blk->bp->data;
1082 ASSERT(be16_to_cpu(curr->magic) == XFS_DA_NODE_MAGIC || 1082 blk->magic = be16_to_cpu(curr->magic);
1083 be16_to_cpu(curr->magic) == XFS_DIR2_LEAFN_MAGIC || 1083 ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
1084 be16_to_cpu(curr->magic) == XFS_ATTR_LEAF_MAGIC); 1084 blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1085 blk->magic == XFS_ATTR_LEAF_MAGIC);
1085 1086
1086 /* 1087 /*
1087 * Search an intermediate node for a match. 1088 * Search an intermediate node for a match.
1088 */ 1089 */
1089 blk->magic = be16_to_cpu(curr->magic);
1090 if (blk->magic == XFS_DA_NODE_MAGIC) { 1090 if (blk->magic == XFS_DA_NODE_MAGIC) {
1091 node = blk->bp->data; 1091 node = blk->bp->data;
1092 blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval); 1092 max = be16_to_cpu(node->hdr.count);
1093 btreehashval = node->btree[max-1].hashval;
1094 blk->hashval = be32_to_cpu(btreehashval);
1093 1095
1094 /* 1096 /*
1095 * Binary search. (note: small blocks will skip loop) 1097 * Binary search. (note: small blocks will skip loop)
1096 */ 1098 */
1097 max = be16_to_cpu(node->hdr.count);
1098 probe = span = max / 2; 1099 probe = span = max / 2;
1099 hashval = args->hashval; 1100 hashval = args->hashval;
1100 for (btree = &node->btree[probe]; span > 4; 1101 for (btree = &node->btree[probe]; span > 4;
1101 btree = &node->btree[probe]) { 1102 btree = &node->btree[probe]) {
1102 span /= 2; 1103 span /= 2;
1103 if (be32_to_cpu(btree->hashval) < hashval) 1104 btreehashval = be32_to_cpu(btree->hashval);
1105 if (btreehashval < hashval)
1104 probe += span; 1106 probe += span;
1105 else if (be32_to_cpu(btree->hashval) > hashval) 1107 else if (btreehashval > hashval)
1106 probe -= span; 1108 probe -= span;
1107 else 1109 else
1108 break; 1110 break;
@@ -1133,10 +1135,10 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
1133 blk->index = probe; 1135 blk->index = probe;
1134 blkno = be32_to_cpu(btree->before); 1136 blkno = be32_to_cpu(btree->before);
1135 } 1137 }
1136 } else if (be16_to_cpu(curr->magic) == XFS_ATTR_LEAF_MAGIC) { 1138 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1137 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL); 1139 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1138 break; 1140 break;
1139 } else if (be16_to_cpu(curr->magic) == XFS_DIR2_LEAFN_MAGIC) { 1141 } else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1140 blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL); 1142 blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
1141 break; 1143 break;
1142 } 1144 }
@@ -1152,11 +1154,13 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
1152 if (blk->magic == XFS_DIR2_LEAFN_MAGIC) { 1154 if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1153 retval = xfs_dir2_leafn_lookup_int(blk->bp, args, 1155 retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
1154 &blk->index, state); 1156 &blk->index, state);
1155 } 1157 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1156 else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1157 retval = xfs_attr_leaf_lookup_int(blk->bp, args); 1158 retval = xfs_attr_leaf_lookup_int(blk->bp, args);
1158 blk->index = args->index; 1159 blk->index = args->index;
1159 args->blkno = blk->blkno; 1160 args->blkno = blk->blkno;
1161 } else {
1162 ASSERT(0);
1163 return XFS_ERROR(EFSCORRUPTED);
1160 } 1164 }
1161 if (((retval == ENOENT) || (retval == ENOATTR)) && 1165 if (((retval == ENOENT) || (retval == ENOATTR)) &&
1162 (blk->hashval == args->hashval)) { 1166 (blk->hashval == args->hashval)) {
@@ -1166,8 +1170,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
1166 return(error); 1170 return(error);
1167 if (retval == 0) { 1171 if (retval == 0) {
1168 continue; 1172 continue;
1169 } 1173 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1170 else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1171 /* path_shift() gives ENOENT */ 1174 /* path_shift() gives ENOENT */
1172 retval = XFS_ERROR(ENOATTR); 1175 retval = XFS_ERROR(ENOATTR);
1173 } 1176 }
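
The xfs_da_node_lookup_int() changes above mostly hoist repeated endian conversions into locals (blk->magic, btreehashval) and add a defensive EFSCORRUPTED return when the block magic is unrecognised; the lookup itself remains a hash search over the node's btree entries. A compact sketch of that kind of search (a plain lower-bound binary search here, where the kernel uses a span-halving probe followed by a short linear scan):

#include <stdint.h>
#include <stdio.h>

/* Find the first entry whose hashval is >= the wanted hash (simplified). */
static int lookup_hash_sketch(const uint32_t *hashvals, int count, uint32_t want)
{
	int lo = 0, hi = count;

	while (lo < hi) {
		int mid = lo + (hi - lo) / 2;

		if (hashvals[mid] < want)
			lo = mid + 1;
		else
			hi = mid;
	}
	return lo;		/* count means "past the last entry" */
}

int main(void)
{
	uint32_t hashvals[] = { 0x10, 0x40, 0x40, 0x90, 0xf0 };

	printf("probe index for hash 0x40: %d\n",
	       lookup_hash_sketch(hashvals, 5, 0x40));
	return 0;
}
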
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index bc43163456ef..0893e16b7d83 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -18,14 +18,6 @@
18#ifndef __XFS_ERROR_H__ 18#ifndef __XFS_ERROR_H__
19#define __XFS_ERROR_H__ 19#define __XFS_ERROR_H__
20 20
21#define XFS_ERECOVER 1 /* Failure to recover log */
22#define XFS_ELOGSTAT 2 /* Failure to stat log in user space */
23#define XFS_ENOLOGSPACE 3 /* Reservation too large */
24#define XFS_ENOTSUP 4 /* Operation not supported */
25#define XFS_ENOLSN 5 /* Can't find the lsn you asked for */
26#define XFS_ENOTFOUND 6
27#define XFS_ENOTXFS 7 /* Not XFS filesystem */
28
29#ifdef DEBUG 21#ifdef DEBUG
30#define XFS_ERROR_NTRAP 10 22#define XFS_ERROR_NTRAP 10
31extern int xfs_etrap[XFS_ERROR_NTRAP]; 23extern int xfs_etrap[XFS_ERROR_NTRAP];
@@ -175,6 +167,7 @@ extern int xfs_errortag_clearall_umount(int64_t fsid, char *fsname, int loud);
175#define XFS_PTAG_SHUTDOWN_CORRUPT 0x00000010 167#define XFS_PTAG_SHUTDOWN_CORRUPT 0x00000010
176#define XFS_PTAG_SHUTDOWN_IOERROR 0x00000020 168#define XFS_PTAG_SHUTDOWN_IOERROR 0x00000020
177#define XFS_PTAG_SHUTDOWN_LOGERROR 0x00000040 169#define XFS_PTAG_SHUTDOWN_LOGERROR 0x00000040
170#define XFS_PTAG_FSBLOCK_ZERO 0x00000080
178 171
179struct xfs_mount; 172struct xfs_mount;
180/* PRINTFLIKE4 */ 173/* PRINTFLIKE4 */
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 6cf6d8769b97..6dba78199faf 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -33,9 +33,6 @@ kmem_zone_t *xfs_efi_zone;
33kmem_zone_t *xfs_efd_zone; 33kmem_zone_t *xfs_efd_zone;
34 34
35STATIC void xfs_efi_item_unlock(xfs_efi_log_item_t *); 35STATIC void xfs_efi_item_unlock(xfs_efi_log_item_t *);
36STATIC void xfs_efi_item_abort(xfs_efi_log_item_t *);
37STATIC void xfs_efd_item_abort(xfs_efd_log_item_t *);
38
39 36
40void 37void
41xfs_efi_item_free(xfs_efi_log_item_t *efip) 38xfs_efi_item_free(xfs_efi_log_item_t *efip)
@@ -184,7 +181,7 @@ STATIC void
184xfs_efi_item_unlock(xfs_efi_log_item_t *efip) 181xfs_efi_item_unlock(xfs_efi_log_item_t *efip)
185{ 182{
186 if (efip->efi_item.li_flags & XFS_LI_ABORTED) 183 if (efip->efi_item.li_flags & XFS_LI_ABORTED)
187 xfs_efi_item_abort(efip); 184 xfs_efi_item_free(efip);
188 return; 185 return;
189} 186}
190 187
@@ -202,18 +199,6 @@ xfs_efi_item_committed(xfs_efi_log_item_t *efip, xfs_lsn_t lsn)
202} 199}
203 200
204/* 201/*
205 * This is called when the transaction logging the EFI is aborted.
206 * Free up the EFI and return. No need to clean up the slot for
207 * the item in the transaction. That was done by the unpin code
208 * which is called prior to this routine in the abort/fs-shutdown path.
209 */
210STATIC void
211xfs_efi_item_abort(xfs_efi_log_item_t *efip)
212{
213 xfs_efi_item_free(efip);
214}
215
216/*
217 * There isn't much you can do to push on an efi item. It is simply 202 * There isn't much you can do to push on an efi item. It is simply
218 * stuck waiting for all of its corresponding efd items to be 203 * stuck waiting for all of its corresponding efd items to be
219 * committed to disk. 204 * committed to disk.
@@ -255,7 +240,6 @@ STATIC struct xfs_item_ops xfs_efi_item_ops = {
255 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) 240 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
256 xfs_efi_item_committed, 241 xfs_efi_item_committed,
257 .iop_push = (void(*)(xfs_log_item_t*))xfs_efi_item_push, 242 .iop_push = (void(*)(xfs_log_item_t*))xfs_efi_item_push,
258 .iop_abort = (void(*)(xfs_log_item_t*))xfs_efi_item_abort,
259 .iop_pushbuf = NULL, 243 .iop_pushbuf = NULL,
260 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) 244 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
261 xfs_efi_item_committing 245 xfs_efi_item_committing
@@ -386,33 +370,6 @@ xfs_efi_release(xfs_efi_log_item_t *efip,
386 } 370 }
387} 371}
388 372
389/*
390 * This is called when the transaction that should be committing the
391 * EFD corresponding to the given EFI is aborted. The committed and
392 * canceled flags are used to coordinate the freeing of the EFI and
393 * the references by the transaction that committed it.
394 */
395STATIC void
396xfs_efi_cancel(
397 xfs_efi_log_item_t *efip)
398{
399 xfs_mount_t *mp;
400 SPLDECL(s);
401
402 mp = efip->efi_item.li_mountp;
403 AIL_LOCK(mp, s);
404 if (efip->efi_flags & XFS_EFI_COMMITTED) {
405 /*
406 * xfs_trans_delete_ail() drops the AIL lock.
407 */
408 xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s);
409 xfs_efi_item_free(efip);
410 } else {
411 efip->efi_flags |= XFS_EFI_CANCELED;
412 AIL_UNLOCK(mp, s);
413 }
414}
415
416STATIC void 373STATIC void
417xfs_efd_item_free(xfs_efd_log_item_t *efdp) 374xfs_efd_item_free(xfs_efd_log_item_t *efdp)
418{ 375{
@@ -514,7 +471,7 @@ STATIC void
514xfs_efd_item_unlock(xfs_efd_log_item_t *efdp) 471xfs_efd_item_unlock(xfs_efd_log_item_t *efdp)
515{ 472{
516 if (efdp->efd_item.li_flags & XFS_LI_ABORTED) 473 if (efdp->efd_item.li_flags & XFS_LI_ABORTED)
517 xfs_efd_item_abort(efdp); 474 xfs_efd_item_free(efdp);
518 return; 475 return;
519} 476}
520 477
@@ -541,27 +498,6 @@ xfs_efd_item_committed(xfs_efd_log_item_t *efdp, xfs_lsn_t lsn)
541} 498}
542 499
543/* 500/*
544 * The transaction of which this EFD is a part has been aborted.
545 * Inform its companion EFI of this fact and then clean up after
546 * ourselves. No need to clean up the slot for the item in the
547 * transaction. That was done by the unpin code which is called
548 * prior to this routine in the abort/fs-shutdown path.
549 */
550STATIC void
551xfs_efd_item_abort(xfs_efd_log_item_t *efdp)
552{
553 /*
554 * If we got a log I/O error, it's always the case that the LR with the
555 * EFI got unpinned and freed before the EFD got aborted. So don't
556 * reference the EFI at all in that case.
557 */
558 if ((efdp->efd_item.li_flags & XFS_LI_ABORTED) == 0)
559 xfs_efi_cancel(efdp->efd_efip);
560
561 xfs_efd_item_free(efdp);
562}
563
564/*
565 * There isn't much you can do to push on an efd item. It is simply 501 * There isn't much you can do to push on an efd item. It is simply
566 * stuck waiting for the log to be flushed to disk. 502 * stuck waiting for the log to be flushed to disk.
567 */ 503 */
@@ -602,7 +538,6 @@ STATIC struct xfs_item_ops xfs_efd_item_ops = {
602 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) 538 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
603 xfs_efd_item_committed, 539 xfs_efd_item_committed,
604 .iop_push = (void(*)(xfs_log_item_t*))xfs_efd_item_push, 540 .iop_push = (void(*)(xfs_log_item_t*))xfs_efd_item_push,
605 .iop_abort = (void(*)(xfs_log_item_t*))xfs_efd_item_abort,
606 .iop_pushbuf = NULL, 541 .iop_pushbuf = NULL,
607 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) 542 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
608 xfs_efd_item_committing 543 xfs_efd_item_committing
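
The EFI/EFD hunks above drop the dedicated ->iop_abort callbacks (and xfs_efi_cancel()); an aborted item is now freed straight from its unlock routine when XFS_LI_ABORTED is set. A minimal sketch of that "check the aborted flag at unlock" pattern (the flag value and item layout are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

#define LI_ABORTED_SKETCH	0x1

struct demo_log_item {
	unsigned int li_flags;
};

static void demo_item_free(struct demo_log_item *item)
{
	printf("freeing aborted log item\n");
	free(item);
}

/* As in the diff: no separate abort callback, unlock frees aborted items. */
static void demo_item_unlock(struct demo_log_item *item)
{
	if (item->li_flags & LI_ABORTED_SKETCH)
		demo_item_free(item);
}

int main(void)
{
	struct demo_log_item *item = calloc(1, sizeof(*item));

	if (!item)
		return 1;
	item->li_flags |= LI_ABORTED_SKETCH;
	demo_item_unlock(item);		/* frees the item because it was aborted */
	return 0;
}
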
diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h
index 0ea45edaab03..2f049f63e85f 100644
--- a/fs/xfs/xfs_extfree_item.h
+++ b/fs/xfs/xfs_extfree_item.h
@@ -33,14 +33,16 @@ typedef struct xfs_extent {
33 * conversion routine. 33 * conversion routine.
34 */ 34 */
35 35
36#ifndef HAVE_FORMAT32
36typedef struct xfs_extent_32 { 37typedef struct xfs_extent_32 {
37 xfs_dfsbno_t ext_start; 38 __uint64_t ext_start;
38 xfs_extlen_t ext_len; 39 __uint32_t ext_len;
39} __attribute__((packed)) xfs_extent_32_t; 40} __attribute__((packed)) xfs_extent_32_t;
41#endif
40 42
41typedef struct xfs_extent_64 { 43typedef struct xfs_extent_64 {
42 xfs_dfsbno_t ext_start; 44 __uint64_t ext_start;
43 xfs_extlen_t ext_len; 45 __uint32_t ext_len;
44 __uint32_t ext_pad; 46 __uint32_t ext_pad;
45} xfs_extent_64_t; 47} xfs_extent_64_t;
46 48
@@ -50,25 +52,27 @@ typedef struct xfs_extent_64 {
50 * size is given by efi_nextents. 52 * size is given by efi_nextents.
51 */ 53 */
52typedef struct xfs_efi_log_format { 54typedef struct xfs_efi_log_format {
53 unsigned short efi_type; /* efi log item type */ 55 __uint16_t efi_type; /* efi log item type */
54 unsigned short efi_size; /* size of this item */ 56 __uint16_t efi_size; /* size of this item */
55 uint efi_nextents; /* # extents to free */ 57 __uint32_t efi_nextents; /* # extents to free */
56 __uint64_t efi_id; /* efi identifier */ 58 __uint64_t efi_id; /* efi identifier */
57 xfs_extent_t efi_extents[1]; /* array of extents to free */ 59 xfs_extent_t efi_extents[1]; /* array of extents to free */
58} xfs_efi_log_format_t; 60} xfs_efi_log_format_t;
59 61
62#ifndef HAVE_FORMAT32
60typedef struct xfs_efi_log_format_32 { 63typedef struct xfs_efi_log_format_32 {
61 unsigned short efi_type; /* efi log item type */ 64 __uint16_t efi_type; /* efi log item type */
62 unsigned short efi_size; /* size of this item */ 65 __uint16_t efi_size; /* size of this item */
63 uint efi_nextents; /* # extents to free */ 66 __uint32_t efi_nextents; /* # extents to free */
64 __uint64_t efi_id; /* efi identifier */ 67 __uint64_t efi_id; /* efi identifier */
65 xfs_extent_32_t efi_extents[1]; /* array of extents to free */ 68 xfs_extent_32_t efi_extents[1]; /* array of extents to free */
66} __attribute__((packed)) xfs_efi_log_format_32_t; 69} __attribute__((packed)) xfs_efi_log_format_32_t;
70#endif
67 71
68typedef struct xfs_efi_log_format_64 { 72typedef struct xfs_efi_log_format_64 {
69 unsigned short efi_type; /* efi log item type */ 73 __uint16_t efi_type; /* efi log item type */
70 unsigned short efi_size; /* size of this item */ 74 __uint16_t efi_size; /* size of this item */
71 uint efi_nextents; /* # extents to free */ 75 __uint32_t efi_nextents; /* # extents to free */
72 __uint64_t efi_id; /* efi identifier */ 76 __uint64_t efi_id; /* efi identifier */
73 xfs_extent_64_t efi_extents[1]; /* array of extents to free */ 77 xfs_extent_64_t efi_extents[1]; /* array of extents to free */
74} xfs_efi_log_format_64_t; 78} xfs_efi_log_format_64_t;
@@ -79,25 +83,27 @@ typedef struct xfs_efi_log_format_64 {
79 * size is given by efd_nextents; 83 * size is given by efd_nextents;
80 */ 84 */
81typedef struct xfs_efd_log_format { 85typedef struct xfs_efd_log_format {
82 unsigned short efd_type; /* efd log item type */ 86 __uint16_t efd_type; /* efd log item type */
83 unsigned short efd_size; /* size of this item */ 87 __uint16_t efd_size; /* size of this item */
84 uint efd_nextents; /* # of extents freed */ 88 __uint32_t efd_nextents; /* # of extents freed */
85 __uint64_t efd_efi_id; /* id of corresponding efi */ 89 __uint64_t efd_efi_id; /* id of corresponding efi */
86 xfs_extent_t efd_extents[1]; /* array of extents freed */ 90 xfs_extent_t efd_extents[1]; /* array of extents freed */
87} xfs_efd_log_format_t; 91} xfs_efd_log_format_t;
88 92
93#ifndef HAVE_FORMAT32
89typedef struct xfs_efd_log_format_32 { 94typedef struct xfs_efd_log_format_32 {
90 unsigned short efd_type; /* efd log item type */ 95 __uint16_t efd_type; /* efd log item type */
91 unsigned short efd_size; /* size of this item */ 96 __uint16_t efd_size; /* size of this item */
92 uint efd_nextents; /* # of extents freed */ 97 __uint32_t efd_nextents; /* # of extents freed */
93 __uint64_t efd_efi_id; /* id of corresponding efi */ 98 __uint64_t efd_efi_id; /* id of corresponding efi */
94 xfs_extent_32_t efd_extents[1]; /* array of extents freed */ 99 xfs_extent_32_t efd_extents[1]; /* array of extents freed */
95} __attribute__((packed)) xfs_efd_log_format_32_t; 100} __attribute__((packed)) xfs_efd_log_format_32_t;
101#endif
96 102
97typedef struct xfs_efd_log_format_64 { 103typedef struct xfs_efd_log_format_64 {
98 unsigned short efd_type; /* efd log item type */ 104 __uint16_t efd_type; /* efd log item type */
99 unsigned short efd_size; /* size of this item */ 105 __uint16_t efd_size; /* size of this item */
100 uint efd_nextents; /* # of extents freed */ 106 __uint32_t efd_nextents; /* # of extents freed */
101 __uint64_t efd_efi_id; /* id of corresponding efi */ 107 __uint64_t efd_efi_id; /* id of corresponding efi */
102 xfs_extent_64_t efd_extents[1]; /* array of extents freed */ 108 xfs_extent_64_t efd_extents[1]; /* array of extents freed */
103} xfs_efd_log_format_64_t; 109} xfs_efd_log_format_64_t;
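
The header changes above switch the EFI/EFD log formats from "unsigned short"/"uint" to explicit __uint16_t/__uint32_t and hide the 32-bit variants behind #ifndef HAVE_FORMAT32. Fixed-width fields pin down the scalar layout regardless of the ABI that wrote the log; the separate _32/_64 format variants still exist because the trailing extent arrays pack differently. A small sketch of the fixed-width header idea (the struct name and field subset are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Logged header with explicitly sized fields: the scalar layout no longer
 * depends on what "short" or "int" happens to mean on the writing kernel. */
struct demo_efi_log_format {
	uint16_t efi_type;	/* log item type */
	uint16_t efi_size;	/* size of this item */
	uint32_t efi_nextents;	/* # extents to free */
	uint64_t efi_id;	/* efi identifier */
};

int main(void)
{
	printf("demo_efi_log_format is %zu bytes on this build\n",
	       sizeof(struct demo_efi_log_format));
	return 0;
}
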
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index 0f0ad1535951..1335449841cd 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -22,8 +22,6 @@
22 * SGI's XFS filesystem's major stuff (constants, structures) 22 * SGI's XFS filesystem's major stuff (constants, structures)
23 */ 23 */
24 24
25#define XFS_NAME "xfs"
26
27/* 25/*
28 * Direct I/O attribute record used with XFS_IOC_DIOINFO 26 * Direct I/O attribute record used with XFS_IOC_DIOINFO
29 * d_miniosz is the min xfer size, xfer size multiple and file seek offset 27 * d_miniosz is the min xfer size, xfer size multiple and file seek offset
@@ -426,11 +424,7 @@ typedef struct xfs_handle {
426 - (char *) &(handle)) \ 424 - (char *) &(handle)) \
427 + (handle).ha_fid.xfs_fid_len) 425 + (handle).ha_fid.xfs_fid_len)
428 426
429#define XFS_HANDLE_CMP(h1, h2) memcmp(h1, h2, sizeof(xfs_handle_t)) 427/*
430
431#define FSHSIZE sizeof(fsid_t)
432
433/*
434 * Flags for going down operation 428 * Flags for going down operation
435 */ 429 */
436#define XFS_FSOP_GOING_FLAGS_DEFAULT 0x0 /* going down */ 430#define XFS_FSOP_GOING_FLAGS_DEFAULT 0x0 /* going down */
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 33164a85aa9d..a446e5a115c6 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -458,7 +458,7 @@ nextag:
458 */ 458 */
459 if (XFS_FORCED_SHUTDOWN(mp)) { 459 if (XFS_FORCED_SHUTDOWN(mp)) {
460 up_read(&mp->m_peraglock); 460 up_read(&mp->m_peraglock);
461 return (xfs_buf_t *)0; 461 return NULL;
462 } 462 }
463 agno++; 463 agno++;
464 if (agno >= agcount) 464 if (agno >= agcount)
@@ -466,7 +466,7 @@ nextag:
466 if (agno == pagno) { 466 if (agno == pagno) {
467 if (flags == 0) { 467 if (flags == 0) {
468 up_read(&mp->m_peraglock); 468 up_read(&mp->m_peraglock);
469 return (xfs_buf_t *)0; 469 return NULL;
470 } 470 }
471 flags = 0; 471 flags = 0;
472 } 472 }
@@ -529,10 +529,10 @@ xfs_dialloc(
529 int offset; /* index of inode in chunk */ 529 int offset; /* index of inode in chunk */
530 xfs_agino_t pagino; /* parent's a.g. relative inode # */ 530 xfs_agino_t pagino; /* parent's a.g. relative inode # */
531 xfs_agnumber_t pagno; /* parent's allocation group number */ 531 xfs_agnumber_t pagno; /* parent's allocation group number */
532 xfs_inobt_rec_t rec; /* inode allocation record */ 532 xfs_inobt_rec_incore_t rec; /* inode allocation record */
533 xfs_agnumber_t tagno; /* testing allocation group number */ 533 xfs_agnumber_t tagno; /* testing allocation group number */
534 xfs_btree_cur_t *tcur; /* temp cursor */ 534 xfs_btree_cur_t *tcur; /* temp cursor */
535 xfs_inobt_rec_t trec; /* temp inode allocation record */ 535 xfs_inobt_rec_incore_t trec; /* temp inode allocation record */
536 536
537 537
538 if (*IO_agbp == NULL) { 538 if (*IO_agbp == NULL) {
@@ -945,7 +945,7 @@ xfs_difree(
945 int ilen; /* inodes in an inode cluster */ 945 int ilen; /* inodes in an inode cluster */
946 xfs_mount_t *mp; /* mount structure for filesystem */ 946 xfs_mount_t *mp; /* mount structure for filesystem */
947 int off; /* offset of inode in inode chunk */ 947 int off; /* offset of inode in inode chunk */
948 xfs_inobt_rec_t rec; /* btree record */ 948 xfs_inobt_rec_incore_t rec; /* btree record */
949 949
950 mp = tp->t_mountp; 950 mp = tp->t_mountp;
951 951
@@ -1195,6 +1195,7 @@ xfs_dilocate(
1195 "(0x%llx)", 1195 "(0x%llx)",
1196 ino, XFS_AGINO_TO_INO(mp, agno, agino)); 1196 ino, XFS_AGINO_TO_INO(mp, agno, agino));
1197 } 1197 }
1198 xfs_stack_trace();
1198#endif /* DEBUG */ 1199#endif /* DEBUG */
1199 return XFS_ERROR(EINVAL); 1200 return XFS_ERROR(EINVAL);
1200 } 1201 }
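
Together with the bc_rec union change in xfs_btree.h earlier in this diff, the xfs_ialloc.c hunks make the allocation code work on an incore (host-endian) inode-btree record, converting to and from the disk-endian xfs_inobt_rec_t only at the buffer boundary. A rough sketch of that incore/on-disk split (field names mirror the kernel's, but the helpers and layout are simplified assumptions; the byte-swap helpers model a little-endian host):

#include <stdint.h>
#include <stdio.h>

/* On-disk record: stored big-endian. */
struct demo_inobt_rec_disk {
	uint32_t ir_startino_be;
	uint32_t ir_freecount_be;
	uint64_t ir_free_be;
};

/* Incore record: host-endian, what the allocation code works with. */
struct demo_inobt_rec_incore {
	uint32_t ir_startino;
	uint32_t ir_freecount;
	uint64_t ir_free;
};

/* GCC/Clang byte swaps standing in for be32_to_cpu()/be64_to_cpu(). */
static uint32_t be32_sketch(uint32_t x) { return __builtin_bswap32(x); }
static uint64_t be64_sketch(uint64_t x) { return __builtin_bswap64(x); }

/* Convert once when the record is read from (or written to) a buffer. */
static void rec_from_disk(struct demo_inobt_rec_incore *in,
			  const struct demo_inobt_rec_disk *disk)
{
	in->ir_startino = be32_sketch(disk->ir_startino_be);
	in->ir_freecount = be32_sketch(disk->ir_freecount_be);
	in->ir_free = be64_sketch(disk->ir_free_be);
}

int main(void)
{
	struct demo_inobt_rec_disk disk = {
		be32_sketch(64), be32_sketch(3), be64_sketch(0x7)
	};
	struct demo_inobt_rec_incore rec;

	rec_from_disk(&rec, &disk);
	printf("chunk at agino %u has %u free inodes\n",
	       rec.ir_startino, rec.ir_freecount);
	return 0;
}
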
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index 616eeeb6953e..8cdeeaf8632b 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -568,7 +568,7 @@ xfs_inobt_insrec(
568 /* 568 /*
569 * Make a key out of the record data to be inserted, and save it. 569 * Make a key out of the record data to be inserted, and save it.
570 */ 570 */
571 key.ir_startino = recp->ir_startino; /* INT_: direct copy */ 571 key.ir_startino = recp->ir_startino;
572 optr = ptr = cur->bc_ptrs[level]; 572 optr = ptr = cur->bc_ptrs[level];
573 /* 573 /*
574 * If we're off the left edge, return failure. 574 * If we're off the left edge, return failure.
@@ -600,7 +600,7 @@ xfs_inobt_insrec(
600 } 600 }
601#endif 601#endif
602 nbno = NULLAGBLOCK; 602 nbno = NULLAGBLOCK;
603 ncur = (xfs_btree_cur_t *)0; 603 ncur = NULL;
604 /* 604 /*
605 * If the block is full, we can't insert the new entry until we 605 * If the block is full, we can't insert the new entry until we
606 * make the block un-full. 606 * make the block un-full.
@@ -641,7 +641,7 @@ xfs_inobt_insrec(
641 return error; 641 return error;
642#endif 642#endif
643 ptr = cur->bc_ptrs[level]; 643 ptr = cur->bc_ptrs[level];
644 nrec.ir_startino = nkey.ir_startino; /* INT_: direct copy */ 644 nrec.ir_startino = nkey.ir_startino;
645 } else { 645 } else {
646 /* 646 /*
647 * Otherwise the insert fails. 647 * Otherwise the insert fails.
@@ -681,7 +681,7 @@ xfs_inobt_insrec(
681 if ((error = xfs_btree_check_sptr(cur, *bnop, level))) 681 if ((error = xfs_btree_check_sptr(cur, *bnop, level)))
682 return error; 682 return error;
683#endif 683#endif
684 kp[ptr - 1] = key; /* INT_: struct copy */ 684 kp[ptr - 1] = key;
685 pp[ptr - 1] = cpu_to_be32(*bnop); 685 pp[ptr - 1] = cpu_to_be32(*bnop);
686 numrecs++; 686 numrecs++;
687 block->bb_numrecs = cpu_to_be16(numrecs); 687 block->bb_numrecs = cpu_to_be16(numrecs);
@@ -698,7 +698,7 @@ xfs_inobt_insrec(
698 * Now stuff the new record in, bump numrecs 698 * Now stuff the new record in, bump numrecs
699 * and log the new data. 699 * and log the new data.
700 */ 700 */
701 rp[ptr - 1] = *recp; /* INT_: struct copy */ 701 rp[ptr - 1] = *recp;
702 numrecs++; 702 numrecs++;
703 block->bb_numrecs = cpu_to_be16(numrecs); 703 block->bb_numrecs = cpu_to_be16(numrecs);
704 xfs_inobt_log_recs(cur, bp, ptr, numrecs); 704 xfs_inobt_log_recs(cur, bp, ptr, numrecs);
@@ -731,7 +731,7 @@ xfs_inobt_insrec(
731 */ 731 */
732 *bnop = nbno; 732 *bnop = nbno;
733 if (nbno != NULLAGBLOCK) { 733 if (nbno != NULLAGBLOCK) {
734 *recp = nrec; /* INT_: struct copy */ 734 *recp = nrec;
735 *curp = ncur; 735 *curp = ncur;
736 } 736 }
737 *stat = 1; 737 *stat = 1;
@@ -878,7 +878,7 @@ xfs_inobt_lookup(
878 */ 878 */
879 bp = cur->bc_bufs[level]; 879 bp = cur->bc_bufs[level];
880 if (bp && XFS_BUF_ADDR(bp) != d) 880 if (bp && XFS_BUF_ADDR(bp) != d)
881 bp = (xfs_buf_t *)0; 881 bp = NULL;
882 if (!bp) { 882 if (!bp) {
883 /* 883 /*
884 * Need to get a new buffer. Read it, then 884 * Need to get a new buffer. Read it, then
@@ -950,12 +950,12 @@ xfs_inobt_lookup(
950 xfs_inobt_key_t *kkp; 950 xfs_inobt_key_t *kkp;
951 951
952 kkp = kkbase + keyno - 1; 952 kkp = kkbase + keyno - 1;
953 startino = INT_GET(kkp->ir_startino, ARCH_CONVERT); 953 startino = be32_to_cpu(kkp->ir_startino);
954 } else { 954 } else {
955 xfs_inobt_rec_t *krp; 955 xfs_inobt_rec_t *krp;
956 956
957 krp = krbase + keyno - 1; 957 krp = krbase + keyno - 1;
958 startino = INT_GET(krp->ir_startino, ARCH_CONVERT); 958 startino = be32_to_cpu(krp->ir_startino);
959 } 959 }
960 /* 960 /*
961 * Compute difference to get next direction. 961 * Compute difference to get next direction.
@@ -1117,7 +1117,7 @@ xfs_inobt_lshift(
1117 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*rpp), level))) 1117 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*rpp), level)))
1118 return error; 1118 return error;
1119#endif 1119#endif
1120 *lpp = *rpp; /* INT_: no-change copy */ 1120 *lpp = *rpp;
1121 xfs_inobt_log_ptrs(cur, lbp, nrec, nrec); 1121 xfs_inobt_log_ptrs(cur, lbp, nrec, nrec);
1122 } 1122 }
1123 /* 1123 /*
@@ -1160,7 +1160,7 @@ xfs_inobt_lshift(
1160 } else { 1160 } else {
1161 memmove(rrp, rrp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); 1161 memmove(rrp, rrp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
1162 xfs_inobt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); 1162 xfs_inobt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1163 key.ir_startino = rrp->ir_startino; /* INT_: direct copy */ 1163 key.ir_startino = rrp->ir_startino;
1164 rkp = &key; 1164 rkp = &key;
1165 } 1165 }
1166 /* 1166 /*
@@ -1297,13 +1297,13 @@ xfs_inobt_newroot(
1297 */ 1297 */
1298 kp = XFS_INOBT_KEY_ADDR(new, 1, cur); 1298 kp = XFS_INOBT_KEY_ADDR(new, 1, cur);
1299 if (be16_to_cpu(left->bb_level) > 0) { 1299 if (be16_to_cpu(left->bb_level) > 0) {
1300 kp[0] = *XFS_INOBT_KEY_ADDR(left, 1, cur); /* INT_: struct copy */ 1300 kp[0] = *XFS_INOBT_KEY_ADDR(left, 1, cur);
1301 kp[1] = *XFS_INOBT_KEY_ADDR(right, 1, cur); /* INT_: struct copy */ 1301 kp[1] = *XFS_INOBT_KEY_ADDR(right, 1, cur);
1302 } else { 1302 } else {
1303 rp = XFS_INOBT_REC_ADDR(left, 1, cur); 1303 rp = XFS_INOBT_REC_ADDR(left, 1, cur);
1304 INT_COPY(kp[0].ir_startino, rp->ir_startino, ARCH_CONVERT); 1304 kp[0].ir_startino = rp->ir_startino;
1305 rp = XFS_INOBT_REC_ADDR(right, 1, cur); 1305 rp = XFS_INOBT_REC_ADDR(right, 1, cur);
1306 INT_COPY(kp[1].ir_startino, rp->ir_startino, ARCH_CONVERT); 1306 kp[1].ir_startino = rp->ir_startino;
1307 } 1307 }
1308 xfs_inobt_log_keys(cur, nbp, 1, 2); 1308 xfs_inobt_log_keys(cur, nbp, 1, 2);
1309 /* 1309 /*
@@ -1410,8 +1410,8 @@ xfs_inobt_rshift(
1410 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*lpp), level))) 1410 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*lpp), level)))
1411 return error; 1411 return error;
1412#endif 1412#endif
1413 *rkp = *lkp; /* INT_: no change copy */ 1413 *rkp = *lkp;
1414 *rpp = *lpp; /* INT_: no change copy */ 1414 *rpp = *lpp;
1415 xfs_inobt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); 1415 xfs_inobt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
1416 xfs_inobt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); 1416 xfs_inobt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
1417 } else { 1417 } else {
@@ -1420,7 +1420,7 @@ xfs_inobt_rshift(
1420 memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); 1420 memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
1421 *rrp = *lrp; 1421 *rrp = *lrp;
1422 xfs_inobt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); 1422 xfs_inobt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
1423 key.ir_startino = rrp->ir_startino; /* INT_: direct copy */ 1423 key.ir_startino = rrp->ir_startino;
1424 rkp = &key; 1424 rkp = &key;
1425 } 1425 }
1426 /* 1426 /*
@@ -1559,7 +1559,7 @@ xfs_inobt_split(
1559 rrp = XFS_INOBT_REC_ADDR(right, 1, cur); 1559 rrp = XFS_INOBT_REC_ADDR(right, 1, cur);
1560 memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); 1560 memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
1561 xfs_inobt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); 1561 xfs_inobt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1562 keyp->ir_startino = rrp->ir_startino; /* INT_: direct copy */ 1562 keyp->ir_startino = rrp->ir_startino;
1563 } 1563 }
1564 /* 1564 /*
1565 * Find the left block number by looking in the buffer. 1565 * Find the left block number by looking in the buffer.
@@ -1813,9 +1813,9 @@ xfs_inobt_get_rec(
1813 * Point to the record and extract its data. 1813 * Point to the record and extract its data.
1814 */ 1814 */
1815 rec = XFS_INOBT_REC_ADDR(block, ptr, cur); 1815 rec = XFS_INOBT_REC_ADDR(block, ptr, cur);
1816 *ino = INT_GET(rec->ir_startino, ARCH_CONVERT); 1816 *ino = be32_to_cpu(rec->ir_startino);
1817 *fcnt = INT_GET(rec->ir_freecount, ARCH_CONVERT); 1817 *fcnt = be32_to_cpu(rec->ir_freecount);
1818 *free = INT_GET(rec->ir_free, ARCH_CONVERT); 1818 *free = be64_to_cpu(rec->ir_free);
1819 *stat = 1; 1819 *stat = 1;
1820 return 0; 1820 return 0;
1821} 1821}
@@ -1930,10 +1930,10 @@ xfs_inobt_insert(
1930 1930
1931 level = 0; 1931 level = 0;
1932 nbno = NULLAGBLOCK; 1932 nbno = NULLAGBLOCK;
1933 INT_SET(nrec.ir_startino, ARCH_CONVERT, cur->bc_rec.i.ir_startino); 1933 nrec.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
1934 INT_SET(nrec.ir_freecount, ARCH_CONVERT, cur->bc_rec.i.ir_freecount); 1934 nrec.ir_freecount = cpu_to_be32(cur->bc_rec.i.ir_freecount);
1935 INT_SET(nrec.ir_free, ARCH_CONVERT, cur->bc_rec.i.ir_free); 1935 nrec.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
1936 ncur = (xfs_btree_cur_t *)0; 1936 ncur = NULL;
1937 pcur = cur; 1937 pcur = cur;
1938 /* 1938 /*
1939 * Loop going up the tree, starting at the leaf level. 1939 * Loop going up the tree, starting at the leaf level.
@@ -1965,7 +1965,7 @@ xfs_inobt_insert(
1965 */ 1965 */
1966 if (ncur) { 1966 if (ncur) {
1967 pcur = ncur; 1967 pcur = ncur;
1968 ncur = (xfs_btree_cur_t *)0; 1968 ncur = NULL;
1969 } 1969 }
1970 } while (nbno != NULLAGBLOCK); 1970 } while (nbno != NULLAGBLOCK);
1971 *stat = i; 1971 *stat = i;
@@ -2060,9 +2060,9 @@ xfs_inobt_update(
2060 /* 2060 /*
2061 * Fill in the new contents and log them. 2061 * Fill in the new contents and log them.
2062 */ 2062 */
2063 INT_SET(rp->ir_startino, ARCH_CONVERT, ino); 2063 rp->ir_startino = cpu_to_be32(ino);
2064 INT_SET(rp->ir_freecount, ARCH_CONVERT, fcnt); 2064 rp->ir_freecount = cpu_to_be32(fcnt);
2065 INT_SET(rp->ir_free, ARCH_CONVERT, free); 2065 rp->ir_free = cpu_to_be64(free);
2066 xfs_inobt_log_recs(cur, bp, ptr, ptr); 2066 xfs_inobt_log_recs(cur, bp, ptr, ptr);
2067 /* 2067 /*
2068 * Updating first record in leaf. Pass new key value up to our parent. 2068 * Updating first record in leaf. Pass new key value up to our parent.
@@ -2070,7 +2070,7 @@ xfs_inobt_update(
2070 if (ptr == 1) { 2070 if (ptr == 1) {
2071 xfs_inobt_key_t key; /* key containing [ino] */ 2071 xfs_inobt_key_t key; /* key containing [ino] */
2072 2072
2073 INT_SET(key.ir_startino, ARCH_CONVERT, ino); 2073 key.ir_startino = cpu_to_be32(ino);
2074 if ((error = xfs_inobt_updkey(cur, &key, 1))) 2074 if ((error = xfs_inobt_updkey(cur, &key, 1)))
2075 return error; 2075 return error;
2076 } 2076 }
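
Note: the pattern running through this file replaces the old INT_GET/INT_SET(..., ARCH_CONVERT) macros and the "/* INT_: direct copy */" annotations with explicitly big-endian on-disk fields converted via be32_to_cpu()/cpu_to_be32(); two fields of the same on-disk type can then be copied with a plain assignment and no byte swapping. Below is a minimal userspace sketch of that idea, using glibc's htobe32()/be32toh() from <endian.h> as stand-ins for the kernel helpers; the struct and names are illustrative, not XFS code.

#include <endian.h>      /* htobe32(), be32toh() -- glibc */
#include <stdint.h>
#include <stdio.h>

struct disk_rec { uint32_t startino; };  /* stored big-endian on disk */
struct disk_key { uint32_t startino; };

int main(void)
{
        struct disk_rec rec = { .startino = htobe32(128) }; /* cpu_to_be32() */
        struct disk_key key;

        /* both sides are big-endian: a direct copy, no conversion needed */
        key.startino = rec.startino;

        /* convert only at the point where the CPU needs the native value */
        printf("startino = %u\n", (unsigned)be32toh(key.startino));
        return 0;
}

Keeping on-disk fields in a distinct big-endian type makes it obvious at a glance which assignments are raw copies and which actually need a byte swap.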
diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h
index ae3904cb1ee8..2c0e49893ff7 100644
--- a/fs/xfs/xfs_ialloc_btree.h
+++ b/fs/xfs/xfs_ialloc_btree.h
@@ -47,19 +47,24 @@ static inline xfs_inofree_t xfs_inobt_maskn(int i, int n)
47/* 47/*
48 * Data record structure 48 * Data record structure
49 */ 49 */
50typedef struct xfs_inobt_rec 50typedef struct xfs_inobt_rec {
51{ 51 __be32 ir_startino; /* starting inode number */
52 __be32 ir_freecount; /* count of free inodes (set bits) */
53 __be64 ir_free; /* free inode mask */
54} xfs_inobt_rec_t;
55
56typedef struct xfs_inobt_rec_incore {
52 xfs_agino_t ir_startino; /* starting inode number */ 57 xfs_agino_t ir_startino; /* starting inode number */
53 __int32_t ir_freecount; /* count of free inodes (set bits) */ 58 __int32_t ir_freecount; /* count of free inodes (set bits) */
54 xfs_inofree_t ir_free; /* free inode mask */ 59 xfs_inofree_t ir_free; /* free inode mask */
55} xfs_inobt_rec_t; 60} xfs_inobt_rec_incore_t;
61
56 62
57/* 63/*
58 * Key structure 64 * Key structure
59 */ 65 */
60typedef struct xfs_inobt_key 66typedef struct xfs_inobt_key {
61{ 67 __be32 ir_startino; /* starting inode number */
62 xfs_agino_t ir_startino; /* starting inode number */
63} xfs_inobt_key_t; 68} xfs_inobt_key_t;
64 69
65/* btree pointer type */ 70/* btree pointer type */
@@ -77,7 +82,7 @@ typedef struct xfs_btree_sblock xfs_inobt_block_t;
77#define XFS_INOBT_IS_FREE(rp,i) \ 82#define XFS_INOBT_IS_FREE(rp,i) \
78 (((rp)->ir_free & XFS_INOBT_MASK(i)) != 0) 83 (((rp)->ir_free & XFS_INOBT_MASK(i)) != 0)
79#define XFS_INOBT_IS_FREE_DISK(rp,i) \ 84#define XFS_INOBT_IS_FREE_DISK(rp,i) \
80 ((INT_GET((rp)->ir_free,ARCH_CONVERT) & XFS_INOBT_MASK(i)) != 0) 85 ((be64_to_cpu((rp)->ir_free) & XFS_INOBT_MASK(i)) != 0)
81#define XFS_INOBT_SET_FREE(rp,i) ((rp)->ir_free |= XFS_INOBT_MASK(i)) 86#define XFS_INOBT_SET_FREE(rp,i) ((rp)->ir_free |= XFS_INOBT_MASK(i))
82#define XFS_INOBT_CLR_FREE(rp,i) ((rp)->ir_free &= ~XFS_INOBT_MASK(i)) 87#define XFS_INOBT_CLR_FREE(rp,i) ((rp)->ir_free &= ~XFS_INOBT_MASK(i))
83 88
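
Note: this header change splits the inode-btree record into an on-disk form (__be32/__be64 fields) and an incore form with native-endian types, so conversion happens at one boundary instead of at every use site. A hedged sketch of that split with a one-way conversion helper follows; plain C99 types and the helper name stand in for the kernel's, and this is not the real structure layout.

#include <endian.h>
#include <stdint.h>

/* on-disk layout: always big-endian */
struct inobt_rec_disk {
        uint32_t ir_startino;    /* __be32 in the real structure */
        uint32_t ir_freecount;   /* __be32 */
        uint64_t ir_free;        /* __be64 */
};

/* incore copy: native CPU byte order */
struct inobt_rec_incore {
        uint32_t ir_startino;
        int32_t  ir_freecount;
        uint64_t ir_free;
};

static void inobt_rec_from_disk(struct inobt_rec_incore *to,
                                const struct inobt_rec_disk *from)
{
        to->ir_startino  = be32toh(from->ir_startino);
        to->ir_freecount = (int32_t)be32toh(from->ir_freecount);
        to->ir_free      = be64toh(from->ir_free);
}

int main(void)
{
        struct inobt_rec_disk d = {
                .ir_startino  = htobe32(64),
                .ir_freecount = htobe32(3),
                .ir_free      = htobe64(0x7ULL),
        };
        struct inobt_rec_incore i;

        inobt_rec_from_disk(&i, &d);
        return i.ir_freecount == 3 ? 0 : 1;
}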
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 0724df7fabb7..b73d216ecaf9 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -50,7 +50,7 @@ void
50xfs_ihash_init(xfs_mount_t *mp) 50xfs_ihash_init(xfs_mount_t *mp)
51{ 51{
52 __uint64_t icount; 52 __uint64_t icount;
53 uint i, flags = KM_SLEEP | KM_MAYFAIL; 53 uint i;
54 54
55 if (!mp->m_ihsize) { 55 if (!mp->m_ihsize) {
56 icount = mp->m_maxicount ? mp->m_maxicount : 56 icount = mp->m_maxicount ? mp->m_maxicount :
@@ -61,14 +61,13 @@ xfs_ihash_init(xfs_mount_t *mp)
61 (64 * NBPP) / sizeof(xfs_ihash_t)); 61 (64 * NBPP) / sizeof(xfs_ihash_t));
62 } 62 }
63 63
64 while (!(mp->m_ihash = (xfs_ihash_t *)kmem_zalloc(mp->m_ihsize * 64 mp->m_ihash = kmem_zalloc_greedy(&mp->m_ihsize,
65 sizeof(xfs_ihash_t), flags))) { 65 NBPC * sizeof(xfs_ihash_t),
66 if ((mp->m_ihsize >>= 1) <= NBPP) 66 mp->m_ihsize * sizeof(xfs_ihash_t),
67 flags = KM_SLEEP; 67 KM_SLEEP | KM_MAYFAIL | KM_LARGE);
68 } 68 mp->m_ihsize /= sizeof(xfs_ihash_t);
69 for (i = 0; i < mp->m_ihsize; i++) { 69 for (i = 0; i < mp->m_ihsize; i++)
70 rwlock_init(&(mp->m_ihash[i].ih_lock)); 70 rwlock_init(&(mp->m_ihash[i].ih_lock));
71 }
72} 71}
73 72
74/* 73/*
@@ -77,7 +76,7 @@ xfs_ihash_init(xfs_mount_t *mp)
77void 76void
78xfs_ihash_free(xfs_mount_t *mp) 77xfs_ihash_free(xfs_mount_t *mp)
79{ 78{
80 kmem_free(mp->m_ihash, mp->m_ihsize*sizeof(xfs_ihash_t)); 79 kmem_free(mp->m_ihash, mp->m_ihsize * sizeof(xfs_ihash_t));
81 mp->m_ihash = NULL; 80 mp->m_ihash = NULL;
82} 81}
83 82
@@ -95,7 +94,7 @@ xfs_chash_init(xfs_mount_t *mp)
95 mp->m_chsize = min_t(uint, mp->m_chsize, mp->m_ihsize); 94 mp->m_chsize = min_t(uint, mp->m_chsize, mp->m_ihsize);
96 mp->m_chash = (xfs_chash_t *)kmem_zalloc(mp->m_chsize 95 mp->m_chash = (xfs_chash_t *)kmem_zalloc(mp->m_chsize
97 * sizeof(xfs_chash_t), 96 * sizeof(xfs_chash_t),
98 KM_SLEEP); 97 KM_SLEEP | KM_LARGE);
99 for (i = 0; i < mp->m_chsize; i++) { 98 for (i = 0; i < mp->m_chsize; i++) {
100 spinlock_init(&mp->m_chash[i].ch_lock,"xfshash"); 99 spinlock_init(&mp->m_chash[i].ch_lock,"xfshash");
101 } 100 }
@@ -244,7 +243,9 @@ again:
244 243
245 XFS_STATS_INC(xs_ig_found); 244 XFS_STATS_INC(xs_ig_found);
246 245
246 spin_lock(&ip->i_flags_lock);
247 ip->i_flags &= ~XFS_IRECLAIMABLE; 247 ip->i_flags &= ~XFS_IRECLAIMABLE;
248 spin_unlock(&ip->i_flags_lock);
248 version = ih->ih_version; 249 version = ih->ih_version;
249 read_unlock(&ih->ih_lock); 250 read_unlock(&ih->ih_lock);
250 xfs_ihash_promote(ih, ip, version); 251 xfs_ihash_promote(ih, ip, version);
@@ -290,15 +291,17 @@ again:
290 291
291finish_inode: 292finish_inode:
292 if (ip->i_d.di_mode == 0) { 293 if (ip->i_d.di_mode == 0) {
293 if (!(flags & IGET_CREATE)) 294 if (!(flags & XFS_IGET_CREATE))
294 return ENOENT; 295 return ENOENT;
295 xfs_iocore_inode_reinit(ip); 296 xfs_iocore_inode_reinit(ip);
296 } 297 }
297 298
298 if (lock_flags != 0) 299 if (lock_flags != 0)
299 xfs_ilock(ip, lock_flags); 300 xfs_ilock(ip, lock_flags);
300 301
302 spin_lock(&ip->i_flags_lock);
301 ip->i_flags &= ~XFS_ISTALE; 303 ip->i_flags &= ~XFS_ISTALE;
304 spin_unlock(&ip->i_flags_lock);
302 305
303 vn_trace_exit(vp, "xfs_iget.found", 306 vn_trace_exit(vp, "xfs_iget.found",
304 (inst_t *)__return_address); 307 (inst_t *)__return_address);
@@ -320,21 +323,20 @@ finish_inode:
320 * Read the disk inode attributes into a new inode structure and get 323 * Read the disk inode attributes into a new inode structure and get
321 * a new vnode for it. This should also initialize i_ino and i_mount. 324 * a new vnode for it. This should also initialize i_ino and i_mount.
322 */ 325 */
323 error = xfs_iread(mp, tp, ino, &ip, bno); 326 error = xfs_iread(mp, tp, ino, &ip, bno,
324 if (error) { 327 (flags & XFS_IGET_BULKSTAT) ? XFS_IMAP_BULKSTAT : 0);
328 if (error)
325 return error; 329 return error;
326 }
327 330
328 vn_trace_exit(vp, "xfs_iget.alloc", (inst_t *)__return_address); 331 vn_trace_exit(vp, "xfs_iget.alloc", (inst_t *)__return_address);
329 332
330 xfs_inode_lock_init(ip, vp); 333 xfs_inode_lock_init(ip, vp);
331 xfs_iocore_inode_init(ip); 334 xfs_iocore_inode_init(ip);
332 335
333 if (lock_flags != 0) { 336 if (lock_flags)
334 xfs_ilock(ip, lock_flags); 337 xfs_ilock(ip, lock_flags);
335 } 338
336 339 if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
337 if ((ip->i_d.di_mode == 0) && !(flags & IGET_CREATE)) {
338 xfs_idestroy(ip); 340 xfs_idestroy(ip);
339 return ENOENT; 341 return ENOENT;
340 } 342 }
@@ -369,7 +371,9 @@ finish_inode:
369 ih->ih_next = ip; 371 ih->ih_next = ip;
370 ip->i_udquot = ip->i_gdquot = NULL; 372 ip->i_udquot = ip->i_gdquot = NULL;
371 ih->ih_version++; 373 ih->ih_version++;
374 spin_lock(&ip->i_flags_lock);
372 ip->i_flags |= XFS_INEW; 375 ip->i_flags |= XFS_INEW;
376 spin_unlock(&ip->i_flags_lock);
373 377
374 write_unlock(&ih->ih_lock); 378 write_unlock(&ih->ih_lock);
375 379
@@ -548,7 +552,7 @@ xfs_inode_lock_init(
548 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", vp->v_number); 552 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", vp->v_number);
549 init_waitqueue_head(&ip->i_ipin_wait); 553 init_waitqueue_head(&ip->i_ipin_wait);
550 atomic_set(&ip->i_pincount, 0); 554 atomic_set(&ip->i_pincount, 0);
551 init_sema(&ip->i_flock, 1, "xfsfino", vp->v_number); 555 initnsema(&ip->i_flock, 1, "xfsfino");
552} 556}
553 557
554/* 558/*
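
Note: several hunks above wrap every i_flags update in the new per-inode i_flags_lock so the read-modify-write of the flag word cannot race with another CPU. A minimal pthreads sketch of the same discipline is below (build with -pthread); the flag values, struct, and helper names are made up for illustration and are not the kernel's.

#include <pthread.h>
#include <stdio.h>

#define FLAG_NEW         0x1
#define FLAG_RECLAIMABLE 0x2
#define FLAG_STALE       0x4

struct inode_like {
        unsigned int       flags;       /* protected by flags_lock */
        pthread_spinlock_t flags_lock;
};

static void flags_set(struct inode_like *ip, unsigned int bits)
{
        pthread_spin_lock(&ip->flags_lock);
        ip->flags |= bits;              /* read-modify-write under the lock */
        pthread_spin_unlock(&ip->flags_lock);
}

static void flags_clear(struct inode_like *ip, unsigned int bits)
{
        pthread_spin_lock(&ip->flags_lock);
        ip->flags &= ~bits;
        pthread_spin_unlock(&ip->flags_lock);
}

int main(void)
{
        struct inode_like inode = { .flags = 0 };

        pthread_spin_init(&inode.flags_lock, PTHREAD_PROCESS_PRIVATE);
        flags_set(&inode, FLAG_NEW | FLAG_STALE);
        flags_clear(&inode, FLAG_STALE);
        printf("flags = %#x\n", inode.flags);
        pthread_spin_destroy(&inode.flags_lock);
        return 0;
}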
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 1f8ecff8553a..c27d7d495aa0 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -854,7 +854,8 @@ xfs_iread(
854 xfs_trans_t *tp, 854 xfs_trans_t *tp,
855 xfs_ino_t ino, 855 xfs_ino_t ino,
856 xfs_inode_t **ipp, 856 xfs_inode_t **ipp,
857 xfs_daddr_t bno) 857 xfs_daddr_t bno,
858 uint imap_flags)
858{ 859{
859 xfs_buf_t *bp; 860 xfs_buf_t *bp;
860 xfs_dinode_t *dip; 861 xfs_dinode_t *dip;
@@ -866,6 +867,7 @@ xfs_iread(
866 ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP); 867 ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP);
867 ip->i_ino = ino; 868 ip->i_ino = ino;
868 ip->i_mount = mp; 869 ip->i_mount = mp;
870 spin_lock_init(&ip->i_flags_lock);
869 871
870 /* 872 /*
871 * Get pointer's to the on-disk inode and the buffer containing it. 873 * Get pointer's to the on-disk inode and the buffer containing it.
@@ -874,7 +876,7 @@ xfs_iread(
874 * return NULL as well. Set i_blkno to 0 so that xfs_itobp() will 876 * return NULL as well. Set i_blkno to 0 so that xfs_itobp() will
875 * know that this is a new incore inode. 877 * know that this is a new incore inode.
876 */ 878 */
877 error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, 0); 879 error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags);
878 if (error) { 880 if (error) {
879 kmem_zone_free(xfs_inode_zone, ip); 881 kmem_zone_free(xfs_inode_zone, ip);
880 return error; 882 return error;
@@ -1113,7 +1115,7 @@ xfs_ialloc(
1113 * to prevent others from looking at until we're done. 1115 * to prevent others from looking at until we're done.
1114 */ 1116 */
1115 error = xfs_trans_iget(tp->t_mountp, tp, ino, 1117 error = xfs_trans_iget(tp->t_mountp, tp, ino,
1116 IGET_CREATE, XFS_ILOCK_EXCL, &ip); 1118 XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
1117 if (error != 0) { 1119 if (error != 0) {
1118 return error; 1120 return error;
1119 } 1121 }
@@ -2213,7 +2215,9 @@ xfs_ifree_cluster(
2213 2215
2214 if (ip == free_ip) { 2216 if (ip == free_ip) {
2215 if (xfs_iflock_nowait(ip)) { 2217 if (xfs_iflock_nowait(ip)) {
2218 spin_lock(&ip->i_flags_lock);
2216 ip->i_flags |= XFS_ISTALE; 2219 ip->i_flags |= XFS_ISTALE;
2220 spin_unlock(&ip->i_flags_lock);
2217 2221
2218 if (xfs_inode_clean(ip)) { 2222 if (xfs_inode_clean(ip)) {
2219 xfs_ifunlock(ip); 2223 xfs_ifunlock(ip);
@@ -2227,7 +2231,9 @@ xfs_ifree_cluster(
2227 2231
2228 if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { 2232 if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2229 if (xfs_iflock_nowait(ip)) { 2233 if (xfs_iflock_nowait(ip)) {
2234 spin_lock(&ip->i_flags_lock);
2230 ip->i_flags |= XFS_ISTALE; 2235 ip->i_flags |= XFS_ISTALE;
2236 spin_unlock(&ip->i_flags_lock);
2231 2237
2232 if (xfs_inode_clean(ip)) { 2238 if (xfs_inode_clean(ip)) {
2233 xfs_ifunlock(ip); 2239 xfs_ifunlock(ip);
@@ -2257,7 +2263,9 @@ xfs_ifree_cluster(
2257 AIL_LOCK(mp,s); 2263 AIL_LOCK(mp,s);
2258 iip->ili_flush_lsn = iip->ili_item.li_lsn; 2264 iip->ili_flush_lsn = iip->ili_item.li_lsn;
2259 AIL_UNLOCK(mp, s); 2265 AIL_UNLOCK(mp, s);
2266 spin_lock(&iip->ili_inode->i_flags_lock);
2260 iip->ili_inode->i_flags |= XFS_ISTALE; 2267 iip->ili_inode->i_flags |= XFS_ISTALE;
2268 spin_unlock(&iip->ili_inode->i_flags_lock);
2261 pre_flushed++; 2269 pre_flushed++;
2262 } 2270 }
2263 lip = lip->li_bio_list; 2271 lip = lip->li_bio_list;
@@ -2753,19 +2761,29 @@ xfs_iunpin(
2753 * call as the inode reclaim may be blocked waiting for 2761 * call as the inode reclaim may be blocked waiting for
2754 * the inode to become unpinned. 2762 * the inode to become unpinned.
2755 */ 2763 */
2764 struct inode *inode = NULL;
2765
2766 spin_lock(&ip->i_flags_lock);
2756 if (!(ip->i_flags & (XFS_IRECLAIM|XFS_IRECLAIMABLE))) { 2767 if (!(ip->i_flags & (XFS_IRECLAIM|XFS_IRECLAIMABLE))) {
2757 bhv_vnode_t *vp = XFS_ITOV_NULL(ip); 2768 bhv_vnode_t *vp = XFS_ITOV_NULL(ip);
2758 2769
2759 /* make sync come back and flush this inode */ 2770 /* make sync come back and flush this inode */
2760 if (vp) { 2771 if (vp) {
2761 struct inode *inode = vn_to_inode(vp); 2772 inode = vn_to_inode(vp);
2762 2773
2763 if (!(inode->i_state & 2774 if (!(inode->i_state &
2764 (I_NEW|I_FREEING|I_CLEAR))) 2775 (I_NEW|I_FREEING|I_CLEAR))) {
2765 mark_inode_dirty_sync(inode); 2776 inode = igrab(inode);
2777 if (inode)
2778 mark_inode_dirty_sync(inode);
2779 } else
2780 inode = NULL;
2766 } 2781 }
2767 } 2782 }
2783 spin_unlock(&ip->i_flags_lock);
2768 wake_up(&ip->i_ipin_wait); 2784 wake_up(&ip->i_ipin_wait);
2785 if (inode)
2786 iput(inode);
2769 } 2787 }
2770} 2788}
2771 2789
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index d10b76ed1e5b..e96eb0835fe6 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -267,6 +267,7 @@ typedef struct xfs_inode {
267 sema_t i_flock; /* inode flush lock */ 267 sema_t i_flock; /* inode flush lock */
268 atomic_t i_pincount; /* inode pin count */ 268 atomic_t i_pincount; /* inode pin count */
269 wait_queue_head_t i_ipin_wait; /* inode pinning wait queue */ 269 wait_queue_head_t i_ipin_wait; /* inode pinning wait queue */
270 spinlock_t i_flags_lock; /* inode i_flags lock */
270#ifdef HAVE_REFCACHE 271#ifdef HAVE_REFCACHE
271 struct xfs_inode **i_refcache; /* ptr to entry in ref cache */ 272 struct xfs_inode **i_refcache; /* ptr to entry in ref cache */
272 struct xfs_inode *i_release; /* inode to unref */ 273 struct xfs_inode *i_release; /* inode to unref */
@@ -389,11 +390,14 @@ typedef struct xfs_inode {
389 (((vfsp)->vfs_flag & VFS_GRPID) || ((pip)->i_d.di_mode & S_ISGID)) 390 (((vfsp)->vfs_flag & VFS_GRPID) || ((pip)->i_d.di_mode & S_ISGID))
390 391
391/* 392/*
392 * xfs_iget.c prototypes. 393 * Flags for xfs_iget()
393 */ 394 */
395#define XFS_IGET_CREATE 0x1
396#define XFS_IGET_BULKSTAT 0x2
394 397
395#define IGET_CREATE 1 398/*
396 399 * xfs_iget.c prototypes.
400 */
397void xfs_ihash_init(struct xfs_mount *); 401void xfs_ihash_init(struct xfs_mount *);
398void xfs_ihash_free(struct xfs_mount *); 402void xfs_ihash_free(struct xfs_mount *);
399void xfs_chash_init(struct xfs_mount *); 403void xfs_chash_init(struct xfs_mount *);
@@ -425,7 +429,7 @@ int xfs_itobp(struct xfs_mount *, struct xfs_trans *,
425 xfs_inode_t *, xfs_dinode_t **, struct xfs_buf **, 429 xfs_inode_t *, xfs_dinode_t **, struct xfs_buf **,
426 xfs_daddr_t, uint); 430 xfs_daddr_t, uint);
427int xfs_iread(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, 431int xfs_iread(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
428 xfs_inode_t **, xfs_daddr_t); 432 xfs_inode_t **, xfs_daddr_t, uint);
429int xfs_iread_extents(struct xfs_trans *, xfs_inode_t *, int); 433int xfs_iread_extents(struct xfs_trans *, xfs_inode_t *, int);
430int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, mode_t, 434int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, mode_t,
431 xfs_nlink_t, xfs_dev_t, struct cred *, xfs_prid_t, 435 xfs_nlink_t, xfs_dev_t, struct cred *, xfs_prid_t,
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index f8e80d8e7237..a7a92251eb56 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -743,21 +743,6 @@ xfs_inode_item_committed(
743} 743}
744 744
745/* 745/*
746 * The transaction with the inode locked has aborted. The inode
747 * must not be dirty within the transaction (unless we're forcibly
748 * shutting down). We simply unlock just as if the transaction
749 * had been cancelled.
750 */
751STATIC void
752xfs_inode_item_abort(
753 xfs_inode_log_item_t *iip)
754{
755 xfs_inode_item_unlock(iip);
756 return;
757}
758
759
760/*
761 * This gets called by xfs_trans_push_ail(), when IOP_TRYLOCK 746 * This gets called by xfs_trans_push_ail(), when IOP_TRYLOCK
762 * failed to get the inode flush lock but did get the inode locked SHARED. 747 * failed to get the inode flush lock but did get the inode locked SHARED.
763 * Here we're trying to see if the inode buffer is incore, and if so whether it's 748 * Here we're trying to see if the inode buffer is incore, and if so whether it's
@@ -915,7 +900,6 @@ STATIC struct xfs_item_ops xfs_inode_item_ops = {
915 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) 900 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
916 xfs_inode_item_committed, 901 xfs_inode_item_committed,
917 .iop_push = (void(*)(xfs_log_item_t*))xfs_inode_item_push, 902 .iop_push = (void(*)(xfs_log_item_t*))xfs_inode_item_push,
918 .iop_abort = (void(*)(xfs_log_item_t*))xfs_inode_item_abort,
919 .iop_pushbuf = (void(*)(xfs_log_item_t*))xfs_inode_item_pushbuf, 903 .iop_pushbuf = (void(*)(xfs_log_item_t*))xfs_inode_item_pushbuf,
920 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) 904 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
921 xfs_inode_item_committing 905 xfs_inode_item_committing
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h
index 5db6cd1b4cf3..bfe92ea17952 100644
--- a/fs/xfs/xfs_inode_item.h
+++ b/fs/xfs/xfs_inode_item.h
@@ -25,52 +25,54 @@
25 * must be added on to the end. 25 * must be added on to the end.
26 */ 26 */
27typedef struct xfs_inode_log_format { 27typedef struct xfs_inode_log_format {
28 unsigned short ilf_type; /* inode log item type */ 28 __uint16_t ilf_type; /* inode log item type */
29 unsigned short ilf_size; /* size of this item */ 29 __uint16_t ilf_size; /* size of this item */
30 uint ilf_fields; /* flags for fields logged */ 30 __uint32_t ilf_fields; /* flags for fields logged */
31 ushort ilf_asize; /* size of attr d/ext/root */ 31 __uint16_t ilf_asize; /* size of attr d/ext/root */
32 ushort ilf_dsize; /* size of data/ext/root */ 32 __uint16_t ilf_dsize; /* size of data/ext/root */
33 xfs_ino_t ilf_ino; /* inode number */ 33 __uint64_t ilf_ino; /* inode number */
34 union { 34 union {
35 xfs_dev_t ilfu_rdev; /* rdev value for dev inode*/ 35 __uint32_t ilfu_rdev; /* rdev value for dev inode*/
36 uuid_t ilfu_uuid; /* mount point value */ 36 uuid_t ilfu_uuid; /* mount point value */
37 } ilf_u; 37 } ilf_u;
38 __int64_t ilf_blkno; /* blkno of inode buffer */ 38 __int64_t ilf_blkno; /* blkno of inode buffer */
39 int ilf_len; /* len of inode buffer */ 39 __int32_t ilf_len; /* len of inode buffer */
40 int ilf_boffset; /* off of inode in buffer */ 40 __int32_t ilf_boffset; /* off of inode in buffer */
41} xfs_inode_log_format_t; 41} xfs_inode_log_format_t;
42 42
43#ifndef HAVE_FORMAT32
43typedef struct xfs_inode_log_format_32 { 44typedef struct xfs_inode_log_format_32 {
44 unsigned short ilf_type; /* 16: inode log item type */ 45 __uint16_t ilf_type; /* inode log item type */
45 unsigned short ilf_size; /* 16: size of this item */ 46 __uint16_t ilf_size; /* size of this item */
46 uint ilf_fields; /* 32: flags for fields logged */ 47 __uint32_t ilf_fields; /* flags for fields logged */
47 ushort ilf_asize; /* 32: size of attr d/ext/root */ 48 __uint16_t ilf_asize; /* size of attr d/ext/root */
48 ushort ilf_dsize; /* 32: size of data/ext/root */ 49 __uint16_t ilf_dsize; /* size of data/ext/root */
49 xfs_ino_t ilf_ino; /* 64: inode number */ 50 __uint64_t ilf_ino; /* inode number */
50 union { 51 union {
51 xfs_dev_t ilfu_rdev; /* 32: rdev value for dev inode*/ 52 __uint32_t ilfu_rdev; /* rdev value for dev inode*/
52 uuid_t ilfu_uuid; /* 128: mount point value */ 53 uuid_t ilfu_uuid; /* mount point value */
53 } ilf_u; 54 } ilf_u;
54 __int64_t ilf_blkno; /* 64: blkno of inode buffer */ 55 __int64_t ilf_blkno; /* blkno of inode buffer */
55 int ilf_len; /* 32: len of inode buffer */ 56 __int32_t ilf_len; /* len of inode buffer */
56 int ilf_boffset; /* 32: off of inode in buffer */ 57 __int32_t ilf_boffset; /* off of inode in buffer */
57} __attribute__((packed)) xfs_inode_log_format_32_t; 58} __attribute__((packed)) xfs_inode_log_format_32_t;
59#endif
58 60
59typedef struct xfs_inode_log_format_64 { 61typedef struct xfs_inode_log_format_64 {
60 unsigned short ilf_type; /* 16: inode log item type */ 62 __uint16_t ilf_type; /* inode log item type */
61 unsigned short ilf_size; /* 16: size of this item */ 63 __uint16_t ilf_size; /* size of this item */
62 uint ilf_fields; /* 32: flags for fields logged */ 64 __uint32_t ilf_fields; /* flags for fields logged */
63 ushort ilf_asize; /* 32: size of attr d/ext/root */ 65 __uint16_t ilf_asize; /* size of attr d/ext/root */
64 ushort ilf_dsize; /* 32: size of data/ext/root */ 66 __uint16_t ilf_dsize; /* size of data/ext/root */
65 __uint32_t ilf_pad; /* 32: pad for 64 bit boundary */ 67 __uint32_t ilf_pad; /* pad for 64 bit boundary */
66 xfs_ino_t ilf_ino; /* 64: inode number */ 68 __uint64_t ilf_ino; /* inode number */
67 union { 69 union {
68 xfs_dev_t ilfu_rdev; /* 32: rdev value for dev inode*/ 70 __uint32_t ilfu_rdev; /* rdev value for dev inode*/
69 uuid_t ilfu_uuid; /* 128: mount point value */ 71 uuid_t ilfu_uuid; /* mount point value */
70 } ilf_u; 72 } ilf_u;
71 __int64_t ilf_blkno; /* 64: blkno of inode buffer */ 73 __int64_t ilf_blkno; /* blkno of inode buffer */
72 int ilf_len; /* 32: len of inode buffer */ 74 __int32_t ilf_len; /* len of inode buffer */
73 int ilf_boffset; /* 32: off of inode in buffer */ 75 __int32_t ilf_boffset; /* off of inode in buffer */
74} xfs_inode_log_format_64_t; 76} xfs_inode_log_format_64_t;
75 77
76/* 78/*
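
Note: the log-format structures above move from unsigned short/uint/int to explicit __uint16_t/__uint32_t/__int64_t so the logged layout does not depend on the compiler's notion of int. A small sketch of the same idea, with a C11 _Static_assert guarding the layout, is below; the field names are shortened, the packing attribute assumes GCC/Clang, and this is not the real structure.

#include <stdint.h>

struct log_format_sketch {
        uint16_t type;          /* log item type */
        uint16_t size;          /* size of this item */
        uint32_t fields;        /* flags for fields logged */
        uint16_t asize;
        uint16_t dsize;
        uint64_t ino;           /* inode number */
        int64_t  blkno;         /* block number of the buffer */
        int32_t  len;
        int32_t  boffset;
} __attribute__((packed));

/* catch accidental layout changes at compile time */
_Static_assert(sizeof(struct log_format_sketch) == 36,
               "log format sketch layout changed");

int main(void) { return 0; }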
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index f1949c16df15..19655124da78 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -398,6 +398,23 @@ xfs_flush_space(
398 return 1; 398 return 1;
399} 399}
400 400
401STATIC int
402xfs_cmn_err_fsblock_zero(
403 xfs_inode_t *ip,
404 xfs_bmbt_irec_t *imap)
405{
406 xfs_cmn_err(XFS_PTAG_FSBLOCK_ZERO, CE_ALERT, ip->i_mount,
407 "Access to block zero in inode %llu "
408 "start_block: %llx start_off: %llx "
409 "blkcnt: %llx extent-state: %x\n",
410 (unsigned long long)ip->i_ino,
411 (unsigned long long)imap->br_startblock,
412 (unsigned long long)imap->br_startoff,
413 (unsigned long long)imap->br_blockcount,
414 imap->br_state);
415 return EFSCORRUPTED;
416}
417
401int 418int
402xfs_iomap_write_direct( 419xfs_iomap_write_direct(
403 xfs_inode_t *ip, 420 xfs_inode_t *ip,
@@ -536,23 +553,17 @@ xfs_iomap_write_direct(
536 * Copy any maps to caller's array and return any error. 553 * Copy any maps to caller's array and return any error.
537 */ 554 */
538 if (nimaps == 0) { 555 if (nimaps == 0) {
539 error = (ENOSPC); 556 error = ENOSPC;
557 goto error_out;
558 }
559
560 if (unlikely(!imap.br_startblock && !(io->io_flags & XFS_IOCORE_RT))) {
561 error = xfs_cmn_err_fsblock_zero(ip, &imap);
540 goto error_out; 562 goto error_out;
541 } 563 }
542 564
543 *ret_imap = imap; 565 *ret_imap = imap;
544 *nmaps = 1; 566 *nmaps = 1;
545 if ( !(io->io_flags & XFS_IOCORE_RT) && !ret_imap->br_startblock) {
546 cmn_err(CE_PANIC,"Access to block zero: fs <%s> inode: %lld "
547 "start_block : %llx start_off : %llx blkcnt : %llx "
548 "extent-state : %x \n",
549 (ip->i_mount)->m_fsname,
550 (long long)ip->i_ino,
551 (unsigned long long)ret_imap->br_startblock,
552 (unsigned long long)ret_imap->br_startoff,
553 (unsigned long long)ret_imap->br_blockcount,
554 ret_imap->br_state);
555 }
556 return 0; 567 return 0;
557 568
558error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */ 569error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
@@ -715,17 +726,8 @@ retry:
715 goto retry; 726 goto retry;
716 } 727 }
717 728
718 if (!(io->io_flags & XFS_IOCORE_RT) && !ret_imap->br_startblock) { 729 if (unlikely(!imap[0].br_startblock && !(io->io_flags & XFS_IOCORE_RT)))
719 cmn_err(CE_PANIC,"Access to block zero: fs <%s> inode: %lld " 730 return xfs_cmn_err_fsblock_zero(ip, &imap[0]);
720 "start_block : %llx start_off : %llx blkcnt : %llx "
721 "extent-state : %x \n",
722 (ip->i_mount)->m_fsname,
723 (long long)ip->i_ino,
724 (unsigned long long)ret_imap->br_startblock,
725 (unsigned long long)ret_imap->br_startoff,
726 (unsigned long long)ret_imap->br_blockcount,
727 ret_imap->br_state);
728 }
729 731
730 *ret_imap = imap[0]; 732 *ret_imap = imap[0];
731 *nmaps = 1; 733 *nmaps = 1;
@@ -853,24 +855,10 @@ xfs_iomap_write_allocate(
853 * See if we were able to allocate an extent that 855 * See if we were able to allocate an extent that
854 * covers at least part of the callers request 856 * covers at least part of the callers request
855 */ 857 */
856
857 for (i = 0; i < nimaps; i++) { 858 for (i = 0; i < nimaps; i++) {
858 if (!(io->io_flags & XFS_IOCORE_RT) && 859 if (unlikely(!imap[i].br_startblock &&
859 !imap[i].br_startblock) { 860 !(io->io_flags & XFS_IOCORE_RT)))
860 cmn_err(CE_PANIC,"Access to block zero: " 861 return xfs_cmn_err_fsblock_zero(ip, &imap[i]);
861 "fs <%s> inode: %lld "
862 "start_block : %llx start_off : %llx "
863 "blkcnt : %llx extent-state : %x \n",
864 (ip->i_mount)->m_fsname,
865 (long long)ip->i_ino,
866 (unsigned long long)
867 imap[i].br_startblock,
868 (unsigned long long)
869 imap[i].br_startoff,
870 (unsigned long long)
871 imap[i].br_blockcount,
872 imap[i].br_state);
873 }
874 if ((offset_fsb >= imap[i].br_startoff) && 862 if ((offset_fsb >= imap[i].br_startoff) &&
875 (offset_fsb < (imap[i].br_startoff + 863 (offset_fsb < (imap[i].br_startoff +
876 imap[i].br_blockcount))) { 864 imap[i].br_blockcount))) {
@@ -941,7 +929,7 @@ xfs_iomap_write_unwritten(
941 XFS_WRITE_LOG_COUNT); 929 XFS_WRITE_LOG_COUNT);
942 if (error) { 930 if (error) {
943 xfs_trans_cancel(tp, 0); 931 xfs_trans_cancel(tp, 0);
944 goto error0; 932 return XFS_ERROR(error);
945 } 933 }
946 934
947 xfs_ilock(ip, XFS_ILOCK_EXCL); 935 xfs_ilock(ip, XFS_ILOCK_EXCL);
@@ -967,19 +955,11 @@ xfs_iomap_write_unwritten(
967 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); 955 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
968 xfs_iunlock(ip, XFS_ILOCK_EXCL); 956 xfs_iunlock(ip, XFS_ILOCK_EXCL);
969 if (error) 957 if (error)
970 goto error0; 958 return XFS_ERROR(error);
971 959
972 if ( !(io->io_flags & XFS_IOCORE_RT) && !imap.br_startblock) { 960 if (unlikely(!imap.br_startblock &&
973 cmn_err(CE_PANIC,"Access to block zero: fs <%s> " 961 !(io->io_flags & XFS_IOCORE_RT)))
974 "inode: %lld start_block : %llx start_off : " 962 return xfs_cmn_err_fsblock_zero(ip, &imap);
975 "%llx blkcnt : %llx extent-state : %x \n",
976 (ip->i_mount)->m_fsname,
977 (long long)ip->i_ino,
978 (unsigned long long)imap.br_startblock,
979 (unsigned long long)imap.br_startoff,
980 (unsigned long long)imap.br_blockcount,
981 imap.br_state);
982 }
983 963
984 if ((numblks_fsb = imap.br_blockcount) == 0) { 964 if ((numblks_fsb = imap.br_blockcount) == 0) {
985 /* 965 /*
@@ -999,6 +979,5 @@ error_on_bmapi_transaction:
999 xfs_bmap_cancel(&free_list); 979 xfs_bmap_cancel(&free_list);
1000 xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT)); 980 xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
1001 xfs_iunlock(ip, XFS_ILOCK_EXCL); 981 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1002error0:
1003 return XFS_ERROR(error); 982 return XFS_ERROR(error);
1004} 983}
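
Note: four near-identical cmn_err(CE_PANIC, ...) blocks in this file are collapsed into one helper that both logs the zero-fsblock condition and hands back EFSCORRUPTED, turning a panic into an ordinary error return. A stripped-down sketch of that shape follows; fprintf and EUCLEAN stand in for the kernel's xfs_cmn_err and XFS_ERROR(EFSCORRUPTED), and the names are illustrative.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct mapping {
        uint64_t startblock;
        uint64_t startoff;
        uint64_t blockcount;
};

/* report the corruption once, in one place, and return the error code */
static int report_block_zero(uint64_t ino, const struct mapping *map)
{
        fprintf(stderr,
                "access to block zero in inode %llu "
                "start_block: %llx start_off: %llx blkcnt: %llx\n",
                (unsigned long long)ino,
                (unsigned long long)map->startblock,
                (unsigned long long)map->startoff,
                (unsigned long long)map->blockcount);
        return EUCLEAN;         /* closest userspace stand-in for EFSCORRUPTED */
}

static int check_mapping(uint64_t ino, const struct mapping *map, int realtime)
{
        if (!map->startblock && !realtime)
                return report_block_zero(ino, map);
        return 0;
}

int main(void)
{
        struct mapping bad = { .startblock = 0, .startoff = 8, .blockcount = 1 };

        return check_mapping(42, &bad, 0) ? 1 : 0;
}

Callers then do "return report_...()" or "error = ...; goto error_out;" instead of repeating the message, which is what the rewritten hunks above show.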
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 46249e4d1fea..7775ddc0b3c6 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -39,6 +39,16 @@
39#include "xfs_error.h" 39#include "xfs_error.h"
40#include "xfs_btree.h" 40#include "xfs_btree.h"
41 41
42int
43xfs_internal_inum(
44 xfs_mount_t *mp,
45 xfs_ino_t ino)
46{
47 return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
48 (XFS_SB_VERSION_HASQUOTA(&mp->m_sb) &&
49 (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino)));
50}
51
42STATIC int 52STATIC int
43xfs_bulkstat_one_iget( 53xfs_bulkstat_one_iget(
44 xfs_mount_t *mp, /* mount point for filesystem */ 54 xfs_mount_t *mp, /* mount point for filesystem */
@@ -52,7 +62,8 @@ xfs_bulkstat_one_iget(
52 bhv_vnode_t *vp; 62 bhv_vnode_t *vp;
53 int error; 63 int error;
54 64
55 error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, bno); 65 error = xfs_iget(mp, NULL, ino,
66 XFS_IGET_BULKSTAT, XFS_ILOCK_SHARED, &ip, bno);
56 if (error) { 67 if (error) {
57 *stat = BULKSTAT_RV_NOTHING; 68 *stat = BULKSTAT_RV_NOTHING;
58 return error; 69 return error;
@@ -212,17 +223,12 @@ xfs_bulkstat_one(
212 xfs_dinode_t *dip; /* dinode inode pointer */ 223 xfs_dinode_t *dip; /* dinode inode pointer */
213 224
214 dip = (xfs_dinode_t *)dibuff; 225 dip = (xfs_dinode_t *)dibuff;
226 *stat = BULKSTAT_RV_NOTHING;
215 227
216 if (!buffer || ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino || 228 if (!buffer || xfs_internal_inum(mp, ino))
217 (XFS_SB_VERSION_HASQUOTA(&mp->m_sb) &&
218 (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino))) {
219 *stat = BULKSTAT_RV_NOTHING;
220 return XFS_ERROR(EINVAL); 229 return XFS_ERROR(EINVAL);
221 } 230 if (ubsize < sizeof(*buf))
222 if (ubsize < sizeof(*buf)) {
223 *stat = BULKSTAT_RV_NOTHING;
224 return XFS_ERROR(ENOMEM); 231 return XFS_ERROR(ENOMEM);
225 }
226 232
227 buf = kmem_alloc(sizeof(*buf), KM_SLEEP); 233 buf = kmem_alloc(sizeof(*buf), KM_SLEEP);
228 234
@@ -238,8 +244,7 @@ xfs_bulkstat_one(
238 } 244 }
239 245
240 if (copy_to_user(buffer, buf, sizeof(*buf))) { 246 if (copy_to_user(buffer, buf, sizeof(*buf))) {
241 *stat = BULKSTAT_RV_NOTHING; 247 error = EFAULT;
242 error = EFAULT;
243 goto out_free; 248 goto out_free;
244 } 249 }
245 250
@@ -253,6 +258,46 @@ xfs_bulkstat_one(
253} 258}
254 259
255/* 260/*
261 * Test to see whether we can use the ondisk inode directly, based
262 * on the given bulkstat flags, filling in dipp accordingly.
263 * Returns zero if the inode is dodgey.
264 */
265STATIC int
266xfs_bulkstat_use_dinode(
267 xfs_mount_t *mp,
268 int flags,
269 xfs_buf_t *bp,
270 int clustidx,
271 xfs_dinode_t **dipp)
272{
273 xfs_dinode_t *dip;
274 unsigned int aformat;
275
276 *dipp = NULL;
277 if (!bp || (flags & BULKSTAT_FG_IGET))
278 return 1;
279 dip = (xfs_dinode_t *)
280 xfs_buf_offset(bp, clustidx << mp->m_sb.sb_inodelog);
281 if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC ||
282 !XFS_DINODE_GOOD_VERSION(
283 INT_GET(dip->di_core.di_version, ARCH_CONVERT)))
284 return 0;
285 if (flags & BULKSTAT_FG_QUICK) {
286 *dipp = dip;
287 return 1;
288 }
289 /* BULKSTAT_FG_INLINE: if attr fork is local, or not there, use it */
290 aformat = INT_GET(dip->di_core.di_aformat, ARCH_CONVERT);
291 if ((XFS_CFORK_Q(&dip->di_core) == 0) ||
292 (aformat == XFS_DINODE_FMT_LOCAL) ||
293 (aformat == XFS_DINODE_FMT_EXTENTS && !dip->di_core.di_anextents)) {
294 *dipp = dip;
295 return 1;
296 }
297 return 1;
298}
299
300/*
256 * Return stat information in bulk (by-inode) for the filesystem. 301 * Return stat information in bulk (by-inode) for the filesystem.
257 */ 302 */
258int /* error status */ 303int /* error status */
@@ -284,10 +329,11 @@ xfs_bulkstat(
284 xfs_agino_t gino; /* current btree rec's start inode */ 329 xfs_agino_t gino; /* current btree rec's start inode */
285 int i; /* loop index */ 330 int i; /* loop index */
286 int icount; /* count of inodes good in irbuf */ 331 int icount; /* count of inodes good in irbuf */
332 size_t irbsize; /* size of irec buffer in bytes */
287 xfs_ino_t ino; /* inode number (filesystem) */ 333 xfs_ino_t ino; /* inode number (filesystem) */
288 xfs_inobt_rec_t *irbp; /* current irec buffer pointer */ 334 xfs_inobt_rec_incore_t *irbp; /* current irec buffer pointer */
289 xfs_inobt_rec_t *irbuf; /* start of irec buffer */ 335 xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */
290 xfs_inobt_rec_t *irbufend; /* end of good irec buffer entries */ 336 xfs_inobt_rec_incore_t *irbufend; /* end of good irec buffer entries */
291 xfs_ino_t lastino=0; /* last inode number returned */ 337 xfs_ino_t lastino=0; /* last inode number returned */
292 int nbcluster; /* # of blocks in a cluster */ 338 int nbcluster; /* # of blocks in a cluster */
293 int nicluster; /* # of inodes in a cluster */ 339 int nicluster; /* # of inodes in a cluster */
@@ -328,13 +374,10 @@ xfs_bulkstat(
328 (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog); 374 (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
329 nimask = ~(nicluster - 1); 375 nimask = ~(nicluster - 1);
330 nbcluster = nicluster >> mp->m_sb.sb_inopblog; 376 nbcluster = nicluster >> mp->m_sb.sb_inopblog;
331 /* 377 irbuf = kmem_zalloc_greedy(&irbsize, NBPC, NBPC * 4,
332 * Allocate a page-sized buffer for inode btree records. 378 KM_SLEEP | KM_MAYFAIL | KM_LARGE);
333 * We could try allocating something smaller, but for normal 379 nirbuf = irbsize / sizeof(*irbuf);
334 * calls we'll always (potentially) need the whole page. 380
335 */
336 irbuf = kmem_alloc(NBPC, KM_SLEEP);
337 nirbuf = NBPC / sizeof(*irbuf);
338 /* 381 /*
339 * Loop over the allocation groups, starting from the last 382 * Loop over the allocation groups, starting from the last
340 * inode returned; 0 means start of the allocation group. 383 * inode returned; 0 means start of the allocation group.
@@ -358,7 +401,7 @@ xfs_bulkstat(
358 * Allocate and initialize a btree cursor for ialloc btree. 401 * Allocate and initialize a btree cursor for ialloc btree.
359 */ 402 */
360 cur = xfs_btree_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_INO, 403 cur = xfs_btree_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_INO,
361 (xfs_inode_t *)0, 0); 404 (xfs_inode_t *)0, 0);
362 irbp = irbuf; 405 irbp = irbuf;
363 irbufend = irbuf + nirbuf; 406 irbufend = irbuf + nirbuf;
364 end_of_ag = 0; 407 end_of_ag = 0;
@@ -395,9 +438,9 @@ xfs_bulkstat(
395 gcnt++; 438 gcnt++;
396 } 439 }
397 gfree |= XFS_INOBT_MASKN(0, chunkidx); 440 gfree |= XFS_INOBT_MASKN(0, chunkidx);
398 INT_SET(irbp->ir_startino, ARCH_CONVERT, gino); 441 irbp->ir_startino = gino;
399 INT_SET(irbp->ir_freecount, ARCH_CONVERT, gcnt); 442 irbp->ir_freecount = gcnt;
400 INT_SET(irbp->ir_free, ARCH_CONVERT, gfree); 443 irbp->ir_free = gfree;
401 irbp++; 444 irbp++;
402 agino = gino + XFS_INODES_PER_CHUNK; 445 agino = gino + XFS_INODES_PER_CHUNK;
403 icount = XFS_INODES_PER_CHUNK - gcnt; 446 icount = XFS_INODES_PER_CHUNK - gcnt;
@@ -451,11 +494,27 @@ xfs_bulkstat(
451 } 494 }
452 /* 495 /*
453 * If this chunk has any allocated inodes, save it. 496 * If this chunk has any allocated inodes, save it.
497 * Also start read-ahead now for this chunk.
454 */ 498 */
455 if (gcnt < XFS_INODES_PER_CHUNK) { 499 if (gcnt < XFS_INODES_PER_CHUNK) {
456 INT_SET(irbp->ir_startino, ARCH_CONVERT, gino); 500 /*
457 INT_SET(irbp->ir_freecount, ARCH_CONVERT, gcnt); 501 * Loop over all clusters in the next chunk.
458 INT_SET(irbp->ir_free, ARCH_CONVERT, gfree); 502 * Do a readahead if there are any allocated
503 * inodes in that cluster.
504 */
505 for (agbno = XFS_AGINO_TO_AGBNO(mp, gino),
506 chunkidx = 0;
507 chunkidx < XFS_INODES_PER_CHUNK;
508 chunkidx += nicluster,
509 agbno += nbcluster) {
510 if (XFS_INOBT_MASKN(chunkidx,
511 nicluster) & ~gfree)
512 xfs_btree_reada_bufs(mp, agno,
513 agbno, nbcluster);
514 }
515 irbp->ir_startino = gino;
516 irbp->ir_freecount = gcnt;
517 irbp->ir_free = gfree;
459 irbp++; 518 irbp++;
460 icount += XFS_INODES_PER_CHUNK - gcnt; 519 icount += XFS_INODES_PER_CHUNK - gcnt;
461 } 520 }
@@ -479,33 +538,11 @@ xfs_bulkstat(
479 for (irbp = irbuf; 538 for (irbp = irbuf;
480 irbp < irbufend && ubleft >= statstruct_size; irbp++) { 539 irbp < irbufend && ubleft >= statstruct_size; irbp++) {
481 /* 540 /*
482 * Read-ahead the next chunk's worth of inodes.
483 */
484 if (&irbp[1] < irbufend) {
485 /*
486 * Loop over all clusters in the next chunk.
487 * Do a readahead if there are any allocated
488 * inodes in that cluster.
489 */
490 for (agbno = XFS_AGINO_TO_AGBNO(mp,
491 INT_GET(irbp[1].ir_startino, ARCH_CONVERT)),
492 chunkidx = 0;
493 chunkidx < XFS_INODES_PER_CHUNK;
494 chunkidx += nicluster,
495 agbno += nbcluster) {
496 if (XFS_INOBT_MASKN(chunkidx,
497 nicluster) &
498 ~(INT_GET(irbp[1].ir_free, ARCH_CONVERT)))
499 xfs_btree_reada_bufs(mp, agno,
500 agbno, nbcluster);
501 }
502 }
503 /*
504 * Now process this chunk of inodes. 541 * Now process this chunk of inodes.
505 */ 542 */
506 for (agino = INT_GET(irbp->ir_startino, ARCH_CONVERT), chunkidx = 0, clustidx = 0; 543 for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
507 ubleft > 0 && 544 ubleft > 0 &&
508 INT_GET(irbp->ir_freecount, ARCH_CONVERT) < XFS_INODES_PER_CHUNK; 545 irbp->ir_freecount < XFS_INODES_PER_CHUNK;
509 chunkidx++, clustidx++, agino++) { 546 chunkidx++, clustidx++, agino++) {
510 ASSERT(chunkidx < XFS_INODES_PER_CHUNK); 547 ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
511 /* 548 /*
@@ -525,11 +562,12 @@ xfs_bulkstat(
525 */ 562 */
526 if ((chunkidx & (nicluster - 1)) == 0) { 563 if ((chunkidx & (nicluster - 1)) == 0) {
527 agbno = XFS_AGINO_TO_AGBNO(mp, 564 agbno = XFS_AGINO_TO_AGBNO(mp,
528 INT_GET(irbp->ir_startino, ARCH_CONVERT)) + 565 irbp->ir_startino) +
529 ((chunkidx & nimask) >> 566 ((chunkidx & nimask) >>
530 mp->m_sb.sb_inopblog); 567 mp->m_sb.sb_inopblog);
531 568
532 if (flags & BULKSTAT_FG_QUICK) { 569 if (flags & (BULKSTAT_FG_QUICK |
570 BULKSTAT_FG_INLINE)) {
533 ino = XFS_AGINO_TO_INO(mp, agno, 571 ino = XFS_AGINO_TO_INO(mp, agno,
534 agino); 572 agino);
535 bno = XFS_AGB_TO_DADDR(mp, agno, 573 bno = XFS_AGB_TO_DADDR(mp, agno,
@@ -543,6 +581,7 @@ xfs_bulkstat(
543 KM_SLEEP); 581 KM_SLEEP);
544 ip->i_ino = ino; 582 ip->i_ino = ino;
545 ip->i_mount = mp; 583 ip->i_mount = mp;
584 spin_lock_init(&ip->i_flags_lock);
546 if (bp) 585 if (bp)
547 xfs_buf_relse(bp); 586 xfs_buf_relse(bp);
548 error = xfs_itobp(mp, NULL, ip, 587 error = xfs_itobp(mp, NULL, ip,
@@ -564,30 +603,34 @@ xfs_bulkstat(
564 /* 603 /*
565 * Skip if this inode is free. 604 * Skip if this inode is free.
566 */ 605 */
567 if (XFS_INOBT_MASK(chunkidx) & INT_GET(irbp->ir_free, ARCH_CONVERT)) 606 if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
568 continue; 607 continue;
569 /* 608 /*
570 * Count used inodes as free so we can tell 609 * Count used inodes as free so we can tell
571 * when the chunk is used up. 610 * when the chunk is used up.
572 */ 611 */
573 INT_MOD(irbp->ir_freecount, ARCH_CONVERT, +1); 612 irbp->ir_freecount++;
574 ino = XFS_AGINO_TO_INO(mp, agno, agino); 613 ino = XFS_AGINO_TO_INO(mp, agno, agino);
575 bno = XFS_AGB_TO_DADDR(mp, agno, agbno); 614 bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
576 if (flags & BULKSTAT_FG_QUICK) { 615 if (!xfs_bulkstat_use_dinode(mp, flags, bp,
577 dip = (xfs_dinode_t *)xfs_buf_offset(bp, 616 clustidx, &dip))
578 (clustidx << mp->m_sb.sb_inodelog)); 617 continue;
579 618 /*
580 if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT) 619 * If we need to do an iget, cannot hold bp.
581 != XFS_DINODE_MAGIC 620 * Drop it, until starting the next cluster.
582 || !XFS_DINODE_GOOD_VERSION( 621 */
583 INT_GET(dip->di_core.di_version, ARCH_CONVERT))) 622 if ((flags & BULKSTAT_FG_INLINE) && !dip) {
584 continue; 623 if (bp)
624 xfs_buf_relse(bp);
625 bp = NULL;
585 } 626 }
586 627
587 /* 628 /*
588 * Get the inode and fill in a single buffer. 629 * Get the inode and fill in a single buffer.
589 * BULKSTAT_FG_QUICK uses dip to fill it in. 630 * BULKSTAT_FG_QUICK uses dip to fill it in.
590 * BULKSTAT_FG_IGET uses igets. 631 * BULKSTAT_FG_IGET uses igets.
632 * BULKSTAT_FG_INLINE uses dip if we have an
633 * inline attr fork, else igets.
591 * See: xfs_bulkstat_one & xfs_dm_bulkstat_one. 634 * See: xfs_bulkstat_one & xfs_dm_bulkstat_one.
592 * This is also used to count inodes/blks, etc 635 * This is also used to count inodes/blks, etc
593 * in xfs_qm_quotacheck. 636 * in xfs_qm_quotacheck.
@@ -597,8 +640,15 @@ xfs_bulkstat(
597 ubleft, private_data, 640 ubleft, private_data,
598 bno, &ubused, dip, &fmterror); 641 bno, &ubused, dip, &fmterror);
599 if (fmterror == BULKSTAT_RV_NOTHING) { 642 if (fmterror == BULKSTAT_RV_NOTHING) {
600 if (error == ENOMEM) 643 if (error == EFAULT) {
644 ubleft = 0;
645 rval = error;
646 break;
647 }
648 else if (error == ENOMEM)
601 ubleft = 0; 649 ubleft = 0;
650 else
651 lastino = ino;
602 continue; 652 continue;
603 } 653 }
604 if (fmterror == BULKSTAT_RV_GIVEUP) { 654 if (fmterror == BULKSTAT_RV_GIVEUP) {
@@ -633,7 +683,7 @@ xfs_bulkstat(
633 /* 683 /*
634 * Done, we're either out of filesystem or space to put the data. 684 * Done, we're either out of filesystem or space to put the data.
635 */ 685 */
636 kmem_free(irbuf, NBPC); 686 kmem_free(irbuf, irbsize);
637 *ubcountp = ubelem; 687 *ubcountp = ubelem;
638 if (agno >= mp->m_sb.sb_agcount) { 688 if (agno >= mp->m_sb.sb_agcount) {
639 /* 689 /*
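
Note: both the inode hash (xfs_iget.c above) and the bulkstat record buffer here switch to kmem_zalloc_greedy(), which asks for a maximum size, falls back toward a minimum when memory is tight, and reports the size it actually obtained. A userspace analogue is sketched below, assuming the try-big-then-shrink semantics described in the commit list; the function name and the halving policy are illustrative, not the kernel implementation.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Try to allocate as close to maxsize as possible, never less than minsize.
 * On success *sizep holds the size actually obtained.
 */
static void *zalloc_greedy(size_t *sizep, size_t minsize, size_t maxsize)
{
        size_t size = maxsize;
        void *p;

        for (;;) {
                p = malloc(size);
                if (p) {
                        memset(p, 0, size);
                        *sizep = size;
                        return p;
                }
                if (size == minsize)
                        return NULL;
                size /= 2;
                if (size < minsize)
                        size = minsize;
        }
}

int main(void)
{
        size_t got = 0;
        void *buf = zalloc_greedy(&got, 4096, 4 * 4096);

        if (!buf)
                return 1;
        printf("got %zu bytes\n", got);
        free(buf);
        return 0;
}

The caller divides the returned byte count by the element size (as the new xfs_bulkstat() code does with irbsize) rather than assuming the maximum was granted.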
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
index be5f12e07d22..f25a28862a17 100644
--- a/fs/xfs/xfs_itable.h
+++ b/fs/xfs/xfs_itable.h
@@ -36,15 +36,16 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount *mp,
36/* 36/*
37 * Values for stat return value. 37 * Values for stat return value.
38 */ 38 */
39#define BULKSTAT_RV_NOTHING 0 39#define BULKSTAT_RV_NOTHING 0
40#define BULKSTAT_RV_DIDONE 1 40#define BULKSTAT_RV_DIDONE 1
41#define BULKSTAT_RV_GIVEUP 2 41#define BULKSTAT_RV_GIVEUP 2
42 42
43/* 43/*
44 * Values for bulkstat flag argument. 44 * Values for bulkstat flag argument.
45 */ 45 */
46#define BULKSTAT_FG_IGET 0x1 /* Go through the buffer cache */ 46#define BULKSTAT_FG_IGET 0x1 /* Go through the buffer cache */
47#define BULKSTAT_FG_QUICK 0x2 /* No iget, walk the dinode cluster */ 47#define BULKSTAT_FG_QUICK 0x2 /* No iget, walk the dinode cluster */
48#define BULKSTAT_FG_INLINE 0x4 /* No iget if inline attrs */
48 49
49/* 50/*
50 * Return stat information in bulk (by-inode) for the filesystem. 51 * Return stat information in bulk (by-inode) for the filesystem.
@@ -80,6 +81,11 @@ xfs_bulkstat_one(
80 void *dibuff, 81 void *dibuff,
81 int *stat); 82 int *stat);
82 83
84int
85xfs_internal_inum(
86 xfs_mount_t *mp,
87 xfs_ino_t ino);
88
83int /* error status */ 89int /* error status */
84xfs_inumbers( 90xfs_inumbers(
85 xfs_mount_t *mp, /* mount point for filesystem */ 91 xfs_mount_t *mp, /* mount point for filesystem */
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 21ac1a67e3e0..c48bf61f17bd 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -617,7 +617,8 @@ xfs_log_unmount_write(xfs_mount_t *mp)
617 reg[0].i_len = sizeof(magic); 617 reg[0].i_len = sizeof(magic);
618 XLOG_VEC_SET_TYPE(&reg[0], XLOG_REG_TYPE_UNMOUNT); 618 XLOG_VEC_SET_TYPE(&reg[0], XLOG_REG_TYPE_UNMOUNT);
619 619
620 error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0, 0); 620 error = xfs_log_reserve(mp, 600, 1, &tic,
621 XFS_LOG, 0, XLOG_UNMOUNT_REC_TYPE);
621 if (!error) { 622 if (!error) {
622 /* remove inited flag */ 623 /* remove inited flag */
623 ((xlog_ticket_t *)tic)->t_flags = 0; 624 ((xlog_ticket_t *)tic)->t_flags = 0;
@@ -655,8 +656,11 @@ xfs_log_unmount_write(xfs_mount_t *mp)
655 } else { 656 } else {
656 LOG_UNLOCK(log, s); 657 LOG_UNLOCK(log, s);
657 } 658 }
658 if (tic) 659 if (tic) {
660 xlog_trace_loggrant(log, tic, "unmount rec");
661 xlog_ungrant_log_space(log, tic);
659 xlog_state_put_ticket(log, tic); 662 xlog_state_put_ticket(log, tic);
663 }
660 } else { 664 } else {
661 /* 665 /*
662 * We're already in forced_shutdown mode, couldn't 666 * We're already in forced_shutdown mode, couldn't
@@ -1196,7 +1200,7 @@ xlog_alloc_log(xfs_mount_t *mp,
1196 kmem_zalloc(sizeof(xlog_in_core_t), KM_SLEEP); 1200 kmem_zalloc(sizeof(xlog_in_core_t), KM_SLEEP);
1197 iclog = *iclogp; 1201 iclog = *iclogp;
1198 iclog->hic_data = (xlog_in_core_2_t *) 1202 iclog->hic_data = (xlog_in_core_2_t *)
1199 kmem_zalloc(iclogsize, KM_SLEEP); 1203 kmem_zalloc(iclogsize, KM_SLEEP | KM_LARGE);
1200 1204
1201 iclog->ic_prev = prev_iclog; 1205 iclog->ic_prev = prev_iclog;
1202 prev_iclog = iclog; 1206 prev_iclog = iclog;
@@ -2212,9 +2216,13 @@ xlog_state_do_callback(
2212 2216
2213 iclog = iclog->ic_next; 2217 iclog = iclog->ic_next;
2214 } while (first_iclog != iclog); 2218 } while (first_iclog != iclog);
2215 if (repeats && (repeats % 10) == 0) { 2219
2220 if (repeats > 5000) {
2221 flushcnt += repeats;
2222 repeats = 0;
2216 xfs_fs_cmn_err(CE_WARN, log->l_mp, 2223 xfs_fs_cmn_err(CE_WARN, log->l_mp,
2217 "xlog_state_do_callback: looping %d", repeats); 2224 "%s: possible infinite loop (%d iterations)",
2225 __FUNCTION__, flushcnt);
2218 } 2226 }
2219 } while (!ioerrors && loopdidcallbacks); 2227 } while (!ioerrors && loopdidcallbacks);
2220 2228
@@ -2246,6 +2254,7 @@ xlog_state_do_callback(
2246 } 2254 }
2247#endif 2255#endif
2248 2256
2257 flushcnt = 0;
2249 if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) { 2258 if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) {
2250 flushcnt = log->l_flushcnt; 2259 flushcnt = log->l_flushcnt;
2251 log->l_flushcnt = 0; 2260 log->l_flushcnt = 0;
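
Note: the callback loop above stops warning every ten passes and instead accumulates the repeat count, emitting a single "possible infinite loop" warning only after 5000 passes. A toy sketch of that throttled-warning pattern is below; the threshold, the fake work function, and the message wording are illustrative only.

#include <stdio.h>

#define LOOP_WARN_THRESHOLD 5000

static int do_one_pass(int i)
{
        /* pretend some work remains for the first 12000 passes */
        return i < 12000;
}

int main(void)
{
        int repeats = 0, flushcnt = 0, i = 0;

        while (do_one_pass(i++)) {
                if (++repeats > LOOP_WARN_THRESHOLD) {
                        flushcnt += repeats;
                        repeats = 0;
                        fprintf(stderr,
                                "%s: possible infinite loop (%d iterations)\n",
                                __func__, flushcnt);
                }
        }
        return 0;
}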
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index eacb3d4987f2..ebbe93f4f97b 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -48,16 +48,10 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
48 */ 48 */
49 49
50/* 50/*
51 * Flags to xfs_log_mount
52 */
53#define XFS_LOG_RECOVER 0x1
54
55/*
56 * Flags to xfs_log_done() 51 * Flags to xfs_log_done()
57 */ 52 */
58#define XFS_LOG_REL_PERM_RESERV 0x1 53#define XFS_LOG_REL_PERM_RESERV 0x1
59 54
60
61/* 55/*
62 * Flags to xfs_log_reserve() 56 * Flags to xfs_log_reserve()
63 * 57 *
@@ -70,8 +64,6 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
70#define XFS_LOG_SLEEP 0x0 64#define XFS_LOG_SLEEP 0x0
71#define XFS_LOG_NOSLEEP 0x1 65#define XFS_LOG_NOSLEEP 0x1
72#define XFS_LOG_PERM_RESERV 0x2 66#define XFS_LOG_PERM_RESERV 0x2
73#define XFS_LOG_RESV_ALL (XFS_LOG_NOSLEEP|XFS_LOG_PERM_RESERV)
74
75 67
76/* 68/*
77 * Flags to xfs_log_force() 69 * Flags to xfs_log_force()
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 34bcbf50789c..9bd3cdf11a87 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -32,7 +32,6 @@ struct xfs_mount;
32#define XLOG_MIN_ICLOGS 2 32#define XLOG_MIN_ICLOGS 2
33#define XLOG_MED_ICLOGS 4 33#define XLOG_MED_ICLOGS 4
34#define XLOG_MAX_ICLOGS 8 34#define XLOG_MAX_ICLOGS 8
35#define XLOG_CALLBACK_SIZE 10
36#define XLOG_HEADER_MAGIC_NUM 0xFEEDbabe /* Invalid cycle number */ 35#define XLOG_HEADER_MAGIC_NUM 0xFEEDbabe /* Invalid cycle number */
37#define XLOG_VERSION_1 1 36#define XLOG_VERSION_1 1
38#define XLOG_VERSION_2 2 /* Large IClogs, Log sunit */ 37#define XLOG_VERSION_2 2 /* Large IClogs, Log sunit */
@@ -149,9 +148,6 @@ struct xfs_mount;
149#define XLOG_WAS_CONT_TRANS 0x08 /* Cont this trans into new region */ 148#define XLOG_WAS_CONT_TRANS 0x08 /* Cont this trans into new region */
150#define XLOG_END_TRANS 0x10 /* End a continued transaction */ 149#define XLOG_END_TRANS 0x10 /* End a continued transaction */
151#define XLOG_UNMOUNT_TRANS 0x20 /* Unmount a filesystem transaction */ 150#define XLOG_UNMOUNT_TRANS 0x20 /* Unmount a filesystem transaction */
152#define XLOG_SKIP_TRANS (XLOG_COMMIT_TRANS | XLOG_CONTINUE_TRANS | \
153 XLOG_WAS_CONT_TRANS | XLOG_END_TRANS | \
154 XLOG_UNMOUNT_TRANS)
155 151
156#ifdef __KERNEL__ 152#ifdef __KERNEL__
157/* 153/*
@@ -506,6 +502,12 @@ extern int xlog_bread(xlog_t *, xfs_daddr_t, int, struct xfs_buf *);
506#define XLOG_TRACE_SLEEP_FLUSH 3 502#define XLOG_TRACE_SLEEP_FLUSH 3
507#define XLOG_TRACE_WAKE_FLUSH 4 503#define XLOG_TRACE_WAKE_FLUSH 4
508 504
505/*
506 * Unmount record type is used as a pseudo transaction type for the ticket.
507 * It's value must be outside the range of XFS_TRANS_* values.
508 */
509#define XLOG_UNMOUNT_REC_TYPE (-1U)
510
509#endif /* __KERNEL__ */ 511#endif /* __KERNEL__ */
510 512
511#endif /* __XFS_LOG_PRIV_H__ */ 513#endif /* __XFS_LOG_PRIV_H__ */
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index b2bd4be4200a..e5f396ff9a3d 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -331,7 +331,7 @@ typedef struct xfs_mount {
331 xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */ 331 xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */
332 lock_t m_agirotor_lock;/* .. and lock protecting it */ 332 lock_t m_agirotor_lock;/* .. and lock protecting it */
333 xfs_agnumber_t m_maxagi; /* highest inode alloc group */ 333 xfs_agnumber_t m_maxagi; /* highest inode alloc group */
334 uint m_ihsize; /* size of next field */ 334 size_t m_ihsize; /* size of next field */
335 struct xfs_ihash *m_ihash; /* fs private inode hash table*/ 335 struct xfs_ihash *m_ihash; /* fs private inode hash table*/
336 struct xfs_inode *m_inodes; /* active inode list */ 336 struct xfs_inode *m_inodes; /* active inode list */
337 struct list_head m_del_inodes; /* inodes to reclaim */ 337 struct list_head m_del_inodes; /* inodes to reclaim */
@@ -541,7 +541,8 @@ static inline xfs_mount_t *xfs_bhvtom(bhv_desc_t *bdp)
541#define XFS_VFSTOM(vfs) xfs_vfstom(vfs) 541#define XFS_VFSTOM(vfs) xfs_vfstom(vfs)
542static inline xfs_mount_t *xfs_vfstom(bhv_vfs_t *vfs) 542static inline xfs_mount_t *xfs_vfstom(bhv_vfs_t *vfs)
543{ 543{
544 return XFS_BHVTOM(bhv_lookup(VFS_BHVHEAD(vfs), &xfs_vfsops)); 544 return XFS_BHVTOM(bhv_lookup_range(VFS_BHVHEAD(vfs),
545 VFS_POSITION_XFS, VFS_POSITION_XFS));
545} 546}
546 547
547#define XFS_DADDR_TO_AGNO(mp,d) xfs_daddr_to_agno(mp,d) 548#define XFS_DADDR_TO_AGNO(mp,d) xfs_daddr_to_agno(mp,d)
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index acb853b33ebb..9dcb32aa4e2e 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -281,8 +281,6 @@ typedef struct xfs_qoff_logformat {
281 XFS_UQUOTA_CHKD|XFS_PQUOTA_ACCT|\ 281 XFS_UQUOTA_CHKD|XFS_PQUOTA_ACCT|\
282 XFS_OQUOTA_ENFD|XFS_OQUOTA_CHKD|\ 282 XFS_OQUOTA_ENFD|XFS_OQUOTA_CHKD|\
283 XFS_GQUOTA_ACCT) 283 XFS_GQUOTA_ACCT)
284#define XFS_MOUNT_QUOTA_MASK (XFS_MOUNT_QUOTA_ALL | XFS_UQUOTA_ACTIVE | \
285 XFS_GQUOTA_ACTIVE | XFS_PQUOTA_ACTIVE)
286 284
287 285
288/* 286/*
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 5a0b678956e0..880c73271c05 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -1948,7 +1948,7 @@ xfs_growfs_rt(
1948 */ 1948 */
1949 nrextents = nrblocks; 1949 nrextents = nrblocks;
1950 do_div(nrextents, in->extsize); 1950 do_div(nrextents, in->extsize);
1951 nrbmblocks = roundup_64(nrextents, NBBY * sbp->sb_blocksize); 1951 nrbmblocks = howmany_64(nrextents, NBBY * sbp->sb_blocksize);
1952 nrextslog = xfs_highbit32(nrextents); 1952 nrextslog = xfs_highbit32(nrextents);
1953 nrsumlevels = nrextslog + 1; 1953 nrsumlevels = nrextslog + 1;
1954 nrsumsize = (uint)sizeof(xfs_suminfo_t) * nrsumlevels * nrbmblocks; 1954 nrsumsize = (uint)sizeof(xfs_suminfo_t) * nrsumlevels * nrbmblocks;
@@ -1976,7 +1976,10 @@ xfs_growfs_rt(
 	if ((error = xfs_growfs_rt_alloc(mp, rsumblocks, nrsumblocks,
 			mp->m_sb.sb_rsumino)))
 		return error;
-	nmp = NULL;
+	/*
+	 * Allocate a new (fake) mount/sb.
+	 */
+	nmp = kmem_alloc(sizeof(*nmp), KM_SLEEP);
 	/*
 	 * Loop over the bitmap blocks.
 	 * We will do everything one bitmap block at a time.
@@ -1987,10 +1990,6 @@ xfs_growfs_rt(
 		((sbp->sb_rextents & ((1 << mp->m_blkbit_log) - 1)) != 0);
 	     bmbno < nrbmblocks;
 	     bmbno++) {
-		/*
-		 * Allocate a new (fake) mount/sb.
-		 */
-		nmp = kmem_alloc(sizeof(*nmp), KM_SLEEP);
 		*nmp = *mp;
 		nsbp = &nmp->m_sb;
 		/*
@@ -2018,13 +2017,13 @@ xfs_growfs_rt(
 		cancelflags = 0;
 		if ((error = xfs_trans_reserve(tp, 0,
 				XFS_GROWRTFREE_LOG_RES(nmp), 0, 0, 0)))
-			goto error_exit;
+			break;
 		/*
 		 * Lock out other callers by grabbing the bitmap inode lock.
 		 */
 		if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0,
 				XFS_ILOCK_EXCL, &ip)))
-			goto error_exit;
+			break;
 		ASSERT(ip == mp->m_rbmip);
 		/*
 		 * Update the bitmap inode's size.
@@ -2038,7 +2037,7 @@ xfs_growfs_rt(
 		 */
 		if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rsumino, 0,
 				XFS_ILOCK_EXCL, &ip)))
-			goto error_exit;
+			break;
 		ASSERT(ip == mp->m_rsumip);
 		/*
 		 * Update the summary inode's size.
@@ -2053,7 +2052,7 @@ xfs_growfs_rt(
 		    mp->m_rsumlevels != nmp->m_rsumlevels) {
 			error = xfs_rtcopy_summary(mp, nmp, tp);
 			if (error)
-				goto error_exit;
+				break;
 		}
 		/*
 		 * Update superblock fields.
@@ -2080,18 +2079,13 @@ xfs_growfs_rt(
 		error = xfs_rtfree_range(nmp, tp, sbp->sb_rextents,
 			nsbp->sb_rextents - sbp->sb_rextents, &bp, &sumbno);
 		if (error)
-			goto error_exit;
+			break;
 		/*
 		 * Mark more blocks free in the superblock.
 		 */
 		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS,
 			nsbp->sb_rextents - sbp->sb_rextents);
 		/*
-		 * Free the fake mp structure.
-		 */
-		kmem_free(nmp, sizeof(*nmp));
-		nmp = NULL;
-		/*
 		 * Update mp values into the real mp structure.
 		 */
 		mp->m_rsumlevels = nrsumlevels;
@@ -2101,15 +2095,15 @@ xfs_growfs_rt(
 		 */
 		xfs_trans_commit(tp, 0, NULL);
 	}
-	return 0;
+
+	if (error)
+		xfs_trans_cancel(tp, cancelflags);
 
 	/*
-	 * Error paths come here.
-	 */
-error_exit:
-	if (nmp)
-		kmem_free(nmp, sizeof(*nmp));
-	xfs_trans_cancel(tp, cancelflags);
+	 * Free the fake mp structure.
+	 */
+	kmem_free(nmp, sizeof(*nmp));
+
 	return error;
 }
 
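
The roundup_64()/howmany_64() swap in the first xfs_rtalloc.c hunk is the core of the bitmap-sizing fix: nrbmblocks must be a count of bitmap blocks, i.e. a ceiling division of the extent count by the bits per block, not the extent count rounded up to a multiple of that figure. A minimal standalone sketch of the two semantics, using plain C stand-ins rather than the kernel's do_div-based helpers:

#include <assert.h>
#include <stdint.h>

/* Ceiling division: how many y-sized units are needed to cover x. */
static uint64_t demo_howmany_64(uint64_t x, uint32_t y)
{
	return (x + y - 1) / y;
}

/* Round x up to the next multiple of y. */
static uint64_t demo_roundup_64(uint64_t x, uint32_t y)
{
	return demo_howmany_64(x, y) * y;
}

int main(void)
{
	uint64_t nrextents = 100000;		/* hypothetical rt extent count */
	uint32_t bits_per_block = 8 * 4096;	/* NBBY * sb_blocksize, 4k blocks */

	/* Number of bitmap blocks actually needed: 4. */
	assert(demo_howmany_64(nrextents, bits_per_block) == 4);
	/* What the replaced call computed: an extent count rounded up, 131072. */
	assert(demo_roundup_64(nrextents, bits_per_block) == 131072);
	return 0;
}
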
diff --git a/fs/xfs/xfs_sb.h b/fs/xfs/xfs_sb.h
index bf168a91ddb8..467854b45c8f 100644
--- a/fs/xfs/xfs_sb.h
+++ b/fs/xfs/xfs_sb.h
@@ -60,10 +60,6 @@ struct xfs_mount;
 					 XFS_SB_VERSION_LOGV2BIT | \
 					 XFS_SB_VERSION_SECTORBIT | \
 					 XFS_SB_VERSION_MOREBITSBIT)
-#define	XFS_SB_VERSION_OKSASHBITS	\
-	(XFS_SB_VERSION_NUMBITS | \
-	 XFS_SB_VERSION_REALFBITS | \
-	 XFS_SB_VERSION_OKSASHFBITS)
 #define	XFS_SB_VERSION_OKREALBITS	\
 	(XFS_SB_VERSION_NUMBITS | \
 	 XFS_SB_VERSION_OKREALFBITS | \
@@ -81,9 +77,6 @@ struct xfs_mount;
 #define XFS_SB_VERSION2_RESERVED2BIT	0x00000002
 #define XFS_SB_VERSION2_RESERVED4BIT	0x00000004
 #define XFS_SB_VERSION2_ATTR2BIT	0x00000008	/* Inline attr rework */
-#define	XFS_SB_VERSION2_SASHFBITS	0xff000000	/* Mask: features that
-							   require changing
-							   PROM and SASH */
 
 #define	XFS_SB_VERSION2_OKREALFBITS	\
 		(XFS_SB_VERSION2_ATTR2BIT)
@@ -238,12 +231,6 @@ static inline int xfs_sb_good_version(xfs_sb_t *sbp)
 }
 #endif /* __KERNEL__ */
 
-#define XFS_SB_GOOD_SASH_VERSION(sbp)	\
-	((((sbp)->sb_versionnum >= XFS_SB_VERSION_1) && \
-	  ((sbp)->sb_versionnum <= XFS_SB_VERSION_3)) || \
-	 ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \
-	  !((sbp)->sb_versionnum & ~XFS_SB_VERSION_OKSASHBITS)))
-
 #define	XFS_SB_VERSION_TONEW(v)	xfs_sb_version_tonew(v)
 static inline unsigned xfs_sb_version_tonew(unsigned v)
 {
@@ -461,15 +448,6 @@ static inline void xfs_sb_version_addattr2(xfs_sb_t *sbp)
  * File system sector to basic block conversions.
  */
 #define XFS_FSS_TO_BB(mp,sec)	((sec) << (mp)->m_sectbb_log)
-#define XFS_BB_TO_FSS(mp,bb)	\
-	(((bb) + (XFS_FSS_TO_BB(mp,1) - 1)) >> (mp)->m_sectbb_log)
-#define XFS_BB_TO_FSST(mp,bb)	((bb) >> (mp)->m_sectbb_log)
-
-/*
- * File system sector to byte conversions.
- */
-#define XFS_FSS_TO_B(mp,sectno)	((xfs_fsize_t)(sectno) << (mp)->m_sb.sb_sectlog)
-#define XFS_B_TO_FSST(mp,b)	(((__uint64_t)(b)) >> (mp)->m_sb.sb_sectlog)
 
 /*
  * File system block to basic block conversions.
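
The deleted XFS_SB_GOOD_SASH_VERSION macro was the last user of the OKSASHBITS mask removed above, and it relied on the same feature-bit idiom the surviving OKREALBITS checks keep: a version word is acceptable only if it sets no bits outside a known-good mask. A small standalone sketch of that idiom, with purely hypothetical names rather than XFS definitions:

#include <assert.h>

#define DEMO_VERSION_NUMBITS	0x000fu	/* low nibble: version number */
#define DEMO_VERSION_FEATBITS	0x0ff0u	/* known optional feature bits */
#define DEMO_VERSION_OKBITS	(DEMO_VERSION_NUMBITS | DEMO_VERSION_FEATBITS)

static int demo_good_version(unsigned int versionnum)
{
	/* Reject any superblock advertising bits we do not understand. */
	return (versionnum & ~DEMO_VERSION_OKBITS) == 0;
}

int main(void)
{
	assert(demo_good_version(0x0014));	/* version 4 plus a known feature */
	assert(!demo_good_version(0x8014));	/* unknown high bit set */
	return 0;
}
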
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 9dc88b380608..c68e00105d23 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -149,7 +149,6 @@ typedef struct xfs_item_ops {
 	void (*iop_unlock)(xfs_log_item_t *);
 	xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
 	void (*iop_push)(xfs_log_item_t *);
-	void (*iop_abort)(xfs_log_item_t *);
 	void (*iop_pushbuf)(xfs_log_item_t *);
 	void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
 } xfs_item_ops_t;
@@ -163,7 +162,6 @@ typedef struct xfs_item_ops {
 #define IOP_UNLOCK(ip)		(*(ip)->li_ops->iop_unlock)(ip)
 #define IOP_COMMITTED(ip, lsn)	(*(ip)->li_ops->iop_committed)(ip, lsn)
 #define IOP_PUSH(ip)		(*(ip)->li_ops->iop_push)(ip)
-#define IOP_ABORT(ip)		(*(ip)->li_ops->iop_abort)(ip)
 #define IOP_PUSHBUF(ip)		(*(ip)->li_ops->iop_pushbuf)(ip)
 #define IOP_COMMITTING(ip, lsn) (*(ip)->li_ops->iop_committing)(ip, lsn)
 
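
With iop_abort gone from xfs_item_ops_t, the IOP_ABORT() dispatch macro goes too; any leftover caller would now fail to compile, which is the point of removing both together. For readers unfamiliar with the pattern, here is a minimal standalone sketch of this kind of C ops vector, using hypothetical demo types rather than the XFS log-item structures:

#include <stdio.h>

struct demo_item;

struct demo_item_ops {
	void (*iop_push)(struct demo_item *);
	void (*iop_unlock)(struct demo_item *);
};

struct demo_item {
	const struct demo_item_ops	*li_ops;	/* per-type method table */
	const char			*name;
};

/* Dispatch macros in the same shape as the IOP_* macros above. */
#define DEMO_IOP_PUSH(ip)	(*(ip)->li_ops->iop_push)(ip)
#define DEMO_IOP_UNLOCK(ip)	(*(ip)->li_ops->iop_unlock)(ip)

static void demo_push(struct demo_item *ip)   { printf("push %s\n", ip->name); }
static void demo_unlock(struct demo_item *ip) { printf("unlock %s\n", ip->name); }

static const struct demo_item_ops demo_ops = {
	.iop_push   = demo_push,
	.iop_unlock = demo_unlock,
};

int main(void)
{
	struct demo_item it = { .li_ops = &demo_ops, .name = "demo" };

	DEMO_IOP_PUSH(&it);	/* dispatches to demo_push() */
	DEMO_IOP_UNLOCK(&it);
	return 0;
}

Each XFS log item type supplies its own table of this shape; the IOP_* macros merely hide the double indirection through li_ops.
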
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 558c87ff0c41..fc39b166d403 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -276,7 +276,7 @@ xfs_trans_update_ail(
 	xfs_mount_t	*mp,
 	xfs_log_item_t	*lip,
 	xfs_lsn_t	lsn,
-	unsigned long	s)
+	unsigned long	s) __releases(mp->m_ail_lock)
 {
 	xfs_ail_entry_t		*ailp;
 	xfs_log_item_t		*dlip=NULL;
@@ -328,7 +328,7 @@ void
 xfs_trans_delete_ail(
 	xfs_mount_t	*mp,
 	xfs_log_item_t	*lip,
-	unsigned long	s)
+	unsigned long	s) __releases(mp->m_ail_lock)
 {
 	xfs_ail_entry_t		*ailp;
 	xfs_log_item_t		*dlip;
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 13edab8a9e94..447ac4308c91 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -46,11 +46,13 @@ xfs_log_busy_slot_t *xfs_trans_add_busy(xfs_trans_t *tp,
 /*
  * From xfs_trans_ail.c
  */
-void			xfs_trans_update_ail(struct xfs_mount *,
-				     struct xfs_log_item *, xfs_lsn_t,
-				     unsigned long);
-void			xfs_trans_delete_ail(struct xfs_mount *,
-				     struct xfs_log_item *, unsigned long);
+void			xfs_trans_update_ail(struct xfs_mount *mp,
+				     struct xfs_log_item *lip, xfs_lsn_t lsn,
+				     unsigned long s)
+				     __releases(mp->m_ail_lock);
+void			xfs_trans_delete_ail(struct xfs_mount *mp,
+				     struct xfs_log_item *lip, unsigned long s)
+				     __releases(mp->m_ail_lock);
 struct xfs_log_item	*xfs_trans_first_ail(struct xfs_mount *, int *);
 struct xfs_log_item	*xfs_trans_next_ail(struct xfs_mount *,
 				     struct xfs_log_item *, int *, int *);
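
Both AIL helpers are entered with m_ail_lock held and return with it dropped; the __releases() annotation lets sparse verify that asymmetric locking contract while compiling away to nothing in a normal build. A minimal userspace sketch of the idea, with a pthread mutex standing in for the AIL lock (the macro definition shown is the usual sparse idiom, not copied from this tree):

#ifdef __CHECKER__
# define __releases(x)	__attribute__((context(x, 1, 0)))
#else
# define __releases(x)
#endif

#include <pthread.h>

static pthread_mutex_t ail_lock = PTHREAD_MUTEX_INITIALIZER;
static int ail_len;

/* Annotated on the prototype, as in xfs_trans_priv.h above. */
static void demo_delete_ail(void) __releases(ail_lock);

/* Caller acquires ail_lock; the callee is responsible for dropping it. */
static void demo_delete_ail(void)
{
	ail_len--;
	pthread_mutex_unlock(&ail_lock);
}

int main(void)
{
	ail_len = 1;
	pthread_mutex_lock(&ail_lock);
	demo_delete_ail();		/* returns with ail_lock released */
	return ail_len;			/* 0 on success */
}
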
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index a34796e57afb..62336a4cc5a4 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -1922,7 +1922,7 @@ xfs_showargs(
 	}
 
 	if (mp->m_flags & XFS_MOUNT_IHASHSIZE)
-		seq_printf(m, "," MNTOPT_IHASHSIZE "=%d", mp->m_ihsize);
+		seq_printf(m, "," MNTOPT_IHASHSIZE "=%d", (int)mp->m_ihsize);
 
 	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
 		seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk",
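
The new cast follows from the xfs_mount.h hunk above that turned m_ihsize from uint into size_t: "%d" expects an int, and passing a 64-bit size_t through it is undefined varargs behaviour. A tiny userspace illustration of the two safe options, with plain printf standing in for seq_printf:

#include <stdio.h>

int main(void)
{
	size_t ihsize = 512;	/* hypothetical inode hash size */

	printf("ihashsize=%d\n", (int)ihsize);	/* explicit narrowing cast, as in the patch */
	printf("ihashsize=%zu\n", ihsize);	/* C99 size_t conversion */
	return 0;
}
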
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 23cfa5837728..061e2ffdd1de 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -2366,10 +2366,15 @@ xfs_remove(
 
 	namelen = VNAMELEN(dentry);
 
+	if (!xfs_get_dir_entry(dentry, &ip)) {
+		dm_di_mode = ip->i_d.di_mode;
+		IRELE(ip);
+	}
+
 	if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_REMOVE)) {
 		error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, dir_vp,
 					DM_RIGHT_NULL, NULL, DM_RIGHT_NULL,
-					name, NULL, 0, 0, 0);
+					name, NULL, dm_di_mode, 0, 0);
 		if (error)
 			return error;
 	}
@@ -2995,7 +3000,7 @@ xfs_rmdir(
 	int			cancel_flags;
 	int			committed;
 	bhv_vnode_t		*dir_vp;
-	int			dm_di_mode = 0;
+	int			dm_di_mode = S_IFDIR;
 	int			last_cdp_link;
 	int			namelen;
 	uint			resblks;
@@ -3010,11 +3015,16 @@ xfs_rmdir(
 		return XFS_ERROR(EIO);
 	namelen = VNAMELEN(dentry);
 
+	if (!xfs_get_dir_entry(dentry, &cdp)) {
+		dm_di_mode = cdp->i_d.di_mode;
+		IRELE(cdp);
+	}
+
 	if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_REMOVE)) {
 		error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE,
 					dir_vp, DM_RIGHT_NULL,
 					NULL, DM_RIGHT_NULL,
-					name, NULL, 0, 0, 0);
+					name, NULL, dm_di_mode, 0, 0);
 		if (error)
 			return XFS_ERROR(error);
 	}
@@ -3834,7 +3844,9 @@ xfs_reclaim(
 		XFS_MOUNT_ILOCK(mp);
 		vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip));
 		list_add_tail(&ip->i_reclaim, &mp->m_del_inodes);
+		spin_lock(&ip->i_flags_lock);
 		ip->i_flags |= XFS_IRECLAIMABLE;
+		spin_unlock(&ip->i_flags_lock);
 		XFS_MOUNT_IUNLOCK(mp);
 	}
 	return 0;
@@ -3859,8 +3871,10 @@ xfs_finish_reclaim(
 	 * us.
 	 */
 	write_lock(&ih->ih_lock);
+	spin_lock(&ip->i_flags_lock);
 	if ((ip->i_flags & XFS_IRECLAIM) ||
 	    (!(ip->i_flags & XFS_IRECLAIMABLE) && vp == NULL)) {
+		spin_unlock(&ip->i_flags_lock);
 		write_unlock(&ih->ih_lock);
 		if (locked) {
 			xfs_ifunlock(ip);
@@ -3869,6 +3883,7 @@ xfs_finish_reclaim(
 		return 1;
 	}
 	ip->i_flags |= XFS_IRECLAIM;
+	spin_unlock(&ip->i_flags_lock);
 	write_unlock(&ih->ih_lock);
 
 	/*
@@ -4272,7 +4287,7 @@ xfs_free_file_space(
 	xfs_mount_t		*mp;
 	int			nimap;
 	uint			resblks;
-	int			rounding;
+	uint			rounding;
 	int			rt;
 	xfs_fileoff_t		startoffset_fsb;
 	xfs_trans_t		*tp;
@@ -4313,8 +4328,7 @@ xfs_free_file_space(
 		vn_iowait(vp);	/* wait for the completion of any pending DIOs */
 	}
 
-	rounding = MAX((__uint8_t)(1 << mp->m_sb.sb_blocklog),
-		       (__uint8_t)NBPP);
+	rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, NBPP);
 	ilen = len + (offset & (rounding - 1));
 	ioffset = offset & ~(rounding - 1);
 	if (ilen & (rounding - 1))
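
The MAX((__uint8_t)...) expression replaced in the last hunk compared its operands only after truncating them to eight bits, which discards any block or page size of 256 bytes or more; max_t() performs the comparison at the requested width. A standalone sketch of the difference (the macro below is a simplified stand-in that double-evaluates its arguments, unlike the kernel's max_t):

#include <assert.h>

#define demo_max_t(type, a, b)	((type)(a) > (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	unsigned int blocksize = 1u << 12;	/* 4096-byte filesystem blocks */
	unsigned int pagesize  = 1u << 16;	/* 64k pages, e.g. some ia64 configs */

	/* Old style: both values truncate to 0 in an 8-bit type. */
	assert(demo_max_t(unsigned char, blocksize, pagesize) == 0);

	/* New style: comparison happens at full unsigned int width. */
	assert(demo_max_t(unsigned int, blocksize, pagesize) == pagesize);
	return 0;
}
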