author     Jiri Kosina <jkosina@suse.cz>  2010-08-04 09:14:38 -0400
committer  Jiri Kosina <jkosina@suse.cz>  2010-08-04 09:14:38 -0400
commit     d790d4d583aeaed9fc6f8a9f4d9f8ce6b1c15c7f
tree       854ab394486288d40fa8179cbfaf66e8bdc44b0f /fs/xfs
parent     73b2c7165b76b20eb1290e7efebc33cfd21db1ca
parent     3a09b1be53d23df780a0cd0e4087a05e2ca4a00c
Merge branch 'master' into for-next
Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/Makefile | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_acl.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 611
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.h | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 67
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.h | 119
-rw-r--r--  fs/xfs/linux-2.6/xfs_dmapi_priv.h | 28
-rw-r--r--  fs/xfs/linux-2.6/xfs_export.c | 15
-rw-r--r--  fs/xfs/linux-2.6/xfs_file.c | 104
-rw-r--r--  fs/xfs/linux-2.6/xfs_fs_subr.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_fs_subr.h | 25
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c | 34
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl32.c | 21
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c | 10
-rw-r--r--  fs/xfs/linux-2.6/xfs_linux.h | 1
-rw-r--r--  fs/xfs/linux-2.6/xfs_quotaops.c | 1
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 173
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.h | 7
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c | 179
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.h | 5
-rw-r--r--  fs/xfs/linux-2.6/xfs_trace.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_trace.h | 131
-rw-r--r--  fs/xfs/quota/xfs_dquot.c | 114
-rw-r--r--  fs/xfs/quota/xfs_dquot_item.c | 301
-rw-r--r--  fs/xfs/quota/xfs_qm.c | 40
-rw-r--r--  fs/xfs/quota/xfs_qm_bhv.c | 10
-rw-r--r--  fs/xfs/quota/xfs_qm_stats.c | 10
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c | 112
-rw-r--r--  fs/xfs/quota/xfs_trans_dquot.c | 35
-rw-r--r--  fs/xfs/support/debug.c | 1
-rw-r--r--  fs/xfs/xfs_alloc.c | 15
-rw-r--r--  fs/xfs/xfs_alloc.h | 20
-rw-r--r--  fs/xfs/xfs_alloc_btree.c | 5
-rw-r--r--  fs/xfs/xfs_attr.c | 91
-rw-r--r--  fs/xfs/xfs_attr_leaf.c | 5
-rw-r--r--  fs/xfs/xfs_bmap.c | 327
-rw-r--r--  fs/xfs/xfs_bmap.h | 37
-rw-r--r--  fs/xfs/xfs_bmap_btree.c | 5
-rw-r--r--  fs/xfs/xfs_btree.c | 5
-rw-r--r--  fs/xfs/xfs_buf_item.c | 228
-rw-r--r--  fs/xfs/xfs_buf_item.h | 2
-rw-r--r--  fs/xfs/xfs_da_btree.c | 20
-rw-r--r--  fs/xfs/xfs_dfrag.c | 21
-rw-r--r--  fs/xfs/xfs_dir2.c | 11
-rw-r--r--  fs/xfs/xfs_dir2_block.c | 8
-rw-r--r--  fs/xfs/xfs_dir2_data.c | 2
-rw-r--r--  fs/xfs/xfs_dir2_leaf.c | 4
-rw-r--r--  fs/xfs/xfs_dir2_node.c | 2
-rw-r--r--  fs/xfs/xfs_dir2_sf.c | 2
-rw-r--r--  fs/xfs/xfs_dmapi.h | 170
-rw-r--r--  fs/xfs/xfs_dmops.c | 55
-rw-r--r--  fs/xfs/xfs_error.c | 4
-rw-r--r--  fs/xfs/xfs_extfree_item.c | 278
-rw-r--r--  fs/xfs/xfs_filestream.c | 84
-rw-r--r--  fs/xfs/xfs_filestream.h | 82
-rw-r--r--  fs/xfs/xfs_fsops.c | 7
-rw-r--r--  fs/xfs/xfs_ialloc.c | 146
-rw-r--r--  fs/xfs/xfs_ialloc_btree.c | 4
-rw-r--r--  fs/xfs/xfs_iget.c | 118
-rw-r--r--  fs/xfs/xfs_inode.c | 65
-rw-r--r--  fs/xfs/xfs_inode.h | 10
-rw-r--r--  fs/xfs/xfs_inode_item.c | 273
-rw-r--r--  fs/xfs/xfs_inode_item.h | 12
-rw-r--r--  fs/xfs/xfs_iomap.c | 76
-rw-r--r--  fs/xfs/xfs_iomap.h | 22
-rw-r--r--  fs/xfs/xfs_itable.c | 293
-rw-r--r--  fs/xfs/xfs_itable.h | 17
-rw-r--r--  fs/xfs/xfs_log.c | 16
-rw-r--r--  fs/xfs/xfs_log.h | 11
-rw-r--r--  fs/xfs/xfs_log_cil.c | 4
-rw-r--r--  fs/xfs/xfs_log_recover.c | 44
-rw-r--r--  fs/xfs/xfs_mount.c | 5
-rw-r--r--  fs/xfs/xfs_mount.h | 71
-rw-r--r--  fs/xfs/xfs_rename.c | 63
-rw-r--r--  fs/xfs/xfs_rtalloc.c | 13
-rw-r--r--  fs/xfs/xfs_rw.c | 15
-rw-r--r--  fs/xfs/xfs_trans.c | 211
-rw-r--r--  fs/xfs/xfs_trans.h | 117
-rw-r--r--  fs/xfs/xfs_trans_ail.c | 1
-rw-r--r--  fs/xfs/xfs_trans_buf.c | 75
-rw-r--r--  fs/xfs/xfs_trans_extfree.c | 23
-rw-r--r--  fs/xfs/xfs_trans_inode.c | 76
-rw-r--r--  fs/xfs/xfs_trans_item.c | 441
-rw-r--r--  fs/xfs/xfs_trans_priv.h | 18
-rw-r--r--  fs/xfs/xfs_utils.c | 87
-rw-r--r--  fs/xfs/xfs_utils.h | 1
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 295
87 files changed, 1849 insertions, 4436 deletions
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index c8fb13f83b3f..0dce969d6cad 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -87,11 +87,9 @@ xfs-y += xfs_alloc.o \
87 xfs_trans_buf.o \ 87 xfs_trans_buf.o \
88 xfs_trans_extfree.o \ 88 xfs_trans_extfree.o \
89 xfs_trans_inode.o \ 89 xfs_trans_inode.o \
90 xfs_trans_item.o \
91 xfs_utils.o \ 90 xfs_utils.o \
92 xfs_vnodeops.o \ 91 xfs_vnodeops.o \
93 xfs_rw.o \ 92 xfs_rw.o
94 xfs_dmops.o
95 93
96xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o 94xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o
97 95
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c
index 9f769b5b38fc..b2771862fd3d 100644
--- a/fs/xfs/linux-2.6/xfs_acl.c
+++ b/fs/xfs/linux-2.6/xfs_acl.c
@@ -225,7 +225,7 @@ xfs_check_acl(struct inode *inode, int mask)
225 struct posix_acl *acl; 225 struct posix_acl *acl;
226 int error = -EAGAIN; 226 int error = -EAGAIN;
227 227
228 xfs_itrace_entry(ip); 228 trace_xfs_check_acl(ip);
229 229
230 /* 230 /*
231 * If there is no attribute fork no ACL exists on this inode and 231 * If there is no attribute fork no ACL exists on this inode and
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 34640d6dbdcb..d24e78f32f3e 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -21,19 +21,12 @@
21#include "xfs_inum.h" 21#include "xfs_inum.h"
22#include "xfs_sb.h" 22#include "xfs_sb.h"
23#include "xfs_ag.h" 23#include "xfs_ag.h"
24#include "xfs_dir2.h"
25#include "xfs_trans.h" 24#include "xfs_trans.h"
26#include "xfs_dmapi.h"
27#include "xfs_mount.h" 25#include "xfs_mount.h"
28#include "xfs_bmap_btree.h" 26#include "xfs_bmap_btree.h"
29#include "xfs_alloc_btree.h"
30#include "xfs_ialloc_btree.h"
31#include "xfs_dir2_sf.h"
32#include "xfs_attr_sf.h"
33#include "xfs_dinode.h" 27#include "xfs_dinode.h"
34#include "xfs_inode.h" 28#include "xfs_inode.h"
35#include "xfs_alloc.h" 29#include "xfs_alloc.h"
36#include "xfs_btree.h"
37#include "xfs_error.h" 30#include "xfs_error.h"
38#include "xfs_rw.h" 31#include "xfs_rw.h"
39#include "xfs_iomap.h" 32#include "xfs_iomap.h"
@@ -92,18 +85,15 @@ void
92xfs_count_page_state( 85xfs_count_page_state(
93 struct page *page, 86 struct page *page,
94 int *delalloc, 87 int *delalloc,
95 int *unmapped,
96 int *unwritten) 88 int *unwritten)
97{ 89{
98 struct buffer_head *bh, *head; 90 struct buffer_head *bh, *head;
99 91
100 *delalloc = *unmapped = *unwritten = 0; 92 *delalloc = *unwritten = 0;
101 93
102 bh = head = page_buffers(page); 94 bh = head = page_buffers(page);
103 do { 95 do {
104 if (buffer_uptodate(bh) && !buffer_mapped(bh)) 96 if (buffer_unwritten(bh))
105 (*unmapped) = 1;
106 else if (buffer_unwritten(bh))
107 (*unwritten) = 1; 97 (*unwritten) = 1;
108 else if (buffer_delay(bh)) 98 else if (buffer_delay(bh))
109 (*delalloc) = 1; 99 (*delalloc) = 1;
@@ -212,23 +202,17 @@ xfs_setfilesize(
212} 202}
213 203
214/* 204/*
215 * Schedule IO completion handling on a xfsdatad if this was 205 * Schedule IO completion handling on the final put of an ioend.
216 * the final hold on this ioend. If we are asked to wait,
217 * flush the workqueue.
218 */ 206 */
219STATIC void 207STATIC void
220xfs_finish_ioend( 208xfs_finish_ioend(
221 xfs_ioend_t *ioend, 209 struct xfs_ioend *ioend)
222 int wait)
223{ 210{
224 if (atomic_dec_and_test(&ioend->io_remaining)) { 211 if (atomic_dec_and_test(&ioend->io_remaining)) {
225 struct workqueue_struct *wq; 212 if (ioend->io_type == IO_UNWRITTEN)
226 213 queue_work(xfsconvertd_workqueue, &ioend->io_work);
227 wq = (ioend->io_type == IO_UNWRITTEN) ? 214 else
228 xfsconvertd_workqueue : xfsdatad_workqueue; 215 queue_work(xfsdatad_workqueue, &ioend->io_work);
229 queue_work(wq, &ioend->io_work);
230 if (wait)
231 flush_workqueue(wq);
232 } 216 }
233} 217}
234 218
@@ -272,11 +256,25 @@ xfs_end_io(
272 */ 256 */
273 if (error == EAGAIN) { 257 if (error == EAGAIN) {
274 atomic_inc(&ioend->io_remaining); 258 atomic_inc(&ioend->io_remaining);
275 xfs_finish_ioend(ioend, 0); 259 xfs_finish_ioend(ioend);
276 /* ensure we don't spin on blocked ioends */ 260 /* ensure we don't spin on blocked ioends */
277 delay(1); 261 delay(1);
278 } else 262 } else {
263 if (ioend->io_iocb)
264 aio_complete(ioend->io_iocb, ioend->io_result, 0);
279 xfs_destroy_ioend(ioend); 265 xfs_destroy_ioend(ioend);
266 }
267}
268
269/*
270 * Call IO completion handling in caller context on the final put of an ioend.
271 */
272STATIC void
273xfs_finish_ioend_sync(
274 struct xfs_ioend *ioend)
275{
276 if (atomic_dec_and_test(&ioend->io_remaining))
277 xfs_end_io(&ioend->io_work);
280} 278}
281 279
282/* 280/*
@@ -309,6 +307,8 @@ xfs_alloc_ioend(
309 atomic_inc(&XFS_I(ioend->io_inode)->i_iocount); 307 atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
310 ioend->io_offset = 0; 308 ioend->io_offset = 0;
311 ioend->io_size = 0; 309 ioend->io_size = 0;
310 ioend->io_iocb = NULL;
311 ioend->io_result = 0;
312 312
313 INIT_WORK(&ioend->io_work, xfs_end_io); 313 INIT_WORK(&ioend->io_work, xfs_end_io);
314 return ioend; 314 return ioend;
@@ -358,7 +358,7 @@ xfs_end_bio(
358 bio->bi_end_io = NULL; 358 bio->bi_end_io = NULL;
359 bio_put(bio); 359 bio_put(bio);
360 360
361 xfs_finish_ioend(ioend, 0); 361 xfs_finish_ioend(ioend);
362} 362}
363 363
364STATIC void 364STATIC void
@@ -500,7 +500,7 @@ xfs_submit_ioend(
500 } 500 }
501 if (bio) 501 if (bio)
502 xfs_submit_ioend_bio(wbc, ioend, bio); 502 xfs_submit_ioend_bio(wbc, ioend, bio);
503 xfs_finish_ioend(ioend, 0); 503 xfs_finish_ioend(ioend);
504 } while ((ioend = next) != NULL); 504 } while ((ioend = next) != NULL);
505} 505}
506 506
@@ -614,31 +614,30 @@ xfs_map_at_offset(
614STATIC unsigned int 614STATIC unsigned int
615xfs_probe_page( 615xfs_probe_page(
616 struct page *page, 616 struct page *page,
617 unsigned int pg_offset, 617 unsigned int pg_offset)
618 int mapped)
619{ 618{
619 struct buffer_head *bh, *head;
620 int ret = 0; 620 int ret = 0;
621 621
622 if (PageWriteback(page)) 622 if (PageWriteback(page))
623 return 0; 623 return 0;
624 if (!PageDirty(page))
625 return 0;
626 if (!page->mapping)
627 return 0;
628 if (!page_has_buffers(page))
629 return 0;
624 630
625 if (page->mapping && PageDirty(page)) { 631 bh = head = page_buffers(page);
626 if (page_has_buffers(page)) { 632 do {
627 struct buffer_head *bh, *head; 633 if (!buffer_uptodate(bh))
628 634 break;
629 bh = head = page_buffers(page); 635 if (!buffer_mapped(bh))
630 do { 636 break;
631 if (!buffer_uptodate(bh)) 637 ret += bh->b_size;
632 break; 638 if (ret >= pg_offset)
633 if (mapped != buffer_mapped(bh)) 639 break;
634 break; 640 } while ((bh = bh->b_this_page) != head);
635 ret += bh->b_size;
636 if (ret >= pg_offset)
637 break;
638 } while ((bh = bh->b_this_page) != head);
639 } else
640 ret = mapped ? 0 : PAGE_CACHE_SIZE;
641 }
642 641
643 return ret; 642 return ret;
644} 643}
@@ -648,8 +647,7 @@ xfs_probe_cluster(
648 struct inode *inode, 647 struct inode *inode,
649 struct page *startpage, 648 struct page *startpage,
650 struct buffer_head *bh, 649 struct buffer_head *bh,
651 struct buffer_head *head, 650 struct buffer_head *head)
652 int mapped)
653{ 651{
654 struct pagevec pvec; 652 struct pagevec pvec;
655 pgoff_t tindex, tlast, tloff; 653 pgoff_t tindex, tlast, tloff;
@@ -658,7 +656,7 @@ xfs_probe_cluster(
658 656
659 /* First sum forwards in this page */ 657 /* First sum forwards in this page */
660 do { 658 do {
661 if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh))) 659 if (!buffer_uptodate(bh) || !buffer_mapped(bh))
662 return total; 660 return total;
663 total += bh->b_size; 661 total += bh->b_size;
664 } while ((bh = bh->b_this_page) != head); 662 } while ((bh = bh->b_this_page) != head);
@@ -692,7 +690,7 @@ xfs_probe_cluster(
692 pg_offset = PAGE_CACHE_SIZE; 690 pg_offset = PAGE_CACHE_SIZE;
693 691
694 if (page->index == tindex && trylock_page(page)) { 692 if (page->index == tindex && trylock_page(page)) {
695 pg_len = xfs_probe_page(page, pg_offset, mapped); 693 pg_len = xfs_probe_page(page, pg_offset);
696 unlock_page(page); 694 unlock_page(page);
697 } 695 }
698 696
@@ -761,7 +759,6 @@ xfs_convert_page(
761 struct xfs_bmbt_irec *imap, 759 struct xfs_bmbt_irec *imap,
762 xfs_ioend_t **ioendp, 760 xfs_ioend_t **ioendp,
763 struct writeback_control *wbc, 761 struct writeback_control *wbc,
764 int startio,
765 int all_bh) 762 int all_bh)
766{ 763{
767 struct buffer_head *bh, *head; 764 struct buffer_head *bh, *head;
@@ -832,19 +829,14 @@ xfs_convert_page(
832 ASSERT(imap->br_startblock != DELAYSTARTBLOCK); 829 ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
833 830
834 xfs_map_at_offset(inode, bh, imap, offset); 831 xfs_map_at_offset(inode, bh, imap, offset);
835 if (startio) { 832 xfs_add_to_ioend(inode, bh, offset, type,
836 xfs_add_to_ioend(inode, bh, offset, 833 ioendp, done);
837 type, ioendp, done); 834
838 } else {
839 set_buffer_dirty(bh);
840 unlock_buffer(bh);
841 mark_buffer_dirty(bh);
842 }
843 page_dirty--; 835 page_dirty--;
844 count++; 836 count++;
845 } else { 837 } else {
846 type = IO_NEW; 838 type = IO_NEW;
847 if (buffer_mapped(bh) && all_bh && startio) { 839 if (buffer_mapped(bh) && all_bh) {
848 lock_buffer(bh); 840 lock_buffer(bh);
849 xfs_add_to_ioend(inode, bh, offset, 841 xfs_add_to_ioend(inode, bh, offset,
850 type, ioendp, done); 842 type, ioendp, done);
@@ -859,14 +851,12 @@ xfs_convert_page(
859 if (uptodate && bh == head) 851 if (uptodate && bh == head)
860 SetPageUptodate(page); 852 SetPageUptodate(page);
861 853
862 if (startio) { 854 if (count) {
863 if (count) { 855 wbc->nr_to_write--;
864 wbc->nr_to_write--; 856 if (wbc->nr_to_write <= 0)
865 if (wbc->nr_to_write <= 0) 857 done = 1;
866 done = 1;
867 }
868 xfs_start_page_writeback(page, !page_dirty, count);
869 } 858 }
859 xfs_start_page_writeback(page, !page_dirty, count);
870 860
871 return done; 861 return done;
872 fail_unlock_page: 862 fail_unlock_page:
@@ -886,7 +876,6 @@ xfs_cluster_write(
886 struct xfs_bmbt_irec *imap, 876 struct xfs_bmbt_irec *imap,
887 xfs_ioend_t **ioendp, 877 xfs_ioend_t **ioendp,
888 struct writeback_control *wbc, 878 struct writeback_control *wbc,
889 int startio,
890 int all_bh, 879 int all_bh,
891 pgoff_t tlast) 880 pgoff_t tlast)
892{ 881{
@@ -902,7 +891,7 @@ xfs_cluster_write(
902 891
903 for (i = 0; i < pagevec_count(&pvec); i++) { 892 for (i = 0; i < pagevec_count(&pvec); i++) {
904 done = xfs_convert_page(inode, pvec.pages[i], tindex++, 893 done = xfs_convert_page(inode, pvec.pages[i], tindex++,
905 imap, ioendp, wbc, startio, all_bh); 894 imap, ioendp, wbc, all_bh);
906 if (done) 895 if (done)
907 break; 896 break;
908 } 897 }
@@ -981,7 +970,7 @@ xfs_aops_discard_page(
981 */ 970 */
982 error = xfs_bmapi(NULL, ip, offset_fsb, 1, 971 error = xfs_bmapi(NULL, ip, offset_fsb, 1,
983 XFS_BMAPI_ENTIRE, NULL, 0, &imap, 972 XFS_BMAPI_ENTIRE, NULL, 0, &imap,
984 &nimaps, NULL, NULL); 973 &nimaps, NULL);
985 974
986 if (error) { 975 if (error) {
987 /* something screwed, just bail */ 976 /* something screwed, just bail */
@@ -1009,7 +998,7 @@ xfs_aops_discard_page(
1009 */ 998 */
1010 xfs_bmap_init(&flist, &firstblock); 999 xfs_bmap_init(&flist, &firstblock);
1011 error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock, 1000 error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock,
1012 &flist, NULL, &done); 1001 &flist, &done);
1013 1002
1014 ASSERT(!flist.xbf_count && !flist.xbf_first); 1003 ASSERT(!flist.xbf_count && !flist.xbf_first);
1015 if (error) { 1004 if (error) {
@@ -1032,50 +1021,66 @@ out_invalidate:
1032} 1021}
1033 1022
1034/* 1023/*
1035 * Calling this without startio set means we are being asked to make a dirty 1024 * Write out a dirty page.
1036 * page ready for freeing it's buffers. When called with startio set then 1025 *
1037 * we are coming from writepage. 1026 * For delalloc space on the page we need to allocate space and flush it.
1027 * For unwritten space on the page we need to start the conversion to
1028 * regular allocated space.
1029 * For any other dirty buffer heads on the page we should flush them.
1038 * 1030 *
1039 * When called with startio set it is important that we write the WHOLE 1031 * If we detect that a transaction would be required to flush the page, we
1040 * page if possible. 1032 * have to check the process flags first, if we are already in a transaction
1041 * The bh->b_state's cannot know if any of the blocks or which block for 1033 * or disk I/O during allocations is off, we need to fail the writepage and
1042 * that matter are dirty due to mmap writes, and therefore bh uptodate is 1034 * redirty the page.
1043 * only valid if the page itself isn't completely uptodate. Some layers
1044 * may clear the page dirty flag prior to calling write page, under the
1045 * assumption the entire page will be written out; by not writing out the
1046 * whole page the page can be reused before all valid dirty data is
1047 * written out. Note: in the case of a page that has been dirty'd by
1048 * mapwrite and but partially setup by block_prepare_write the
1049 * bh->b_states's will not agree and only ones setup by BPW/BCW will have
1050 * valid state, thus the whole page must be written out thing.
1051 */ 1035 */
1052
1053STATIC int 1036STATIC int
1054xfs_page_state_convert( 1037xfs_vm_writepage(
1055 struct inode *inode, 1038 struct page *page,
1056 struct page *page, 1039 struct writeback_control *wbc)
1057 struct writeback_control *wbc,
1058 int startio,
1059 int unmapped) /* also implies page uptodate */
1060{ 1040{
1041 struct inode *inode = page->mapping->host;
1042 int delalloc, unwritten;
1061 struct buffer_head *bh, *head; 1043 struct buffer_head *bh, *head;
1062 struct xfs_bmbt_irec imap; 1044 struct xfs_bmbt_irec imap;
1063 xfs_ioend_t *ioend = NULL, *iohead = NULL; 1045 xfs_ioend_t *ioend = NULL, *iohead = NULL;
1064 loff_t offset; 1046 loff_t offset;
1065 unsigned long p_offset = 0;
1066 unsigned int type; 1047 unsigned int type;
1067 __uint64_t end_offset; 1048 __uint64_t end_offset;
1068 pgoff_t end_index, last_index; 1049 pgoff_t end_index, last_index;
1069 ssize_t size, len; 1050 ssize_t size, len;
1070 int flags, err, imap_valid = 0, uptodate = 1; 1051 int flags, err, imap_valid = 0, uptodate = 1;
1071 int page_dirty, count = 0; 1052 int count = 0;
1072 int trylock = 0; 1053 int all_bh = 0;
1073 int all_bh = unmapped;
1074 1054
1075 if (startio) { 1055 trace_xfs_writepage(inode, page, 0);
1076 if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking) 1056
1077 trylock |= BMAPI_TRYLOCK; 1057 ASSERT(page_has_buffers(page));
1078 } 1058
1059 /*
1060 * Refuse to write the page out if we are called from reclaim context.
1061 *
1062 * This avoids stack overflows when called from deeply used stacks in
1063 * random callers for direct reclaim or memcg reclaim. We explicitly
1064 * allow reclaim from kswapd as the stack usage there is relatively low.
1065 *
1066 * This should really be done by the core VM, but until that happens
1067 * filesystems like XFS, btrfs and ext4 have to take care of this
1068 * by themselves.
1069 */
1070 if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC)
1071 goto out_fail;
1072
1073 /*
1074 * We need a transaction if there are delalloc or unwritten buffers
1075 * on the page.
1076 *
1077 * If we need a transaction and the process flags say we are already
1078 * in a transaction, or no IO is allowed then mark the page dirty
1079 * again and leave the page as is.
1080 */
1081 xfs_count_page_state(page, &delalloc, &unwritten);
1082 if ((current->flags & PF_FSTRANS) && (delalloc || unwritten))
1083 goto out_fail;
1079 1084
1080 /* Is this page beyond the end of the file? */ 1085 /* Is this page beyond the end of the file? */
1081 offset = i_size_read(inode); 1086 offset = i_size_read(inode);
@@ -1084,50 +1089,33 @@ xfs_page_state_convert(
1084 if (page->index >= end_index) { 1089 if (page->index >= end_index) {
1085 if ((page->index >= end_index + 1) || 1090 if ((page->index >= end_index + 1) ||
1086 !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) { 1091 !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
1087 if (startio) 1092 unlock_page(page);
1088 unlock_page(page);
1089 return 0; 1093 return 0;
1090 } 1094 }
1091 } 1095 }
1092 1096
1093 /*
1094 * page_dirty is initially a count of buffers on the page before
1095 * EOF and is decremented as we move each into a cleanable state.
1096 *
1097 * Derivation:
1098 *
1099 * End offset is the highest offset that this page should represent.
1100 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
1101 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
1102 * hence give us the correct page_dirty count. On any other page,
1103 * it will be zero and in that case we need page_dirty to be the
1104 * count of buffers on the page.
1105 */
1106 end_offset = min_t(unsigned long long, 1097 end_offset = min_t(unsigned long long,
1107 (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset); 1098 (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
1099 offset);
1108 len = 1 << inode->i_blkbits; 1100 len = 1 << inode->i_blkbits;
1109 p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
1110 PAGE_CACHE_SIZE);
1111 p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
1112 page_dirty = p_offset / len;
1113 1101
1114 bh = head = page_buffers(page); 1102 bh = head = page_buffers(page);
1115 offset = page_offset(page); 1103 offset = page_offset(page);
1116 flags = BMAPI_READ; 1104 flags = BMAPI_READ;
1117 type = IO_NEW; 1105 type = IO_NEW;
1118 1106
1119 /* TODO: cleanup count and page_dirty */
1120
1121 do { 1107 do {
1122 if (offset >= end_offset) 1108 if (offset >= end_offset)
1123 break; 1109 break;
1124 if (!buffer_uptodate(bh)) 1110 if (!buffer_uptodate(bh))
1125 uptodate = 0; 1111 uptodate = 0;
1126 if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) { 1112
1127 /* 1113 /*
1128 * the iomap is actually still valid, but the ioend 1114 * A hole may still be marked uptodate because discard_buffer
1129 * isn't. shouldn't happen too often. 1115 * leaves the flag set.
1130 */ 1116 */
1117 if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
1118 ASSERT(!buffer_dirty(bh));
1131 imap_valid = 0; 1119 imap_valid = 0;
1132 continue; 1120 continue;
1133 } 1121 }
@@ -1135,19 +1123,7 @@ xfs_page_state_convert(
1135 if (imap_valid) 1123 if (imap_valid)
1136 imap_valid = xfs_imap_valid(inode, &imap, offset); 1124 imap_valid = xfs_imap_valid(inode, &imap, offset);
1137 1125
1138 /* 1126 if (buffer_unwritten(bh) || buffer_delay(bh)) {
1139 * First case, map an unwritten extent and prepare for
1140 * extent state conversion transaction on completion.
1141 *
1142 * Second case, allocate space for a delalloc buffer.
1143 * We can return EAGAIN here in the release page case.
1144 *
1145 * Third case, an unmapped buffer was found, and we are
1146 * in a path where we need to write the whole page out.
1147 */
1148 if (buffer_unwritten(bh) || buffer_delay(bh) ||
1149 ((buffer_uptodate(bh) || PageUptodate(page)) &&
1150 !buffer_mapped(bh) && (unmapped || startio))) {
1151 int new_ioend = 0; 1127 int new_ioend = 0;
1152 1128
1153 /* 1129 /*
@@ -1161,15 +1137,16 @@ xfs_page_state_convert(
1161 flags = BMAPI_WRITE | BMAPI_IGNSTATE; 1137 flags = BMAPI_WRITE | BMAPI_IGNSTATE;
1162 } else if (buffer_delay(bh)) { 1138 } else if (buffer_delay(bh)) {
1163 type = IO_DELAY; 1139 type = IO_DELAY;
1164 flags = BMAPI_ALLOCATE | trylock; 1140 flags = BMAPI_ALLOCATE;
1165 } else { 1141
1166 type = IO_NEW; 1142 if (wbc->sync_mode == WB_SYNC_NONE &&
1167 flags = BMAPI_WRITE | BMAPI_MMAP; 1143 wbc->nonblocking)
1144 flags |= BMAPI_TRYLOCK;
1168 } 1145 }
1169 1146
1170 if (!imap_valid) { 1147 if (!imap_valid) {
1171 /* 1148 /*
1172 * if we didn't have a valid mapping then we 1149 * If we didn't have a valid mapping then we
1173 * need to ensure that we put the new mapping 1150 * need to ensure that we put the new mapping
1174 * in a new ioend structure. This needs to be 1151 * in a new ioend structure. This needs to be
1175 * done to ensure that the ioends correctly 1152 * done to ensure that the ioends correctly
@@ -1177,14 +1154,7 @@ xfs_page_state_convert(
1177 * for unwritten extent conversion. 1154 * for unwritten extent conversion.
1178 */ 1155 */
1179 new_ioend = 1; 1156 new_ioend = 1;
1180 if (type == IO_NEW) { 1157 err = xfs_map_blocks(inode, offset, len,
1181 size = xfs_probe_cluster(inode,
1182 page, bh, head, 0);
1183 } else {
1184 size = len;
1185 }
1186
1187 err = xfs_map_blocks(inode, offset, size,
1188 &imap, flags); 1158 &imap, flags);
1189 if (err) 1159 if (err)
1190 goto error; 1160 goto error;
@@ -1193,19 +1163,11 @@ xfs_page_state_convert(
1193 } 1163 }
1194 if (imap_valid) { 1164 if (imap_valid) {
1195 xfs_map_at_offset(inode, bh, &imap, offset); 1165 xfs_map_at_offset(inode, bh, &imap, offset);
1196 if (startio) { 1166 xfs_add_to_ioend(inode, bh, offset, type,
1197 xfs_add_to_ioend(inode, bh, offset, 1167 &ioend, new_ioend);
1198 type, &ioend,
1199 new_ioend);
1200 } else {
1201 set_buffer_dirty(bh);
1202 unlock_buffer(bh);
1203 mark_buffer_dirty(bh);
1204 }
1205 page_dirty--;
1206 count++; 1168 count++;
1207 } 1169 }
1208 } else if (buffer_uptodate(bh) && startio) { 1170 } else if (buffer_uptodate(bh)) {
1209 /* 1171 /*
1210 * we got here because the buffer is already mapped. 1172 * we got here because the buffer is already mapped.
1211 * That means it must already have extents allocated 1173 * That means it must already have extents allocated
@@ -1213,8 +1175,7 @@ xfs_page_state_convert(
1213 */ 1175 */
1214 if (!imap_valid || flags != BMAPI_READ) { 1176 if (!imap_valid || flags != BMAPI_READ) {
1215 flags = BMAPI_READ; 1177 flags = BMAPI_READ;
1216 size = xfs_probe_cluster(inode, page, bh, 1178 size = xfs_probe_cluster(inode, page, bh, head);
1217 head, 1);
1218 err = xfs_map_blocks(inode, offset, size, 1179 err = xfs_map_blocks(inode, offset, size,
1219 &imap, flags); 1180 &imap, flags);
1220 if (err) 1181 if (err)
@@ -1233,18 +1194,16 @@ xfs_page_state_convert(
1233 */ 1194 */
1234 type = IO_NEW; 1195 type = IO_NEW;
1235 if (trylock_buffer(bh)) { 1196 if (trylock_buffer(bh)) {
1236 ASSERT(buffer_mapped(bh));
1237 if (imap_valid) 1197 if (imap_valid)
1238 all_bh = 1; 1198 all_bh = 1;
1239 xfs_add_to_ioend(inode, bh, offset, type, 1199 xfs_add_to_ioend(inode, bh, offset, type,
1240 &ioend, !imap_valid); 1200 &ioend, !imap_valid);
1241 page_dirty--;
1242 count++; 1201 count++;
1243 } else { 1202 } else {
1244 imap_valid = 0; 1203 imap_valid = 0;
1245 } 1204 }
1246 } else if ((buffer_uptodate(bh) || PageUptodate(page)) && 1205 } else if (PageUptodate(page)) {
1247 (unmapped || startio)) { 1206 ASSERT(buffer_mapped(bh));
1248 imap_valid = 0; 1207 imap_valid = 0;
1249 } 1208 }
1250 1209
@@ -1256,8 +1215,7 @@ xfs_page_state_convert(
1256 if (uptodate && bh == head) 1215 if (uptodate && bh == head)
1257 SetPageUptodate(page); 1216 SetPageUptodate(page);
1258 1217
1259 if (startio) 1218 xfs_start_page_writeback(page, 1, count);
1260 xfs_start_page_writeback(page, 1, count);
1261 1219
1262 if (ioend && imap_valid) { 1220 if (ioend && imap_valid) {
1263 xfs_off_t end_index; 1221 xfs_off_t end_index;
@@ -1275,131 +1233,27 @@ xfs_page_state_convert(
1275 end_index = last_index; 1233 end_index = last_index;
1276 1234
1277 xfs_cluster_write(inode, page->index + 1, &imap, &ioend, 1235 xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
1278 wbc, startio, all_bh, end_index); 1236 wbc, all_bh, end_index);
1279 } 1237 }
1280 1238
1281 if (iohead) 1239 if (iohead)
1282 xfs_submit_ioend(wbc, iohead); 1240 xfs_submit_ioend(wbc, iohead);
1283 1241
1284 return page_dirty; 1242 return 0;
1285 1243
1286error: 1244error:
1287 if (iohead) 1245 if (iohead)
1288 xfs_cancel_ioend(iohead); 1246 xfs_cancel_ioend(iohead);
1289 1247
1290 /* 1248 xfs_aops_discard_page(page);
1291 * If it's delalloc and we have nowhere to put it, 1249 ClearPageUptodate(page);
1292 * throw it away, unless the lower layers told 1250 unlock_page(page);
1293 * us to try again.
1294 */
1295 if (err != -EAGAIN) {
1296 if (!unmapped)
1297 xfs_aops_discard_page(page);
1298 ClearPageUptodate(page);
1299 }
1300 return err; 1251 return err;
1301}
1302
1303/*
1304 * writepage: Called from one of two places:
1305 *
1306 * 1. we are flushing a delalloc buffer head.
1307 *
1308 * 2. we are writing out a dirty page. Typically the page dirty
1309 * state is cleared before we get here. In this case is it
1310 * conceivable we have no buffer heads.
1311 *
1312 * For delalloc space on the page we need to allocate space and
1313 * flush it. For unmapped buffer heads on the page we should
1314 * allocate space if the page is uptodate. For any other dirty
1315 * buffer heads on the page we should flush them.
1316 *
1317 * If we detect that a transaction would be required to flush
1318 * the page, we have to check the process flags first, if we
1319 * are already in a transaction or disk I/O during allocations
1320 * is off, we need to fail the writepage and redirty the page.
1321 */
1322
1323STATIC int
1324xfs_vm_writepage(
1325 struct page *page,
1326 struct writeback_control *wbc)
1327{
1328 int error;
1329 int need_trans;
1330 int delalloc, unmapped, unwritten;
1331 struct inode *inode = page->mapping->host;
1332
1333 trace_xfs_writepage(inode, page, 0);
1334
1335 /*
1336 * Refuse to write the page out if we are called from reclaim context.
1337 *
1338 * This is primarily to avoid stack overflows when called from deep
1339 * used stacks in random callers for direct reclaim, but disabling
1340 * reclaim for kswap is a nice side-effect as kswapd causes rather
1341 * suboptimal I/O patters, too.
1342 *
1343 * This should really be done by the core VM, but until that happens
1344 * filesystems like XFS, btrfs and ext4 have to take care of this
1345 * by themselves.
1346 */
1347 if (current->flags & PF_MEMALLOC)
1348 goto out_fail;
1349
1350 /*
1351 * We need a transaction if:
1352 * 1. There are delalloc buffers on the page
1353 * 2. The page is uptodate and we have unmapped buffers
1354 * 3. The page is uptodate and we have no buffers
1355 * 4. There are unwritten buffers on the page
1356 */
1357
1358 if (!page_has_buffers(page)) {
1359 unmapped = 1;
1360 need_trans = 1;
1361 } else {
1362 xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
1363 if (!PageUptodate(page))
1364 unmapped = 0;
1365 need_trans = delalloc + unmapped + unwritten;
1366 }
1367
1368 /*
1369 * If we need a transaction and the process flags say
1370 * we are already in a transaction, or no IO is allowed
1371 * then mark the page dirty again and leave the page
1372 * as is.
1373 */
1374 if (current_test_flags(PF_FSTRANS) && need_trans)
1375 goto out_fail;
1376
1377 /*
1378 * Delay hooking up buffer heads until we have
1379 * made our go/no-go decision.
1380 */
1381 if (!page_has_buffers(page))
1382 create_empty_buffers(page, 1 << inode->i_blkbits, 0);
1383
1384 /*
1385 * Convert delayed allocate, unwritten or unmapped space
1386 * to real space and flush out to disk.
1387 */
1388 error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
1389 if (error == -EAGAIN)
1390 goto out_fail;
1391 if (unlikely(error < 0))
1392 goto out_unlock;
1393
1394 return 0;
1395 1252
1396out_fail: 1253out_fail:
1397 redirty_page_for_writepage(wbc, page); 1254 redirty_page_for_writepage(wbc, page);
1398 unlock_page(page); 1255 unlock_page(page);
1399 return 0; 1256 return 0;
1400out_unlock:
1401 unlock_page(page);
1402 return error;
1403} 1257}
1404 1258
1405STATIC int 1259STATIC int
@@ -1413,65 +1267,27 @@ xfs_vm_writepages(
1413 1267
1414/* 1268/*
1415 * Called to move a page into cleanable state - and from there 1269 * Called to move a page into cleanable state - and from there
1416 * to be released. Possibly the page is already clean. We always 1270 * to be released. The page should already be clean. We always
1417 * have buffer heads in this call. 1271 * have buffer heads in this call.
1418 * 1272 *
1419 * Returns 0 if the page is ok to release, 1 otherwise. 1273 * Returns 1 if the page is ok to release, 0 otherwise.
1420 *
1421 * Possible scenarios are:
1422 *
1423 * 1. We are being called to release a page which has been written
1424 * to via regular I/O. buffer heads will be dirty and possibly
1425 * delalloc. If no delalloc buffer heads in this case then we
1426 * can just return zero.
1427 *
1428 * 2. We are called to release a page which has been written via
1429 * mmap, all we need to do is ensure there is no delalloc
1430 * state in the buffer heads, if not we can let the caller
1431 * free them and we should come back later via writepage.
1432 */ 1274 */
1433STATIC int 1275STATIC int
1434xfs_vm_releasepage( 1276xfs_vm_releasepage(
1435 struct page *page, 1277 struct page *page,
1436 gfp_t gfp_mask) 1278 gfp_t gfp_mask)
1437{ 1279{
1438 struct inode *inode = page->mapping->host; 1280 int delalloc, unwritten;
1439 int dirty, delalloc, unmapped, unwritten;
1440 struct writeback_control wbc = {
1441 .sync_mode = WB_SYNC_ALL,
1442 .nr_to_write = 1,
1443 };
1444 1281
1445 trace_xfs_releasepage(inode, page, 0); 1282 trace_xfs_releasepage(page->mapping->host, page, 0);
1446
1447 if (!page_has_buffers(page))
1448 return 0;
1449 1283
1450 xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); 1284 xfs_count_page_state(page, &delalloc, &unwritten);
1451 if (!delalloc && !unwritten)
1452 goto free_buffers;
1453 1285
1454 if (!(gfp_mask & __GFP_FS)) 1286 if (WARN_ON(delalloc))
1455 return 0; 1287 return 0;
1456 1288 if (WARN_ON(unwritten))
1457 /* If we are already inside a transaction or the thread cannot
1458 * do I/O, we cannot release this page.
1459 */
1460 if (current_test_flags(PF_FSTRANS))
1461 return 0; 1289 return 0;
1462 1290
1463 /*
1464 * Convert delalloc space to real space, do not flush the
1465 * data out to disk, that will be done by the caller.
1466 * Never need to allocate space here - we will always
1467 * come back to writepage in that case.
1468 */
1469 dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
1470 if (dirty == 0 && !unwritten)
1471 goto free_buffers;
1472 return 0;
1473
1474free_buffers:
1475 return try_to_free_buffers(page); 1291 return try_to_free_buffers(page);
1476} 1292}
1477 1293
@@ -1481,9 +1297,9 @@ __xfs_get_blocks(
1481 sector_t iblock, 1297 sector_t iblock,
1482 struct buffer_head *bh_result, 1298 struct buffer_head *bh_result,
1483 int create, 1299 int create,
1484 int direct, 1300 int direct)
1485 bmapi_flags_t flags)
1486{ 1301{
1302 int flags = create ? BMAPI_WRITE : BMAPI_READ;
1487 struct xfs_bmbt_irec imap; 1303 struct xfs_bmbt_irec imap;
1488 xfs_off_t offset; 1304 xfs_off_t offset;
1489 ssize_t size; 1305 ssize_t size;
@@ -1498,8 +1314,11 @@ __xfs_get_blocks(
1498 if (!create && direct && offset >= i_size_read(inode)) 1314 if (!create && direct && offset >= i_size_read(inode))
1499 return 0; 1315 return 0;
1500 1316
1501 error = xfs_iomap(XFS_I(inode), offset, size, 1317 if (direct && create)
1502 create ? flags : BMAPI_READ, &imap, &nimap, &new); 1318 flags |= BMAPI_DIRECT;
1319
1320 error = xfs_iomap(XFS_I(inode), offset, size, flags, &imap, &nimap,
1321 &new);
1503 if (error) 1322 if (error)
1504 return -error; 1323 return -error;
1505 if (nimap == 0) 1324 if (nimap == 0)
@@ -1579,8 +1398,7 @@ xfs_get_blocks(
1579 struct buffer_head *bh_result, 1398 struct buffer_head *bh_result,
1580 int create) 1399 int create)
1581{ 1400{
1582 return __xfs_get_blocks(inode, iblock, 1401 return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
1583 bh_result, create, 0, BMAPI_WRITE);
1584} 1402}
1585 1403
1586STATIC int 1404STATIC int
@@ -1590,61 +1408,59 @@ xfs_get_blocks_direct(
1590 struct buffer_head *bh_result, 1408 struct buffer_head *bh_result,
1591 int create) 1409 int create)
1592{ 1410{
1593 return __xfs_get_blocks(inode, iblock, 1411 return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
1594 bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
1595} 1412}
1596 1413
1414/*
1415 * Complete a direct I/O write request.
1416 *
1417 * If the private argument is non-NULL __xfs_get_blocks signals us that we
1418 * need to issue a transaction to convert the range from unwritten to written
1419 * extents. In case this is regular synchronous I/O we just call xfs_end_io
1420 * to do this and we are done. But in case this was a successfull AIO
1421 * request this handler is called from interrupt context, from which we
1422 * can't start transactions. In that case offload the I/O completion to
1423 * the workqueues we also use for buffered I/O completion.
1424 */
1597STATIC void 1425STATIC void
1598xfs_end_io_direct( 1426xfs_end_io_direct_write(
1599 struct kiocb *iocb, 1427 struct kiocb *iocb,
1600 loff_t offset, 1428 loff_t offset,
1601 ssize_t size, 1429 ssize_t size,
1602 void *private) 1430 void *private,
1431 int ret,
1432 bool is_async)
1603{ 1433{
1604 xfs_ioend_t *ioend = iocb->private; 1434 struct xfs_ioend *ioend = iocb->private;
1605 1435
1606 /* 1436 /*
1607 * Non-NULL private data means we need to issue a transaction to 1437 * blockdev_direct_IO can return an error even after the I/O
1608 * convert a range from unwritten to written extents. This needs 1438 * completion handler was called. Thus we need to protect
1609 * to happen from process context but aio+dio I/O completion 1439 * against double-freeing.
1610 * happens from irq context so we need to defer it to a workqueue.
1611 * This is not necessary for synchronous direct I/O, but we do
1612 * it anyway to keep the code uniform and simpler.
1613 *
1614 * Well, if only it were that simple. Because synchronous direct I/O
1615 * requires extent conversion to occur *before* we return to userspace,
1616 * we have to wait for extent conversion to complete. Look at the
1617 * iocb that has been passed to us to determine if this is AIO or
1618 * not. If it is synchronous, tell xfs_finish_ioend() to kick the
1619 * workqueue and wait for it to complete.
1620 *
1621 * The core direct I/O code might be changed to always call the
1622 * completion handler in the future, in which case all this can
1623 * go away.
1624 */ 1440 */
1441 iocb->private = NULL;
1442
1625 ioend->io_offset = offset; 1443 ioend->io_offset = offset;
1626 ioend->io_size = size; 1444 ioend->io_size = size;
1627 if (ioend->io_type == IO_READ) { 1445 if (private && size > 0)
1628 xfs_finish_ioend(ioend, 0); 1446 ioend->io_type = IO_UNWRITTEN;
1629 } else if (private && size > 0) { 1447
1630 xfs_finish_ioend(ioend, is_sync_kiocb(iocb)); 1448 if (is_async) {
1631 } else {
1632 /* 1449 /*
1633 * A direct I/O write ioend starts it's life in unwritten 1450 * If we are converting an unwritten extent we need to delay
1634 * state in case they map an unwritten extent. This write 1451 * the AIO completion until after the unwrittent extent
1635 * didn't map an unwritten extent so switch it's completion 1452 * conversion has completed, otherwise do it ASAP.
1636 * handler.
1637 */ 1453 */
1638 ioend->io_type = IO_NEW; 1454 if (ioend->io_type == IO_UNWRITTEN) {
1639 xfs_finish_ioend(ioend, 0); 1455 ioend->io_iocb = iocb;
1456 ioend->io_result = ret;
1457 } else {
1458 aio_complete(iocb, ret, 0);
1459 }
1460 xfs_finish_ioend(ioend);
1461 } else {
1462 xfs_finish_ioend_sync(ioend);
1640 } 1463 }
1641
1642 /*
1643 * blockdev_direct_IO can return an error even after the I/O
1644 * completion handler was called. Thus we need to protect
1645 * against double-freeing.
1646 */
1647 iocb->private = NULL;
1648} 1464}
1649 1465
1650STATIC ssize_t 1466STATIC ssize_t
@@ -1655,23 +1471,26 @@ xfs_vm_direct_IO(
1655 loff_t offset, 1471 loff_t offset,
1656 unsigned long nr_segs) 1472 unsigned long nr_segs)
1657{ 1473{
1658 struct file *file = iocb->ki_filp; 1474 struct inode *inode = iocb->ki_filp->f_mapping->host;
1659 struct inode *inode = file->f_mapping->host; 1475 struct block_device *bdev = xfs_find_bdev_for_inode(inode);
1660 struct block_device *bdev; 1476 ssize_t ret;
1661 ssize_t ret; 1477
1662 1478 if (rw & WRITE) {
1663 bdev = xfs_find_bdev_for_inode(inode); 1479 iocb->private = xfs_alloc_ioend(inode, IO_NEW);
1664 1480
1665 iocb->private = xfs_alloc_ioend(inode, rw == WRITE ? 1481 ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
1666 IO_UNWRITTEN : IO_READ); 1482 offset, nr_segs,
1667 1483 xfs_get_blocks_direct,
1668 ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov, 1484 xfs_end_io_direct_write);
1669 offset, nr_segs, 1485 if (ret != -EIOCBQUEUED && iocb->private)
1670 xfs_get_blocks_direct, 1486 xfs_destroy_ioend(iocb->private);
1671 xfs_end_io_direct); 1487 } else {
1488 ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
1489 offset, nr_segs,
1490 xfs_get_blocks_direct,
1491 NULL);
1492 }
1672 1493
1673 if (unlikely(ret != -EIOCBQUEUED && iocb->private))
1674 xfs_destroy_ioend(iocb->private);
1675 return ret; 1494 return ret;
1676} 1495}
1677 1496
@@ -1686,8 +1505,8 @@ xfs_vm_write_begin(
1686 void **fsdata) 1505 void **fsdata)
1687{ 1506{
1688 *pagep = NULL; 1507 *pagep = NULL;
1689 return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, 1508 return block_write_begin(file, mapping, pos, len, flags | AOP_FLAG_NOFS,
1690 xfs_get_blocks); 1509 pagep, fsdata, xfs_get_blocks);
1691} 1510}
1692 1511
1693STATIC sector_t 1512STATIC sector_t
@@ -1698,7 +1517,7 @@ xfs_vm_bmap(
1698 struct inode *inode = (struct inode *)mapping->host; 1517 struct inode *inode = (struct inode *)mapping->host;
1699 struct xfs_inode *ip = XFS_I(inode); 1518 struct xfs_inode *ip = XFS_I(inode);
1700 1519
1701 xfs_itrace_entry(XFS_I(inode)); 1520 trace_xfs_vm_bmap(XFS_I(inode));
1702 xfs_ilock(ip, XFS_IOLOCK_SHARED); 1521 xfs_ilock(ip, XFS_IOLOCK_SHARED);
1703 xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF); 1522 xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
1704 xfs_iunlock(ip, XFS_IOLOCK_SHARED); 1523 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index 4cfc6ea87df8..c5057fb6237a 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -37,6 +37,8 @@ typedef struct xfs_ioend {
37 size_t io_size; /* size of the extent */ 37 size_t io_size; /* size of the extent */
38 xfs_off_t io_offset; /* offset in the file */ 38 xfs_off_t io_offset; /* offset in the file */
39 struct work_struct io_work; /* xfsdatad work queue */ 39 struct work_struct io_work; /* xfsdatad work queue */
40 struct kiocb *io_iocb;
41 int io_result;
40} xfs_ioend_t; 42} xfs_ioend_t;
41 43
42extern const struct address_space_operations xfs_address_space_operations; 44extern const struct address_space_operations xfs_address_space_operations;
@@ -45,6 +47,6 @@ extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int);
45extern void xfs_ioend_init(void); 47extern void xfs_ioend_init(void);
46extern void xfs_ioend_wait(struct xfs_inode *); 48extern void xfs_ioend_wait(struct xfs_inode *);
47 49
48extern void xfs_count_page_state(struct page *, int *, int *, int *); 50extern void xfs_count_page_state(struct page *, int *, int *);
49 51
50#endif /* __XFS_AOPS_H__ */ 52#endif /* __XFS_AOPS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 649ade8ef598..ea79072f5210 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -39,13 +39,12 @@
39#include "xfs_inum.h" 39#include "xfs_inum.h"
40#include "xfs_log.h" 40#include "xfs_log.h"
41#include "xfs_ag.h" 41#include "xfs_ag.h"
42#include "xfs_dmapi.h"
43#include "xfs_mount.h" 42#include "xfs_mount.h"
44#include "xfs_trace.h" 43#include "xfs_trace.h"
45 44
46static kmem_zone_t *xfs_buf_zone; 45static kmem_zone_t *xfs_buf_zone;
47STATIC int xfsbufd(void *); 46STATIC int xfsbufd(void *);
48STATIC int xfsbufd_wakeup(int, gfp_t); 47STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t);
49STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int); 48STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
50static struct shrinker xfs_buf_shake = { 49static struct shrinker xfs_buf_shake = {
51 .shrink = xfsbufd_wakeup, 50 .shrink = xfsbufd_wakeup,
@@ -340,7 +339,7 @@ _xfs_buf_lookup_pages(
340 __func__, gfp_mask); 339 __func__, gfp_mask);
341 340
342 XFS_STATS_INC(xb_page_retries); 341 XFS_STATS_INC(xb_page_retries);
343 xfsbufd_wakeup(0, gfp_mask); 342 xfsbufd_wakeup(NULL, 0, gfp_mask);
344 congestion_wait(BLK_RW_ASYNC, HZ/50); 343 congestion_wait(BLK_RW_ASYNC, HZ/50);
345 goto retry; 344 goto retry;
346 } 345 }
@@ -579,9 +578,9 @@ _xfs_buf_read(
579 XBF_READ_AHEAD | _XBF_RUN_QUEUES); 578 XBF_READ_AHEAD | _XBF_RUN_QUEUES);
580 579
581 status = xfs_buf_iorequest(bp); 580 status = xfs_buf_iorequest(bp);
582 if (!status && !(flags & XBF_ASYNC)) 581 if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC))
583 status = xfs_buf_iowait(bp); 582 return status;
584 return status; 583 return xfs_buf_iowait(bp);
585} 584}
586 585
587xfs_buf_t * 586xfs_buf_t *
@@ -897,36 +896,6 @@ xfs_buf_unlock(
897 trace_xfs_buf_unlock(bp, _RET_IP_); 896 trace_xfs_buf_unlock(bp, _RET_IP_);
898} 897}
899 898
900
901/*
902 * Pinning Buffer Storage in Memory
903 * Ensure that no attempt to force a buffer to disk will succeed.
904 */
905void
906xfs_buf_pin(
907 xfs_buf_t *bp)
908{
909 trace_xfs_buf_pin(bp, _RET_IP_);
910 atomic_inc(&bp->b_pin_count);
911}
912
913void
914xfs_buf_unpin(
915 xfs_buf_t *bp)
916{
917 trace_xfs_buf_unpin(bp, _RET_IP_);
918
919 if (atomic_dec_and_test(&bp->b_pin_count))
920 wake_up_all(&bp->b_waiters);
921}
922
923int
924xfs_buf_ispin(
925 xfs_buf_t *bp)
926{
927 return atomic_read(&bp->b_pin_count);
928}
929
930STATIC void 899STATIC void
931xfs_buf_wait_unpin( 900xfs_buf_wait_unpin(
932 xfs_buf_t *bp) 901 xfs_buf_t *bp)
@@ -1018,13 +987,12 @@ xfs_bwrite(
1018{ 987{
1019 int error; 988 int error;
1020 989
1021 bp->b_strat = xfs_bdstrat_cb;
1022 bp->b_mount = mp; 990 bp->b_mount = mp;
1023 bp->b_flags |= XBF_WRITE; 991 bp->b_flags |= XBF_WRITE;
1024 bp->b_flags &= ~(XBF_ASYNC | XBF_READ); 992 bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
1025 993
1026 xfs_buf_delwri_dequeue(bp); 994 xfs_buf_delwri_dequeue(bp);
1027 xfs_buf_iostrategy(bp); 995 xfs_bdstrat_cb(bp);
1028 996
1029 error = xfs_buf_iowait(bp); 997 error = xfs_buf_iowait(bp);
1030 if (error) 998 if (error)
@@ -1040,7 +1008,6 @@ xfs_bdwrite(
1040{ 1008{
1041 trace_xfs_buf_bdwrite(bp, _RET_IP_); 1009 trace_xfs_buf_bdwrite(bp, _RET_IP_);
1042 1010
1043 bp->b_strat = xfs_bdstrat_cb;
1044 bp->b_mount = mp; 1011 bp->b_mount = mp;
1045 1012
1046 bp->b_flags &= ~XBF_READ; 1013 bp->b_flags &= ~XBF_READ;
@@ -1075,7 +1042,6 @@ xfs_bioerror(
1075 XFS_BUF_UNDONE(bp); 1042 XFS_BUF_UNDONE(bp);
1076 XFS_BUF_STALE(bp); 1043 XFS_BUF_STALE(bp);
1077 1044
1078 XFS_BUF_CLR_BDSTRAT_FUNC(bp);
1079 xfs_biodone(bp); 1045 xfs_biodone(bp);
1080 1046
1081 return EIO; 1047 return EIO;
@@ -1105,7 +1071,6 @@ xfs_bioerror_relse(
1105 XFS_BUF_DONE(bp); 1071 XFS_BUF_DONE(bp);
1106 XFS_BUF_STALE(bp); 1072 XFS_BUF_STALE(bp);
1107 XFS_BUF_CLR_IODONE_FUNC(bp); 1073 XFS_BUF_CLR_IODONE_FUNC(bp);
1108 XFS_BUF_CLR_BDSTRAT_FUNC(bp);
1109 if (!(fl & XBF_ASYNC)) { 1074 if (!(fl & XBF_ASYNC)) {
1110 /* 1075 /*
1111 * Mark b_error and B_ERROR _both_. 1076 * Mark b_error and B_ERROR _both_.
@@ -1311,8 +1276,19 @@ submit_io:
1311 if (size) 1276 if (size)
1312 goto next_chunk; 1277 goto next_chunk;
1313 } else { 1278 } else {
1314 bio_put(bio); 1279 /*
1280 * if we get here, no pages were added to the bio. However,
1281 * we can't just error out here - if the pages are locked then
1282 * we have to unlock them otherwise we can hang on a later
1283 * access to the page.
1284 */
1315 xfs_buf_ioerror(bp, EIO); 1285 xfs_buf_ioerror(bp, EIO);
1286 if (bp->b_flags & _XBF_PAGE_LOCKED) {
1287 int i;
1288 for (i = 0; i < bp->b_page_count; i++)
1289 unlock_page(bp->b_pages[i]);
1290 }
1291 bio_put(bio);
1316 } 1292 }
1317} 1293}
1318 1294
@@ -1762,6 +1738,7 @@ xfs_buf_runall_queues(
1762 1738
1763STATIC int 1739STATIC int
1764xfsbufd_wakeup( 1740xfsbufd_wakeup(
1741 struct shrinker *shrink,
1765 int priority, 1742 int priority,
1766 gfp_t mask) 1743 gfp_t mask)
1767{ 1744{
@@ -1803,7 +1780,7 @@ xfs_buf_delwri_split(
1803 trace_xfs_buf_delwri_split(bp, _RET_IP_); 1780 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1804 ASSERT(bp->b_flags & XBF_DELWRI); 1781 ASSERT(bp->b_flags & XBF_DELWRI);
1805 1782
1806 if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) { 1783 if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
1807 if (!force && 1784 if (!force &&
1808 time_before(jiffies, bp->b_queuetime + age)) { 1785 time_before(jiffies, bp->b_queuetime + age)) {
1809 xfs_buf_unlock(bp); 1786 xfs_buf_unlock(bp);
@@ -1888,7 +1865,7 @@ xfsbufd(
1888 struct xfs_buf *bp; 1865 struct xfs_buf *bp;
1889 bp = list_first_entry(&tmp, struct xfs_buf, b_list); 1866 bp = list_first_entry(&tmp, struct xfs_buf, b_list);
1890 list_del_init(&bp->b_list); 1867 list_del_init(&bp->b_list);
1891 xfs_buf_iostrategy(bp); 1868 xfs_bdstrat_cb(bp);
1892 count++; 1869 count++;
1893 } 1870 }
1894 if (count) 1871 if (count)
@@ -1935,7 +1912,7 @@ xfs_flush_buftarg(
1935 bp->b_flags &= ~XBF_ASYNC; 1912 bp->b_flags &= ~XBF_ASYNC;
1936 list_add(&bp->b_list, &wait_list); 1913 list_add(&bp->b_list, &wait_list);
1937 } 1914 }
1938 xfs_buf_iostrategy(bp); 1915 xfs_bdstrat_cb(bp);
1939 } 1916 }
1940 1917
1941 if (wait) { 1918 if (wait) {
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 5fbecefa5dfd..d072e5ff923b 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -44,57 +44,57 @@ typedef enum {
44 XBRW_ZERO = 3, /* Zero target memory */ 44 XBRW_ZERO = 3, /* Zero target memory */
45} xfs_buf_rw_t; 45} xfs_buf_rw_t;
46 46
47typedef enum { 47#define XBF_READ (1 << 0) /* buffer intended for reading from device */
48 XBF_READ = (1 << 0), /* buffer intended for reading from device */ 48#define XBF_WRITE (1 << 1) /* buffer intended for writing to device */
49 XBF_WRITE = (1 << 1), /* buffer intended for writing to device */ 49#define XBF_MAPPED (1 << 2) /* buffer mapped (b_addr valid) */
50 XBF_MAPPED = (1 << 2), /* buffer mapped (b_addr valid) */ 50#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */
51 XBF_ASYNC = (1 << 4), /* initiator will not wait for completion */ 51#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */
52 XBF_DONE = (1 << 5), /* all pages in the buffer uptodate */ 52#define XBF_DELWRI (1 << 6) /* buffer has dirty pages */
53 XBF_DELWRI = (1 << 6), /* buffer has dirty pages */ 53#define XBF_STALE (1 << 7) /* buffer has been staled, do not find it */
54 XBF_STALE = (1 << 7), /* buffer has been staled, do not find it */ 54#define XBF_FS_MANAGED (1 << 8) /* filesystem controls freeing memory */
55 XBF_FS_MANAGED = (1 << 8), /* filesystem controls freeing memory */ 55#define XBF_ORDERED (1 << 11)/* use ordered writes */
56 XBF_ORDERED = (1 << 11), /* use ordered writes */ 56#define XBF_READ_AHEAD (1 << 12)/* asynchronous read-ahead */
57 XBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */ 57#define XBF_LOG_BUFFER (1 << 13)/* this is a buffer used for the log */
58 XBF_LOG_BUFFER = (1 << 13), /* this is a buffer used for the log */ 58
59 59/* flags used only as arguments to access routines */
60 /* flags used only as arguments to access routines */ 60#define XBF_LOCK (1 << 14)/* lock requested */
61 XBF_LOCK = (1 << 14), /* lock requested */ 61#define XBF_TRYLOCK (1 << 15)/* lock requested, but do not wait */
62 XBF_TRYLOCK = (1 << 15), /* lock requested, but do not wait */ 62#define XBF_DONT_BLOCK (1 << 16)/* do not block in current thread */
63 XBF_DONT_BLOCK = (1 << 16), /* do not block in current thread */ 63
64 64/* flags used only internally */
65 /* flags used only internally */ 65#define _XBF_PAGE_CACHE (1 << 17)/* backed by pagecache */
66 _XBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */ 66#define _XBF_PAGES (1 << 18)/* backed by refcounted pages */
67 _XBF_PAGES = (1 << 18), /* backed by refcounted pages */ 67#define _XBF_RUN_QUEUES (1 << 19)/* run block device task queue */
68 _XBF_RUN_QUEUES = (1 << 19),/* run block device task queue */ 68#define _XBF_DELWRI_Q (1 << 21)/* buffer on delwri queue */
69 _XBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */ 69
70 70/*
71 /* 71 * Special flag for supporting metadata blocks smaller than a FSB.
72 * Special flag for supporting metadata blocks smaller than a FSB. 72 *
73 * 73 * In this case we can have multiple xfs_buf_t on a single page and
74 * In this case we can have multiple xfs_buf_t on a single page and 74 * need to lock out concurrent xfs_buf_t readers as they only
75 * need to lock out concurrent xfs_buf_t readers as they only 75 * serialise access to the buffer.
76 * serialise access to the buffer. 76 *
77 * 77 * If the FSB size >= PAGE_CACHE_SIZE case, we have no serialisation
78 * If the FSB size >= PAGE_CACHE_SIZE case, we have no serialisation 78 * between reads of the page. Hence we can have one thread read the
79 * between reads of the page. Hence we can have one thread read the 79 * page and modify it, but then race with another thread that thinks
80 * page and modify it, but then race with another thread that thinks 80 * the page is not up-to-date and hence reads it again.
81 * the page is not up-to-date and hence reads it again. 81 *
82 * 82 * The result is that the first modifcation to the page is lost.
83 * The result is that the first modifcation to the page is lost. 83 * This sort of AGF/AGI reading race can happen when unlinking inodes
84 * This sort of AGF/AGI reading race can happen when unlinking inodes 84 * that require truncation and results in the AGI unlinked list
85 * that require truncation and results in the AGI unlinked list 85 * modifications being lost.
86 * modifications being lost. 86 */
87 */ 87#define _XBF_PAGE_LOCKED (1 << 22)
88 _XBF_PAGE_LOCKED = (1 << 22), 88
89 89/*
90 /* 90 * If we try a barrier write, but it fails we have to communicate
91 * If we try a barrier write, but it fails we have to communicate 91 * this to the upper layers. Unfortunately b_error gets overwritten
92 * this to the upper layers. Unfortunately b_error gets overwritten 92 * when the buffer is re-issued so we have to add another flag to
93 * when the buffer is re-issued so we have to add another flag to 93 * keep this information.
94 * keep this information. 94 */
95 */ 95#define _XFS_BARRIER_FAILED (1 << 23)
96 _XFS_BARRIER_FAILED = (1 << 23), 96
97} xfs_buf_flags_t; 97typedef unsigned int xfs_buf_flags_t;
98 98
99#define XFS_BUF_FLAGS \ 99#define XFS_BUF_FLAGS \
100 { XBF_READ, "READ" }, \ 100 { XBF_READ, "READ" }, \
@@ -187,7 +187,6 @@ typedef struct xfs_buf {
187 atomic_t b_io_remaining; /* #outstanding I/O requests */ 187 atomic_t b_io_remaining; /* #outstanding I/O requests */
188 xfs_buf_iodone_t b_iodone; /* I/O completion function */ 188 xfs_buf_iodone_t b_iodone; /* I/O completion function */
189 xfs_buf_relse_t b_relse; /* releasing function */ 189 xfs_buf_relse_t b_relse; /* releasing function */
190 xfs_buf_bdstrat_t b_strat; /* pre-write function */
191 struct completion b_iowait; /* queue for I/O waiters */ 190 struct completion b_iowait; /* queue for I/O waiters */
192 void *b_fspriv; 191 void *b_fspriv;
193 void *b_fspriv2; 192 void *b_fspriv2;
@@ -245,11 +244,6 @@ extern int xfs_buf_iowait(xfs_buf_t *);
245extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *, 244extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
246 xfs_buf_rw_t); 245 xfs_buf_rw_t);
247 246
248static inline int xfs_buf_iostrategy(xfs_buf_t *bp)
249{
250 return bp->b_strat ? bp->b_strat(bp) : xfs_buf_iorequest(bp);
251}
252
253static inline int xfs_buf_geterror(xfs_buf_t *bp) 247static inline int xfs_buf_geterror(xfs_buf_t *bp)
254{ 248{
255 return bp ? bp->b_error : ENOMEM; 249 return bp ? bp->b_error : ENOMEM;
@@ -258,11 +252,6 @@ static inline int xfs_buf_geterror(xfs_buf_t *bp)
258/* Buffer Utility Routines */ 252/* Buffer Utility Routines */
259extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t); 253extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);
260 254
261/* Pinning Buffer Storage in Memory */
262extern void xfs_buf_pin(xfs_buf_t *);
263extern void xfs_buf_unpin(xfs_buf_t *);
264extern int xfs_buf_ispin(xfs_buf_t *);
265
266/* Delayed Write Buffer Routines */ 255/* Delayed Write Buffer Routines */
267extern void xfs_buf_delwri_dequeue(xfs_buf_t *); 256extern void xfs_buf_delwri_dequeue(xfs_buf_t *);
268extern void xfs_buf_delwri_promote(xfs_buf_t *); 257extern void xfs_buf_delwri_promote(xfs_buf_t *);
@@ -326,8 +315,6 @@ extern void xfs_buf_terminate(void);
326#define XFS_BUF_IODONE_FUNC(bp) ((bp)->b_iodone) 315#define XFS_BUF_IODONE_FUNC(bp) ((bp)->b_iodone)
327#define XFS_BUF_SET_IODONE_FUNC(bp, func) ((bp)->b_iodone = (func)) 316#define XFS_BUF_SET_IODONE_FUNC(bp, func) ((bp)->b_iodone = (func))
328#define XFS_BUF_CLR_IODONE_FUNC(bp) ((bp)->b_iodone = NULL) 317#define XFS_BUF_CLR_IODONE_FUNC(bp) ((bp)->b_iodone = NULL)
329#define XFS_BUF_SET_BDSTRAT_FUNC(bp, func) ((bp)->b_strat = (func))
330#define XFS_BUF_CLR_BDSTRAT_FUNC(bp) ((bp)->b_strat = NULL)
331 318
332#define XFS_BUF_FSPRIVATE(bp, type) ((type)(bp)->b_fspriv) 319#define XFS_BUF_FSPRIVATE(bp, type) ((type)(bp)->b_fspriv)
333#define XFS_BUF_SET_FSPRIVATE(bp, val) ((bp)->b_fspriv = (void*)(val)) 320#define XFS_BUF_SET_FSPRIVATE(bp, val) ((bp)->b_fspriv = (void*)(val))
@@ -351,7 +338,7 @@ extern void xfs_buf_terminate(void);
351#define XFS_BUF_SET_VTYPE(bp, type) do { } while (0) 338#define XFS_BUF_SET_VTYPE(bp, type) do { } while (0)
352#define XFS_BUF_SET_REF(bp, ref) do { } while (0) 339#define XFS_BUF_SET_REF(bp, ref) do { } while (0)
353 340
354#define XFS_BUF_ISPINNED(bp) xfs_buf_ispin(bp) 341#define XFS_BUF_ISPINNED(bp) atomic_read(&((bp)->b_pin_count))
355 342
356#define XFS_BUF_VALUSEMA(bp) xfs_buf_lock_value(bp) 343#define XFS_BUF_VALUSEMA(bp) xfs_buf_lock_value(bp)
357#define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0) 344#define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0)
@@ -370,8 +357,6 @@ static inline void xfs_buf_relse(xfs_buf_t *bp)
370 xfs_buf_rele(bp); 357 xfs_buf_rele(bp);
371} 358}
372 359
373#define xfs_bpin(bp) xfs_buf_pin(bp)
374#define xfs_bunpin(bp) xfs_buf_unpin(bp)
375#define xfs_biodone(bp) xfs_buf_ioend(bp, 0) 360#define xfs_biodone(bp) xfs_buf_ioend(bp, 0)
376 361
377#define xfs_biomove(bp, off, len, data, rw) \ 362#define xfs_biomove(bp, off, len, data, rw) \
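The hunk above drops the pre-write b_strat hook and the xfs_buf_pin/xfs_buf_unpin/xfs_buf_ispin helpers; pin state is now read straight from the buffer's atomic pin count via XFS_BUF_ISPINNED(). A minimal sketch of how a caller might use that, assuming the 2.6.35-era buffer and log APIs shown in this diff; xfs_force_log_if_pinned() is a hypothetical helper, not part of the patch:

static inline void
xfs_force_log_if_pinned(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	/*
	 * XFS_BUF_ISPINNED() now expands to atomic_read(&bp->b_pin_count);
	 * a non-zero count means the log still references the buffer, and
	 * forcing the log is what eventually unpins it.
	 */
	if (XFS_BUF_ISPINNED(bp))
		xfs_log_force(mp, 0);
}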
diff --git a/fs/xfs/linux-2.6/xfs_dmapi_priv.h b/fs/xfs/linux-2.6/xfs_dmapi_priv.h
deleted file mode 100644
index a8b0b1685eed..000000000000
--- a/fs/xfs/linux-2.6/xfs_dmapi_priv.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef __XFS_DMAPI_PRIV_H__
19#define __XFS_DMAPI_PRIV_H__
20
21/*
22 * Based on IO_ISDIRECT, decide which i_ flag is set.
23 */
24#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \
25 DM_FLAGS_IMUX : 0)
26#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_IMUX)
27
28#endif /*__XFS_DMAPI_PRIV_H__*/
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index 846b75aeb2ab..3764d74790ec 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -23,13 +23,13 @@
23#include "xfs_sb.h" 23#include "xfs_sb.h"
24#include "xfs_ag.h" 24#include "xfs_ag.h"
25#include "xfs_dir2.h" 25#include "xfs_dir2.h"
26#include "xfs_dmapi.h"
27#include "xfs_mount.h" 26#include "xfs_mount.h"
28#include "xfs_export.h" 27#include "xfs_export.h"
29#include "xfs_vnodeops.h" 28#include "xfs_vnodeops.h"
30#include "xfs_bmap_btree.h" 29#include "xfs_bmap_btree.h"
31#include "xfs_inode.h" 30#include "xfs_inode.h"
32#include "xfs_inode_item.h" 31#include "xfs_inode_item.h"
32#include "xfs_trace.h"
33 33
34/* 34/*
35 * Note that we only accept fileids which are long enough rather than allow 35 * Note that we only accept fileids which are long enough rather than allow
@@ -128,13 +128,11 @@ xfs_nfs_get_inode(
128 return ERR_PTR(-ESTALE); 128 return ERR_PTR(-ESTALE);
129 129
130 /* 130 /*
131 * The XFS_IGET_BULKSTAT means that an invalid inode number is just 131 * The XFS_IGET_UNTRUSTED means that an invalid inode number is just
132 * fine and not an indication of a corrupted filesystem. Because 132 * fine and not an indication of a corrupted filesystem as clients can
133 * clients can send any kind of invalid file handle, e.g. after 133 * send invalid file handles and we have to handle them gracefully.
134 * a restore on the server we have to deal with this case gracefully.
135 */ 134 */
136 error = xfs_iget(mp, NULL, ino, XFS_IGET_BULKSTAT, 135 error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED, 0, &ip);
137 XFS_ILOCK_SHARED, &ip, 0);
138 if (error) { 136 if (error) {
139 /* 137 /*
140 * EINVAL means the inode cluster doesn't exist anymore. 138 * EINVAL means the inode cluster doesn't exist anymore.
@@ -149,11 +147,10 @@ xfs_nfs_get_inode(
149 } 147 }
150 148
151 if (ip->i_d.di_gen != generation) { 149 if (ip->i_d.di_gen != generation) {
152 xfs_iput_new(ip, XFS_ILOCK_SHARED); 150 IRELE(ip);
153 return ERR_PTR(-ENOENT); 151 return ERR_PTR(-ENOENT);
154 } 152 }
155 153
156 xfs_iunlock(ip, XFS_ILOCK_SHARED);
157 return VFS_I(ip); 154 return VFS_I(ip);
158} 155}
159 156
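Read out of the side-by-side columns, the NFS handle lookup above now boils down to an unlocked, untrusted iget followed by a generation check. A condensed sketch, reconstructed from the hunks above (the EINVAL-to-ESTALE mapping mentioned in the retained comment is elided here):

STATIC struct inode *
xfs_nfs_get_inode_sketch(	/* condensed for illustration only */
	struct super_block	*sb,
	u64			ino,
	u32			generation)
{
	xfs_mount_t		*mp = XFS_M(sb);
	xfs_inode_t		*ip;
	int			error;

	/* XFS_IGET_UNTRUSTED replaces XFS_IGET_BULKSTAT; no ilock is taken */
	error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED, 0, &ip);
	if (error)
		return ERR_PTR(-error);

	if (ip->i_d.di_gen != generation) {
		IRELE(ip);	/* just drop the reference; nothing to unlock */
		return ERR_PTR(-ENOENT);
	}
	return VFS_I(ip);
}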
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index 257a56b127cf..ba8ad422a165 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -22,23 +22,15 @@
22#include "xfs_inum.h" 22#include "xfs_inum.h"
23#include "xfs_sb.h" 23#include "xfs_sb.h"
24#include "xfs_ag.h" 24#include "xfs_ag.h"
25#include "xfs_dir2.h"
26#include "xfs_trans.h" 25#include "xfs_trans.h"
27#include "xfs_dmapi.h"
28#include "xfs_mount.h" 26#include "xfs_mount.h"
29#include "xfs_bmap_btree.h" 27#include "xfs_bmap_btree.h"
30#include "xfs_alloc_btree.h"
31#include "xfs_ialloc_btree.h"
32#include "xfs_alloc.h" 28#include "xfs_alloc.h"
33#include "xfs_btree.h"
34#include "xfs_attr_sf.h"
35#include "xfs_dir2_sf.h"
36#include "xfs_dinode.h" 29#include "xfs_dinode.h"
37#include "xfs_inode.h" 30#include "xfs_inode.h"
38#include "xfs_inode_item.h" 31#include "xfs_inode_item.h"
39#include "xfs_bmap.h" 32#include "xfs_bmap.h"
40#include "xfs_error.h" 33#include "xfs_error.h"
41#include "xfs_rw.h"
42#include "xfs_vnodeops.h" 34#include "xfs_vnodeops.h"
43#include "xfs_da_btree.h" 35#include "xfs_da_btree.h"
44#include "xfs_ioctl.h" 36#include "xfs_ioctl.h"
@@ -108,7 +100,7 @@ xfs_file_fsync(
108 int error = 0; 100 int error = 0;
109 int log_flushed = 0; 101 int log_flushed = 0;
110 102
111 xfs_itrace_entry(ip); 103 trace_xfs_file_fsync(ip);
112 104
113 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 105 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
114 return -XFS_ERROR(EIO); 106 return -XFS_ERROR(EIO);
@@ -166,8 +158,7 @@ xfs_file_fsync(
166 * transaction. So we play it safe and fire off the 158 * transaction. So we play it safe and fire off the
167 * transaction anyway. 159 * transaction anyway.
168 */ 160 */
169 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 161 xfs_trans_ijoin(tp, ip);
170 xfs_trans_ihold(tp, ip);
171 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 162 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
172 xfs_trans_set_sync(tp); 163 xfs_trans_set_sync(tp);
173 error = _xfs_trans_commit(tp, 0, &log_flushed); 164 error = _xfs_trans_commit(tp, 0, &log_flushed);
@@ -275,20 +266,6 @@ xfs_file_aio_read(
275 mutex_lock(&inode->i_mutex); 266 mutex_lock(&inode->i_mutex);
276 xfs_ilock(ip, XFS_IOLOCK_SHARED); 267 xfs_ilock(ip, XFS_IOLOCK_SHARED);
277 268
278 if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
279 int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
280 int iolock = XFS_IOLOCK_SHARED;
281
282 ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, iocb->ki_pos, size,
283 dmflags, &iolock);
284 if (ret) {
285 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
286 if (unlikely(ioflags & IO_ISDIRECT))
287 mutex_unlock(&inode->i_mutex);
288 return ret;
289 }
290 }
291
292 if (unlikely(ioflags & IO_ISDIRECT)) { 269 if (unlikely(ioflags & IO_ISDIRECT)) {
293 if (inode->i_mapping->nrpages) { 270 if (inode->i_mapping->nrpages) {
294 ret = -xfs_flushinval_pages(ip, 271 ret = -xfs_flushinval_pages(ip,
@@ -321,7 +298,6 @@ xfs_file_splice_read(
321 unsigned int flags) 298 unsigned int flags)
322{ 299{
323 struct xfs_inode *ip = XFS_I(infilp->f_mapping->host); 300 struct xfs_inode *ip = XFS_I(infilp->f_mapping->host);
324 struct xfs_mount *mp = ip->i_mount;
325 int ioflags = 0; 301 int ioflags = 0;
326 ssize_t ret; 302 ssize_t ret;
327 303
@@ -335,18 +311,6 @@ xfs_file_splice_read(
335 311
336 xfs_ilock(ip, XFS_IOLOCK_SHARED); 312 xfs_ilock(ip, XFS_IOLOCK_SHARED);
337 313
338 if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
339 int iolock = XFS_IOLOCK_SHARED;
340 int error;
341
342 error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count,
343 FILP_DELAY_FLAG(infilp), &iolock);
344 if (error) {
345 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
346 return -error;
347 }
348 }
349
350 trace_xfs_file_splice_read(ip, count, *ppos, ioflags); 314 trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
351 315
352 ret = generic_file_splice_read(infilp, ppos, pipe, count, flags); 316 ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
@@ -367,7 +331,6 @@ xfs_file_splice_write(
367{ 331{
368 struct inode *inode = outfilp->f_mapping->host; 332 struct inode *inode = outfilp->f_mapping->host;
369 struct xfs_inode *ip = XFS_I(inode); 333 struct xfs_inode *ip = XFS_I(inode);
370 struct xfs_mount *mp = ip->i_mount;
371 xfs_fsize_t isize, new_size; 334 xfs_fsize_t isize, new_size;
372 int ioflags = 0; 335 int ioflags = 0;
373 ssize_t ret; 336 ssize_t ret;
@@ -382,18 +345,6 @@ xfs_file_splice_write(
382 345
383 xfs_ilock(ip, XFS_IOLOCK_EXCL); 346 xfs_ilock(ip, XFS_IOLOCK_EXCL);
384 347
385 if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
386 int iolock = XFS_IOLOCK_EXCL;
387 int error;
388
389 error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count,
390 FILP_DELAY_FLAG(outfilp), &iolock);
391 if (error) {
392 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
393 return -error;
394 }
395 }
396
397 new_size = *ppos + count; 348 new_size = *ppos + count;
398 349
399 xfs_ilock(ip, XFS_ILOCK_EXCL); 350 xfs_ilock(ip, XFS_ILOCK_EXCL);
@@ -463,7 +414,7 @@ xfs_zero_last_block(
463 last_fsb = XFS_B_TO_FSBT(mp, isize); 414 last_fsb = XFS_B_TO_FSBT(mp, isize);
464 nimaps = 1; 415 nimaps = 1;
465 error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap, 416 error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
466 &nimaps, NULL, NULL); 417 &nimaps, NULL);
467 if (error) { 418 if (error) {
468 return error; 419 return error;
469 } 420 }
@@ -558,7 +509,7 @@ xfs_zero_eof(
558 nimaps = 1; 509 nimaps = 1;
559 zero_count_fsb = end_zero_fsb - start_zero_fsb + 1; 510 zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
560 error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb, 511 error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
561 0, NULL, 0, &imap, &nimaps, NULL, NULL); 512 0, NULL, 0, &imap, &nimaps, NULL);
562 if (error) { 513 if (error) {
563 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); 514 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
564 return error; 515 return error;
@@ -627,7 +578,6 @@ xfs_file_aio_write(
627 int ioflags = 0; 578 int ioflags = 0;
628 xfs_fsize_t isize, new_size; 579 xfs_fsize_t isize, new_size;
629 int iolock; 580 int iolock;
630 int eventsent = 0;
631 size_t ocount = 0, count; 581 size_t ocount = 0, count;
632 int need_i_mutex; 582 int need_i_mutex;
633 583
@@ -673,33 +623,6 @@ start:
673 goto out_unlock_mutex; 623 goto out_unlock_mutex;
674 } 624 }
675 625
676 if ((DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) &&
677 !(ioflags & IO_INVIS) && !eventsent)) {
678 int dmflags = FILP_DELAY_FLAG(file);
679
680 if (need_i_mutex)
681 dmflags |= DM_FLAGS_IMUX;
682
683 xfs_iunlock(ip, XFS_ILOCK_EXCL);
684 error = XFS_SEND_DATA(ip->i_mount, DM_EVENT_WRITE, ip,
685 pos, count, dmflags, &iolock);
686 if (error) {
687 goto out_unlock_internal;
688 }
689 xfs_ilock(ip, XFS_ILOCK_EXCL);
690 eventsent = 1;
691
692 /*
693 * The iolock was dropped and reacquired in XFS_SEND_DATA
694 * so we have to recheck the size when appending.
695 * We will only "goto start;" once, since having sent the
696 * event prevents another call to XFS_SEND_DATA, which is
697 * what allows the size to change in the first place.
698 */
699 if ((file->f_flags & O_APPEND) && pos != ip->i_size)
700 goto start;
701 }
702
703 if (ioflags & IO_ISDIRECT) { 626 if (ioflags & IO_ISDIRECT) {
704 xfs_buftarg_t *target = 627 xfs_buftarg_t *target =
705 XFS_IS_REALTIME_INODE(ip) ? 628 XFS_IS_REALTIME_INODE(ip) ?
@@ -830,22 +753,6 @@ write_retry:
830 xfs_iunlock(ip, XFS_ILOCK_EXCL); 753 xfs_iunlock(ip, XFS_ILOCK_EXCL);
831 } 754 }
832 755
833 if (ret == -ENOSPC &&
834 DM_EVENT_ENABLED(ip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) {
835 xfs_iunlock(ip, iolock);
836 if (need_i_mutex)
837 mutex_unlock(&inode->i_mutex);
838 error = XFS_SEND_NAMESP(ip->i_mount, DM_EVENT_NOSPACE, ip,
839 DM_RIGHT_NULL, ip, DM_RIGHT_NULL, NULL, NULL,
840 0, 0, 0); /* Delay flag intentionally unused */
841 if (need_i_mutex)
842 mutex_lock(&inode->i_mutex);
843 xfs_ilock(ip, iolock);
844 if (error)
845 goto out_unlock_internal;
846 goto start;
847 }
848
849 error = -ret; 756 error = -ret;
850 if (ret <= 0) 757 if (ret <= 0)
851 goto out_unlock_internal; 758 goto out_unlock_internal;
@@ -1014,9 +921,6 @@ const struct file_operations xfs_file_operations = {
1014 .open = xfs_file_open, 921 .open = xfs_file_open,
1015 .release = xfs_file_release, 922 .release = xfs_file_release,
1016 .fsync = xfs_file_fsync, 923 .fsync = xfs_file_fsync,
1017#ifdef HAVE_FOP_OPEN_EXEC
1018 .open_exec = xfs_file_open_exec,
1019#endif
1020}; 924};
1021 925
1022const struct file_operations xfs_dir_file_operations = { 926const struct file_operations xfs_dir_file_operations = {
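Besides removing the DMAPI event hooks from the read, write and splice paths, the xfs_file.c hunks switch to the two-argument xfs_trans_ijoin(), which joins the locked inode to the transaction and keeps it held across commit, so the separate xfs_trans_ihold() call disappears. A minimal sketch of the resulting fsync-style sequence, assuming the transaction has already been allocated and reserved; the helper name is made up for illustration:

STATIC int
xfs_fsync_log_inode_sketch(		/* hypothetical helper */
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			*log_flushed)
{
	int			error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip);	/* join + hold in one call, no lock flags */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	xfs_trans_set_sync(tp);
	error = _xfs_trans_commit(tp, 0, log_flushed);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}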
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index b6918d76bc7b..1f279b012f94 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -21,10 +21,6 @@
21#include "xfs_inode.h" 21#include "xfs_inode.h"
22#include "xfs_trace.h" 22#include "xfs_trace.h"
23 23
24int fs_noerr(void) { return 0; }
25int fs_nosys(void) { return ENOSYS; }
26void fs_noval(void) { return; }
27
28/* 24/*
29 * note: all filemap functions return negative error codes. These 25 * note: all filemap functions return negative error codes. These
30 * need to be inverted before returning to the xfs core functions. 26 * need to be inverted before returning to the xfs core functions.
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.h b/fs/xfs/linux-2.6/xfs_fs_subr.h
deleted file mode 100644
index 82bb19b2599e..000000000000
--- a/fs/xfs/linux-2.6/xfs_fs_subr.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/*
2 * Copyright (c) 2000,2002,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef __XFS_FS_SUBR_H__
19#define __XFS_FS_SUBR_H__
20
21extern int fs_noerr(void);
22extern int fs_nosys(void);
23extern void fs_noval(void);
24
25#endif /* __XFS_FS_SUBR_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 699b60cbab9c..237f5ffb2ee8 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -23,24 +23,15 @@
23#include "xfs_trans.h" 23#include "xfs_trans.h"
24#include "xfs_sb.h" 24#include "xfs_sb.h"
25#include "xfs_ag.h" 25#include "xfs_ag.h"
26#include "xfs_dir2.h"
27#include "xfs_alloc.h" 26#include "xfs_alloc.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 27#include "xfs_mount.h"
30#include "xfs_bmap_btree.h" 28#include "xfs_bmap_btree.h"
31#include "xfs_alloc_btree.h"
32#include "xfs_ialloc_btree.h"
33#include "xfs_attr_sf.h"
34#include "xfs_dir2_sf.h"
35#include "xfs_dinode.h" 29#include "xfs_dinode.h"
36#include "xfs_inode.h" 30#include "xfs_inode.h"
37#include "xfs_ioctl.h" 31#include "xfs_ioctl.h"
38#include "xfs_btree.h"
39#include "xfs_ialloc.h"
40#include "xfs_rtalloc.h" 32#include "xfs_rtalloc.h"
41#include "xfs_itable.h" 33#include "xfs_itable.h"
42#include "xfs_error.h" 34#include "xfs_error.h"
43#include "xfs_rw.h"
44#include "xfs_attr.h" 35#include "xfs_attr.h"
45#include "xfs_bmap.h" 36#include "xfs_bmap.h"
46#include "xfs_buf_item.h" 37#include "xfs_buf_item.h"
@@ -679,10 +670,9 @@ xfs_ioc_bulkstat(
679 error = xfs_bulkstat_single(mp, &inlast, 670 error = xfs_bulkstat_single(mp, &inlast,
680 bulkreq.ubuffer, &done); 671 bulkreq.ubuffer, &done);
681 else /* XFS_IOC_FSBULKSTAT */ 672 else /* XFS_IOC_FSBULKSTAT */
682 error = xfs_bulkstat(mp, &inlast, &count, 673 error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one,
683 (bulkstat_one_pf)xfs_bulkstat_one, NULL, 674 sizeof(xfs_bstat_t), bulkreq.ubuffer,
684 sizeof(xfs_bstat_t), bulkreq.ubuffer, 675 &done);
685 BULKSTAT_FG_QUICK, &done);
686 676
687 if (error) 677 if (error)
688 return -error; 678 return -error;
@@ -909,7 +899,7 @@ xfs_ioctl_setattr(
909 struct xfs_dquot *olddquot = NULL; 899 struct xfs_dquot *olddquot = NULL;
910 int code; 900 int code;
911 901
912 xfs_itrace_entry(ip); 902 trace_xfs_ioctl_setattr(ip);
913 903
914 if (mp->m_flags & XFS_MOUNT_RDONLY) 904 if (mp->m_flags & XFS_MOUNT_RDONLY)
915 return XFS_ERROR(EROFS); 905 return XFS_ERROR(EROFS);
@@ -1044,8 +1034,7 @@ xfs_ioctl_setattr(
1044 } 1034 }
1045 } 1035 }
1046 1036
1047 xfs_trans_ijoin(tp, ip, lock_flags); 1037 xfs_trans_ijoin(tp, ip);
1048 xfs_trans_ihold(tp, ip);
1049 1038
1050 /* 1039 /*
1051 * Change file ownership. Must be the owner or privileged. 1040 * Change file ownership. Must be the owner or privileged.
@@ -1117,16 +1106,7 @@ xfs_ioctl_setattr(
1117 xfs_qm_dqrele(udqp); 1106 xfs_qm_dqrele(udqp);
1118 xfs_qm_dqrele(gdqp); 1107 xfs_qm_dqrele(gdqp);
1119 1108
1120 if (code) 1109 return code;
1121 return code;
1122
1123 if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE)) {
1124 XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, ip, DM_RIGHT_NULL,
1125 NULL, DM_RIGHT_NULL, NULL, NULL, 0, 0,
1126 (mask & FSX_NONBLOCK) ? DM_FLAGS_NDELAY : 0);
1127 }
1128
1129 return 0;
1130 1110
1131 error_return: 1111 error_return:
1132 xfs_qm_dqrele(udqp); 1112 xfs_qm_dqrele(udqp);
@@ -1302,7 +1282,7 @@ xfs_file_ioctl(
1302 if (filp->f_mode & FMODE_NOCMTIME) 1282 if (filp->f_mode & FMODE_NOCMTIME)
1303 ioflags |= IO_INVIS; 1283 ioflags |= IO_INVIS;
1304 1284
1305 xfs_itrace_entry(ip); 1285 trace_xfs_file_ioctl(ip);
1306 1286
1307 switch (cmd) { 1287 switch (cmd) {
1308 case XFS_IOC_ALLOCSP: 1288 case XFS_IOC_ALLOCSP:
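The bulkstat hunk above reflects the simplified xfs_bulkstat() interface: the formatter is passed directly, and the private_data, starting-bno and BULKSTAT_FG_QUICK arguments are gone. A sketch of a call with the new argument list; the wrapper function and the batch size are illustrative, not part of the patch:

STATIC int
xfs_bulkstat_call_sketch(		/* illustrative wrapper */
	struct xfs_mount	*mp,
	char __user		*ubuffer)
{
	xfs_ino_t		lastino = 0;
	int			count = 64;	/* arbitrary batch size */
	int			done;
	int			error;

	error = xfs_bulkstat(mp, &lastino, &count, xfs_bulkstat_one,
			     sizeof(xfs_bstat_t), ubuffer, &done);
	return error ? -error : 0;
}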
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index 9287135e9bfc..6c83f7f62dc9 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -28,12 +28,8 @@
28#include "xfs_trans.h" 28#include "xfs_trans.h"
29#include "xfs_sb.h" 29#include "xfs_sb.h"
30#include "xfs_ag.h" 30#include "xfs_ag.h"
31#include "xfs_dir2.h"
32#include "xfs_dmapi.h"
33#include "xfs_mount.h" 31#include "xfs_mount.h"
34#include "xfs_bmap_btree.h" 32#include "xfs_bmap_btree.h"
35#include "xfs_attr_sf.h"
36#include "xfs_dir2_sf.h"
37#include "xfs_vnode.h" 33#include "xfs_vnode.h"
38#include "xfs_dinode.h" 34#include "xfs_dinode.h"
39#include "xfs_inode.h" 35#include "xfs_inode.h"
@@ -237,15 +233,12 @@ xfs_bulkstat_one_compat(
237 xfs_ino_t ino, /* inode number to get data for */ 233 xfs_ino_t ino, /* inode number to get data for */
238 void __user *buffer, /* buffer to place output in */ 234 void __user *buffer, /* buffer to place output in */
239 int ubsize, /* size of buffer */ 235 int ubsize, /* size of buffer */
240 void *private_data, /* my private data */
241 xfs_daddr_t bno, /* starting bno of inode cluster */
242 int *ubused, /* bytes used by me */ 236 int *ubused, /* bytes used by me */
243 void *dibuff, /* on-disk inode buffer */
244 int *stat) /* BULKSTAT_RV_... */ 237 int *stat) /* BULKSTAT_RV_... */
245{ 238{
246 return xfs_bulkstat_one_int(mp, ino, buffer, ubsize, 239 return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
247 xfs_bulkstat_one_fmt_compat, bno, 240 xfs_bulkstat_one_fmt_compat,
248 ubused, dibuff, stat); 241 ubused, stat);
249} 242}
250 243
251/* copied from xfs_ioctl.c */ 244/* copied from xfs_ioctl.c */
@@ -298,13 +291,11 @@ xfs_compat_ioc_bulkstat(
298 int res; 291 int res;
299 292
300 error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer, 293 error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer,
301 sizeof(compat_xfs_bstat_t), 294 sizeof(compat_xfs_bstat_t), 0, &res);
302 NULL, 0, NULL, NULL, &res);
303 } else if (cmd == XFS_IOC_FSBULKSTAT_32) { 295 } else if (cmd == XFS_IOC_FSBULKSTAT_32) {
304 error = xfs_bulkstat(mp, &inlast, &count, 296 error = xfs_bulkstat(mp, &inlast, &count,
305 xfs_bulkstat_one_compat, NULL, 297 xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t),
306 sizeof(compat_xfs_bstat_t), bulkreq.ubuffer, 298 bulkreq.ubuffer, &done);
307 BULKSTAT_FG_QUICK, &done);
308 } else 299 } else
309 error = XFS_ERROR(EINVAL); 300 error = XFS_ERROR(EINVAL);
310 if (error) 301 if (error)
@@ -549,7 +540,7 @@ xfs_file_compat_ioctl(
549 if (filp->f_mode & FMODE_NOCMTIME) 540 if (filp->f_mode & FMODE_NOCMTIME)
550 ioflags |= IO_INVIS; 541 ioflags |= IO_INVIS;
551 542
552 xfs_itrace_entry(ip); 543 trace_xfs_file_compat_ioctl(ip);
553 544
554 switch (cmd) { 545 switch (cmd) {
555 /* No size or alignment issues on any arch */ 546 /* No size or alignment issues on any arch */
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 44f0b2de153e..536b81e63a3d 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -24,21 +24,13 @@
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_alloc.h" 27#include "xfs_alloc.h"
29#include "xfs_dmapi.h"
30#include "xfs_quota.h" 28#include "xfs_quota.h"
31#include "xfs_mount.h" 29#include "xfs_mount.h"
32#include "xfs_bmap_btree.h" 30#include "xfs_bmap_btree.h"
33#include "xfs_alloc_btree.h"
34#include "xfs_ialloc_btree.h"
35#include "xfs_dir2_sf.h"
36#include "xfs_attr_sf.h"
37#include "xfs_dinode.h" 31#include "xfs_dinode.h"
38#include "xfs_inode.h" 32#include "xfs_inode.h"
39#include "xfs_bmap.h" 33#include "xfs_bmap.h"
40#include "xfs_btree.h"
41#include "xfs_ialloc.h"
42#include "xfs_rtalloc.h" 34#include "xfs_rtalloc.h"
43#include "xfs_error.h" 35#include "xfs_error.h"
44#include "xfs_itable.h" 36#include "xfs_itable.h"
@@ -496,7 +488,7 @@ xfs_vn_getattr(
496 struct xfs_inode *ip = XFS_I(inode); 488 struct xfs_inode *ip = XFS_I(inode);
497 struct xfs_mount *mp = ip->i_mount; 489 struct xfs_mount *mp = ip->i_mount;
498 490
499 xfs_itrace_entry(ip); 491 trace_xfs_getattr(ip);
500 492
501 if (XFS_FORCED_SHUTDOWN(mp)) 493 if (XFS_FORCED_SHUTDOWN(mp))
502 return XFS_ERROR(EIO); 494 return XFS_ERROR(EIO);
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index facfb323a706..998a9d7fb9c8 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -87,7 +87,6 @@
87#include <xfs_aops.h> 87#include <xfs_aops.h>
88#include <xfs_super.h> 88#include <xfs_super.h>
89#include <xfs_globals.h> 89#include <xfs_globals.h>
90#include <xfs_fs_subr.h>
91#include <xfs_buf.h> 90#include <xfs_buf.h>
92 91
93/* 92/*
diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/linux-2.6/xfs_quotaops.c
index 067cafbfc635..bfd5ac9d1f6f 100644
--- a/fs/xfs/linux-2.6/xfs_quotaops.c
+++ b/fs/xfs/linux-2.6/xfs_quotaops.c
@@ -16,7 +16,6 @@
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */ 17 */
18#include "xfs.h" 18#include "xfs.h"
19#include "xfs_dmapi.h"
20#include "xfs_sb.h" 19#include "xfs_sb.h"
21#include "xfs_inum.h" 20#include "xfs_inum.h"
22#include "xfs_log.h" 21#include "xfs_log.h"
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index f2d1718c9165..758df94690ed 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -25,14 +25,11 @@
25#include "xfs_ag.h" 25#include "xfs_ag.h"
26#include "xfs_dir2.h" 26#include "xfs_dir2.h"
27#include "xfs_alloc.h" 27#include "xfs_alloc.h"
28#include "xfs_dmapi.h"
29#include "xfs_quota.h" 28#include "xfs_quota.h"
30#include "xfs_mount.h" 29#include "xfs_mount.h"
31#include "xfs_bmap_btree.h" 30#include "xfs_bmap_btree.h"
32#include "xfs_alloc_btree.h" 31#include "xfs_alloc_btree.h"
33#include "xfs_ialloc_btree.h" 32#include "xfs_ialloc_btree.h"
34#include "xfs_dir2_sf.h"
35#include "xfs_attr_sf.h"
36#include "xfs_dinode.h" 33#include "xfs_dinode.h"
37#include "xfs_inode.h" 34#include "xfs_inode.h"
38#include "xfs_btree.h" 35#include "xfs_btree.h"
@@ -43,7 +40,6 @@
43#include "xfs_error.h" 40#include "xfs_error.h"
44#include "xfs_itable.h" 41#include "xfs_itable.h"
45#include "xfs_fsops.h" 42#include "xfs_fsops.h"
46#include "xfs_rw.h"
47#include "xfs_attr.h" 43#include "xfs_attr.h"
48#include "xfs_buf_item.h" 44#include "xfs_buf_item.h"
49#include "xfs_utils.h" 45#include "xfs_utils.h"
@@ -94,7 +90,6 @@ mempool_t *xfs_ioend_pool;
94#define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and 90#define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and
95 * unwritten extent conversion */ 91 * unwritten extent conversion */
96#define MNTOPT_NOBARRIER "nobarrier" /* .. disable */ 92#define MNTOPT_NOBARRIER "nobarrier" /* .. disable */
97#define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */
98#define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */ 93#define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */
99#define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */ 94#define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */
100#define MNTOPT_NOIKEEP "noikeep" /* free empty inode clusters */ 95#define MNTOPT_NOIKEEP "noikeep" /* free empty inode clusters */
@@ -116,9 +111,6 @@ mempool_t *xfs_ioend_pool;
116#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */ 111#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */
117#define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */ 112#define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */
118#define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */ 113#define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */
119#define MNTOPT_DMAPI "dmapi" /* DMI enabled (DMAPI / XDSM) */
120#define MNTOPT_XDSM "xdsm" /* DMI enabled (DMAPI / XDSM) */
121#define MNTOPT_DMI "dmi" /* DMI enabled (DMAPI / XDSM) */
122#define MNTOPT_DELAYLOG "delaylog" /* Delayed logging enabled */ 114#define MNTOPT_DELAYLOG "delaylog" /* Delayed logging enabled */
123#define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed logging disabled */ 115#define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed logging disabled */
124 116
@@ -172,15 +164,13 @@ suffix_strtoul(char *s, char **endp, unsigned int base)
172STATIC int 164STATIC int
173xfs_parseargs( 165xfs_parseargs(
174 struct xfs_mount *mp, 166 struct xfs_mount *mp,
175 char *options, 167 char *options)
176 char **mtpt)
177{ 168{
178 struct super_block *sb = mp->m_super; 169 struct super_block *sb = mp->m_super;
179 char *this_char, *value, *eov; 170 char *this_char, *value, *eov;
180 int dsunit = 0; 171 int dsunit = 0;
181 int dswidth = 0; 172 int dswidth = 0;
182 int iosize = 0; 173 int iosize = 0;
183 int dmapi_implies_ikeep = 1;
184 __uint8_t iosizelog = 0; 174 __uint8_t iosizelog = 0;
185 175
186 /* 176 /*
@@ -243,15 +233,10 @@ xfs_parseargs(
243 if (!mp->m_logname) 233 if (!mp->m_logname)
244 return ENOMEM; 234 return ENOMEM;
245 } else if (!strcmp(this_char, MNTOPT_MTPT)) { 235 } else if (!strcmp(this_char, MNTOPT_MTPT)) {
246 if (!value || !*value) { 236 cmn_err(CE_WARN,
247 cmn_err(CE_WARN, 237 "XFS: %s option not allowed on this system",
248 "XFS: %s option requires an argument", 238 this_char);
249 this_char); 239 return EINVAL;
250 return EINVAL;
251 }
252 *mtpt = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
253 if (!*mtpt)
254 return ENOMEM;
255 } else if (!strcmp(this_char, MNTOPT_RTDEV)) { 240 } else if (!strcmp(this_char, MNTOPT_RTDEV)) {
256 if (!value || !*value) { 241 if (!value || !*value) {
257 cmn_err(CE_WARN, 242 cmn_err(CE_WARN,
@@ -288,8 +273,6 @@ xfs_parseargs(
288 mp->m_flags &= ~XFS_MOUNT_GRPID; 273 mp->m_flags &= ~XFS_MOUNT_GRPID;
289 } else if (!strcmp(this_char, MNTOPT_WSYNC)) { 274 } else if (!strcmp(this_char, MNTOPT_WSYNC)) {
290 mp->m_flags |= XFS_MOUNT_WSYNC; 275 mp->m_flags |= XFS_MOUNT_WSYNC;
291 } else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) {
292 mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC;
293 } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) { 276 } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
294 mp->m_flags |= XFS_MOUNT_NORECOVERY; 277 mp->m_flags |= XFS_MOUNT_NORECOVERY;
295 } else if (!strcmp(this_char, MNTOPT_NOALIGN)) { 278 } else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
@@ -329,7 +312,6 @@ xfs_parseargs(
329 } else if (!strcmp(this_char, MNTOPT_IKEEP)) { 312 } else if (!strcmp(this_char, MNTOPT_IKEEP)) {
330 mp->m_flags |= XFS_MOUNT_IKEEP; 313 mp->m_flags |= XFS_MOUNT_IKEEP;
331 } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) { 314 } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
332 dmapi_implies_ikeep = 0;
333 mp->m_flags &= ~XFS_MOUNT_IKEEP; 315 mp->m_flags &= ~XFS_MOUNT_IKEEP;
334 } else if (!strcmp(this_char, MNTOPT_LARGEIO)) { 316 } else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
335 mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE; 317 mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
@@ -370,12 +352,6 @@ xfs_parseargs(
370 } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) { 352 } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
371 mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); 353 mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
372 mp->m_qflags &= ~XFS_OQUOTA_ENFD; 354 mp->m_qflags &= ~XFS_OQUOTA_ENFD;
373 } else if (!strcmp(this_char, MNTOPT_DMAPI)) {
374 mp->m_flags |= XFS_MOUNT_DMAPI;
375 } else if (!strcmp(this_char, MNTOPT_XDSM)) {
376 mp->m_flags |= XFS_MOUNT_DMAPI;
377 } else if (!strcmp(this_char, MNTOPT_DMI)) {
378 mp->m_flags |= XFS_MOUNT_DMAPI;
379 } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) { 355 } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
380 mp->m_flags |= XFS_MOUNT_DELAYLOG; 356 mp->m_flags |= XFS_MOUNT_DELAYLOG;
381 cmn_err(CE_WARN, 357 cmn_err(CE_WARN,
@@ -387,9 +363,11 @@ xfs_parseargs(
387 cmn_err(CE_WARN, 363 cmn_err(CE_WARN,
388 "XFS: ihashsize no longer used, option is deprecated."); 364 "XFS: ihashsize no longer used, option is deprecated.");
389 } else if (!strcmp(this_char, "osyncisdsync")) { 365 } else if (!strcmp(this_char, "osyncisdsync")) {
390 /* no-op, this is now the default */
391 cmn_err(CE_WARN, 366 cmn_err(CE_WARN,
392 "XFS: osyncisdsync is now the default, option is deprecated."); 367 "XFS: osyncisdsync has no effect, option is deprecated.");
368 } else if (!strcmp(this_char, "osyncisosync")) {
369 cmn_err(CE_WARN,
370 "XFS: osyncisosync has no effect, option is deprecated.");
393 } else if (!strcmp(this_char, "irixsgid")) { 371 } else if (!strcmp(this_char, "irixsgid")) {
394 cmn_err(CE_WARN, 372 cmn_err(CE_WARN,
395 "XFS: irixsgid is now a sysctl(2) variable, option is deprecated."); 373 "XFS: irixsgid is now a sysctl(2) variable, option is deprecated.");
@@ -430,12 +408,6 @@ xfs_parseargs(
430 return EINVAL; 408 return EINVAL;
431 } 409 }
432 410
433 if ((mp->m_flags & XFS_MOUNT_DMAPI) && (!*mtpt || *mtpt[0] == '\0')) {
434 printk("XFS: %s option needs the mount point option as well\n",
435 MNTOPT_DMAPI);
436 return EINVAL;
437 }
438
439 if ((dsunit && !dswidth) || (!dsunit && dswidth)) { 411 if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
440 cmn_err(CE_WARN, 412 cmn_err(CE_WARN,
441 "XFS: sunit and swidth must be specified together"); 413 "XFS: sunit and swidth must be specified together");
@@ -449,18 +421,6 @@ xfs_parseargs(
449 return EINVAL; 421 return EINVAL;
450 } 422 }
451 423
452 /*
453 * Applications using DMI filesystems often expect the
454 * inode generation number to be monotonically increasing.
455 * If we delete inode chunks we break this assumption, so
456 * keep unused inode chunks on disk for DMI filesystems
457 * until we come up with a better solution.
458 * Note that if "ikeep" or "noikeep" mount options are
459 * supplied, then they are honored.
460 */
461 if ((mp->m_flags & XFS_MOUNT_DMAPI) && dmapi_implies_ikeep)
462 mp->m_flags |= XFS_MOUNT_IKEEP;
463
464done: 424done:
465 if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) { 425 if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) {
466 /* 426 /*
@@ -539,10 +499,8 @@ xfs_showargs(
539 { XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC }, 499 { XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC },
540 { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID }, 500 { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID },
541 { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY }, 501 { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY },
542 { XFS_MOUNT_OSYNCISOSYNC, "," MNTOPT_OSYNCISOSYNC },
543 { XFS_MOUNT_ATTR2, "," MNTOPT_ATTR2 }, 502 { XFS_MOUNT_ATTR2, "," MNTOPT_ATTR2 },
544 { XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM }, 503 { XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM },
545 { XFS_MOUNT_DMAPI, "," MNTOPT_DMAPI },
546 { XFS_MOUNT_GRPID, "," MNTOPT_GRPID }, 504 { XFS_MOUNT_GRPID, "," MNTOPT_GRPID },
547 { XFS_MOUNT_DELAYLOG, "," MNTOPT_DELAYLOG }, 505 { XFS_MOUNT_DELAYLOG, "," MNTOPT_DELAYLOG },
548 { 0, NULL } 506 { 0, NULL }
@@ -947,7 +905,7 @@ xfs_fs_destroy_inode(
947{ 905{
948 struct xfs_inode *ip = XFS_I(inode); 906 struct xfs_inode *ip = XFS_I(inode);
949 907
950 xfs_itrace_entry(ip); 908 trace_xfs_destroy_inode(ip);
951 909
952 XFS_STATS_INC(vn_reclaim); 910 XFS_STATS_INC(vn_reclaim);
953 911
@@ -1063,10 +1021,8 @@ xfs_log_inode(
1063 * an inode in another recent transaction. So we play it safe and 1021 * an inode in another recent transaction. So we play it safe and
1064 * fire off the transaction anyway. 1022 * fire off the transaction anyway.
1065 */ 1023 */
1066 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 1024 xfs_trans_ijoin(tp, ip);
1067 xfs_trans_ihold(tp, ip);
1068 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 1025 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1069 xfs_trans_set_sync(tp);
1070 error = xfs_trans_commit(tp, 0); 1026 error = xfs_trans_commit(tp, 0);
1071 xfs_ilock_demote(ip, XFS_ILOCK_EXCL); 1027 xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
1072 1028
@@ -1082,27 +1038,18 @@ xfs_fs_write_inode(
1082 struct xfs_mount *mp = ip->i_mount; 1038 struct xfs_mount *mp = ip->i_mount;
1083 int error = EAGAIN; 1039 int error = EAGAIN;
1084 1040
1085 xfs_itrace_entry(ip); 1041 trace_xfs_write_inode(ip);
1086 1042
1087 if (XFS_FORCED_SHUTDOWN(mp)) 1043 if (XFS_FORCED_SHUTDOWN(mp))
1088 return XFS_ERROR(EIO); 1044 return XFS_ERROR(EIO);
1089 1045
1090 if (wbc->sync_mode == WB_SYNC_ALL) { 1046 if (wbc->sync_mode == WB_SYNC_ALL) {
1091 /* 1047 /*
1092 * Make sure the inode has hit stable storage. By using the 1048 * Make sure the inode has made it into the log. Instead
1093 * log and the fsync transactions we reduce the IOs we have 1049 * of forcing it all the way to stable storage using a
1094 * to do here from two (log and inode) to just the log. 1050 * synchronous transaction we let the log force inside the
1095 * 1051 * ->sync_fs call do that for us, which reduces the number
1096 * Note: We still need to do a delwri write of the inode after 1052 * of synchronous log forces dramatically.
1097 * this to flush it to the backing buffer so that bulkstat
1098 * works properly if this is the first time the inode has been
1099 * written. Because we hold the ilock atomically over the
1100 * transaction commit and the inode flush we are guaranteed
1101 * that the inode is not pinned when it returns. If the flush
1102 * lock is already held, then the inode has already been
1103 * flushed once and we don't need to flush it again. Hence
1104 * the code will only flush the inode if it isn't already
1105 * being flushed.
1106 */ 1053 */
1107 xfs_ioend_wait(ip); 1054 xfs_ioend_wait(ip);
1108 xfs_ilock(ip, XFS_ILOCK_SHARED); 1055 xfs_ilock(ip, XFS_ILOCK_SHARED);
@@ -1116,27 +1063,29 @@ xfs_fs_write_inode(
1116 * We make this non-blocking if the inode is contended, return 1063 * We make this non-blocking if the inode is contended, return
1117 * EAGAIN to indicate to the caller that they did not succeed. 1064 * EAGAIN to indicate to the caller that they did not succeed.
1118 * This prevents the flush path from blocking on inodes inside 1065 * This prevents the flush path from blocking on inodes inside
1119 * another operation right now, they get caught later by xfs_sync. 1066 * another operation right now, they get caught later by
1067 * xfs_sync.
1120 */ 1068 */
1121 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) 1069 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
1122 goto out; 1070 goto out;
1123 }
1124 1071
1125 if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) 1072 if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
1126 goto out_unlock; 1073 goto out_unlock;
1127 1074
1128 /* 1075 /*
1129 * Now we have the flush lock and the inode is not pinned, we can check 1076 * Now we have the flush lock and the inode is not pinned, we
1130 * if the inode is really clean as we know that there are no pending 1077 * can check if the inode is really clean as we know that
1131 * transaction completions, it is not waiting on the delayed write 1078 * there are no pending transaction completions, it is not
1132 * queue and there is no IO in progress. 1079 * waiting on the delayed write queue and there is no IO in
1133 */ 1080 * progress.
1134 if (xfs_inode_clean(ip)) { 1081 */
1135 xfs_ifunlock(ip); 1082 if (xfs_inode_clean(ip)) {
1136 error = 0; 1083 xfs_ifunlock(ip);
1137 goto out_unlock; 1084 error = 0;
1085 goto out_unlock;
1086 }
1087 error = xfs_iflush(ip, 0);
1138 } 1088 }
1139 error = xfs_iflush(ip, 0);
1140 1089
1141 out_unlock: 1090 out_unlock:
1142 xfs_iunlock(ip, XFS_ILOCK_SHARED); 1091 xfs_iunlock(ip, XFS_ILOCK_SHARED);
@@ -1156,7 +1105,8 @@ xfs_fs_clear_inode(
1156{ 1105{
1157 xfs_inode_t *ip = XFS_I(inode); 1106 xfs_inode_t *ip = XFS_I(inode);
1158 1107
1159 xfs_itrace_entry(ip); 1108 trace_xfs_clear_inode(ip);
1109
1160 XFS_STATS_INC(vn_rele); 1110 XFS_STATS_INC(vn_rele);
1161 XFS_STATS_INC(vn_remove); 1111 XFS_STATS_INC(vn_remove);
1162 XFS_STATS_DEC(vn_active); 1112 XFS_STATS_DEC(vn_active);
@@ -1193,22 +1143,13 @@ xfs_fs_put_super(
1193{ 1143{
1194 struct xfs_mount *mp = XFS_M(sb); 1144 struct xfs_mount *mp = XFS_M(sb);
1195 1145
1146 /*
1147 * Unregister the memory shrinker before we tear down the mount
1148 * structure so we don't have memory reclaim racing with us here.
1149 */
1150 xfs_inode_shrinker_unregister(mp);
1196 xfs_syncd_stop(mp); 1151 xfs_syncd_stop(mp);
1197 1152
1198 if (!(sb->s_flags & MS_RDONLY)) {
1199 /*
1200 * XXX(hch): this should be SYNC_WAIT.
1201 *
1202 * Or more likely not needed at all because the VFS is already
1203 * calling ->sync_fs after shutting down all filestem
1204 * operations and just before calling ->put_super.
1205 */
1206 xfs_sync_data(mp, 0);
1207 xfs_sync_attr(mp, 0);
1208 }
1209
1210 XFS_SEND_PREUNMOUNT(mp);
1211
1212 /* 1153 /*
1213 * Blow away any referenced inode in the filestreams cache. 1154 * Blow away any referenced inode in the filestreams cache.
1214 * This can and will cause log traffic as inodes go inactive 1155 * This can and will cause log traffic as inodes go inactive
@@ -1218,14 +1159,10 @@ xfs_fs_put_super(
1218 1159
1219 XFS_bflush(mp->m_ddev_targp); 1160 XFS_bflush(mp->m_ddev_targp);
1220 1161
1221 XFS_SEND_UNMOUNT(mp);
1222
1223 xfs_unmountfs(mp); 1162 xfs_unmountfs(mp);
1224 xfs_freesb(mp); 1163 xfs_freesb(mp);
1225 xfs_inode_shrinker_unregister(mp);
1226 xfs_icsb_destroy_counters(mp); 1164 xfs_icsb_destroy_counters(mp);
1227 xfs_close_devices(mp); 1165 xfs_close_devices(mp);
1228 xfs_dmops_put(mp);
1229 xfs_free_fsname(mp); 1166 xfs_free_fsname(mp);
1230 kfree(mp); 1167 kfree(mp);
1231} 1168}
@@ -1543,7 +1480,6 @@ xfs_fs_fill_super(
1543 struct inode *root; 1480 struct inode *root;
1544 struct xfs_mount *mp = NULL; 1481 struct xfs_mount *mp = NULL;
1545 int flags = 0, error = ENOMEM; 1482 int flags = 0, error = ENOMEM;
1546 char *mtpt = NULL;
1547 1483
1548 mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL); 1484 mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
1549 if (!mp) 1485 if (!mp)
@@ -1559,7 +1495,7 @@ xfs_fs_fill_super(
1559 mp->m_super = sb; 1495 mp->m_super = sb;
1560 sb->s_fs_info = mp; 1496 sb->s_fs_info = mp;
1561 1497
1562 error = xfs_parseargs(mp, (char *)data, &mtpt); 1498 error = xfs_parseargs(mp, (char *)data);
1563 if (error) 1499 if (error)
1564 goto out_free_fsname; 1500 goto out_free_fsname;
1565 1501
@@ -1571,16 +1507,12 @@ xfs_fs_fill_super(
1571#endif 1507#endif
1572 sb->s_op = &xfs_super_operations; 1508 sb->s_op = &xfs_super_operations;
1573 1509
1574 error = xfs_dmops_get(mp);
1575 if (error)
1576 goto out_free_fsname;
1577
1578 if (silent) 1510 if (silent)
1579 flags |= XFS_MFSI_QUIET; 1511 flags |= XFS_MFSI_QUIET;
1580 1512
1581 error = xfs_open_devices(mp); 1513 error = xfs_open_devices(mp);
1582 if (error) 1514 if (error)
1583 goto out_put_dmops; 1515 goto out_free_fsname;
1584 1516
1585 if (xfs_icsb_init_counters(mp)) 1517 if (xfs_icsb_init_counters(mp))
1586 mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB; 1518 mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
@@ -1608,8 +1540,6 @@ xfs_fs_fill_super(
1608 if (error) 1540 if (error)
1609 goto out_filestream_unmount; 1541 goto out_filestream_unmount;
1610 1542
1611 XFS_SEND_MOUNT(mp, DM_RIGHT_NULL, mtpt, mp->m_fsname);
1612
1613 sb->s_magic = XFS_SB_MAGIC; 1543 sb->s_magic = XFS_SB_MAGIC;
1614 sb->s_blocksize = mp->m_sb.sb_blocksize; 1544 sb->s_blocksize = mp->m_sb.sb_blocksize;
1615 sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; 1545 sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
@@ -1638,7 +1568,6 @@ xfs_fs_fill_super(
1638 1568
1639 xfs_inode_shrinker_register(mp); 1569 xfs_inode_shrinker_register(mp);
1640 1570
1641 kfree(mtpt);
1642 return 0; 1571 return 0;
1643 1572
1644 out_filestream_unmount: 1573 out_filestream_unmount:
@@ -1648,11 +1577,8 @@ xfs_fs_fill_super(
1648 out_destroy_counters: 1577 out_destroy_counters:
1649 xfs_icsb_destroy_counters(mp); 1578 xfs_icsb_destroy_counters(mp);
1650 xfs_close_devices(mp); 1579 xfs_close_devices(mp);
1651 out_put_dmops:
1652 xfs_dmops_put(mp);
1653 out_free_fsname: 1580 out_free_fsname:
1654 xfs_free_fsname(mp); 1581 xfs_free_fsname(mp);
1655 kfree(mtpt);
1656 kfree(mp); 1582 kfree(mp);
1657 out: 1583 out:
1658 return -error; 1584 return -error;
@@ -1759,6 +1685,12 @@ xfs_init_zones(void)
1759 if (!xfs_trans_zone) 1685 if (!xfs_trans_zone)
1760 goto out_destroy_ifork_zone; 1686 goto out_destroy_ifork_zone;
1761 1687
1688 xfs_log_item_desc_zone =
1689 kmem_zone_init(sizeof(struct xfs_log_item_desc),
1690 "xfs_log_item_desc");
1691 if (!xfs_log_item_desc_zone)
1692 goto out_destroy_trans_zone;
1693
1762 /* 1694 /*
1763 * The size of the zone allocated buf log item is the maximum 1695 * The size of the zone allocated buf log item is the maximum
1764 * size possible under XFS. This wastes a little bit of memory, 1696 * size possible under XFS. This wastes a little bit of memory,
@@ -1768,7 +1700,7 @@ xfs_init_zones(void)
1768 (((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) / 1700 (((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) /
1769 NBWORD) * sizeof(int))), "xfs_buf_item"); 1701 NBWORD) * sizeof(int))), "xfs_buf_item");
1770 if (!xfs_buf_item_zone) 1702 if (!xfs_buf_item_zone)
1771 goto out_destroy_trans_zone; 1703 goto out_destroy_log_item_desc_zone;
1772 1704
1773 xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) + 1705 xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
1774 ((XFS_EFD_MAX_FAST_EXTENTS - 1) * 1706 ((XFS_EFD_MAX_FAST_EXTENTS - 1) *
@@ -1805,6 +1737,8 @@ xfs_init_zones(void)
1805 kmem_zone_destroy(xfs_efd_zone); 1737 kmem_zone_destroy(xfs_efd_zone);
1806 out_destroy_buf_item_zone: 1738 out_destroy_buf_item_zone:
1807 kmem_zone_destroy(xfs_buf_item_zone); 1739 kmem_zone_destroy(xfs_buf_item_zone);
1740 out_destroy_log_item_desc_zone:
1741 kmem_zone_destroy(xfs_log_item_desc_zone);
1808 out_destroy_trans_zone: 1742 out_destroy_trans_zone:
1809 kmem_zone_destroy(xfs_trans_zone); 1743 kmem_zone_destroy(xfs_trans_zone);
1810 out_destroy_ifork_zone: 1744 out_destroy_ifork_zone:
@@ -1835,6 +1769,7 @@ xfs_destroy_zones(void)
1835 kmem_zone_destroy(xfs_efi_zone); 1769 kmem_zone_destroy(xfs_efi_zone);
1836 kmem_zone_destroy(xfs_efd_zone); 1770 kmem_zone_destroy(xfs_efd_zone);
1837 kmem_zone_destroy(xfs_buf_item_zone); 1771 kmem_zone_destroy(xfs_buf_item_zone);
1772 kmem_zone_destroy(xfs_log_item_desc_zone);
1838 kmem_zone_destroy(xfs_trans_zone); 1773 kmem_zone_destroy(xfs_trans_zone);
1839 kmem_zone_destroy(xfs_ifork_zone); 1774 kmem_zone_destroy(xfs_ifork_zone);
1840 kmem_zone_destroy(xfs_dabuf_zone); 1775 kmem_zone_destroy(xfs_dabuf_zone);
@@ -1883,7 +1818,6 @@ init_xfs_fs(void)
1883 goto out_cleanup_procfs; 1818 goto out_cleanup_procfs;
1884 1819
1885 vfs_initquota(); 1820 vfs_initquota();
1886 xfs_inode_shrinker_init();
1887 1821
1888 error = register_filesystem(&xfs_fs_type); 1822 error = register_filesystem(&xfs_fs_type);
1889 if (error) 1823 if (error)
@@ -1911,7 +1845,6 @@ exit_xfs_fs(void)
1911{ 1845{
1912 vfs_exitquota(); 1846 vfs_exitquota();
1913 unregister_filesystem(&xfs_fs_type); 1847 unregister_filesystem(&xfs_fs_type);
1914 xfs_inode_shrinker_destroy();
1915 xfs_sysctl_unregister(); 1848 xfs_sysctl_unregister();
1916 xfs_cleanup_procfs(); 1849 xfs_cleanup_procfs();
1917 xfs_buf_terminate(); 1850 xfs_buf_terminate();
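The xfs_init_zones() hunks add a zone for struct xfs_log_item_desc and thread it into the existing unwind chain, so a later allocation failure still tears down everything allocated before it. A trimmed sketch of that pattern, reduced to the two zones involved (the real function sets up many more zones and the helper name here is invented):

STATIC int
xfs_init_zones_sketch(void)		/* abbreviated for illustration */
{
	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
	if (!xfs_trans_zone)
		return -ENOMEM;

	xfs_log_item_desc_zone =
		kmem_zone_init(sizeof(struct xfs_log_item_desc),
			       "xfs_log_item_desc");
	if (!xfs_log_item_desc_zone)
		goto out_destroy_trans_zone;

	return 0;

 out_destroy_trans_zone:
	kmem_zone_destroy(xfs_trans_zone);
	return -ENOMEM;
}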
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index 519618e9279e..1ef4a4d2d997 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -56,12 +56,6 @@ extern void xfs_qm_exit(void);
56# define XFS_BIGFS_STRING 56# define XFS_BIGFS_STRING
57#endif 57#endif
58 58
59#ifdef CONFIG_XFS_DMAPI
60# define XFS_DMAPI_STRING "dmapi support, "
61#else
62# define XFS_DMAPI_STRING
63#endif
64
65#ifdef DEBUG 59#ifdef DEBUG
66# define XFS_DBG_STRING "debug" 60# define XFS_DBG_STRING "debug"
67#else 61#else
@@ -72,7 +66,6 @@ extern void xfs_qm_exit(void);
72 XFS_SECURITY_STRING \ 66 XFS_SECURITY_STRING \
73 XFS_REALTIME_STRING \ 67 XFS_REALTIME_STRING \
74 XFS_BIGFS_STRING \ 68 XFS_BIGFS_STRING \
75 XFS_DMAPI_STRING \
76 XFS_DBG_STRING /* DBG must be last */ 69 XFS_DBG_STRING /* DBG must be last */
77 70
78struct xfs_inode; 71struct xfs_inode;
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index ef7f0218bccb..dfcbd98d1599 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -24,25 +24,14 @@
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 27#include "xfs_mount.h"
30#include "xfs_bmap_btree.h" 28#include "xfs_bmap_btree.h"
31#include "xfs_alloc_btree.h"
32#include "xfs_ialloc_btree.h"
33#include "xfs_btree.h"
34#include "xfs_dir2_sf.h"
35#include "xfs_attr_sf.h"
36#include "xfs_inode.h" 29#include "xfs_inode.h"
37#include "xfs_dinode.h" 30#include "xfs_dinode.h"
38#include "xfs_error.h" 31#include "xfs_error.h"
39#include "xfs_mru_cache.h"
40#include "xfs_filestream.h" 32#include "xfs_filestream.h"
41#include "xfs_vnodeops.h" 33#include "xfs_vnodeops.h"
42#include "xfs_utils.h"
43#include "xfs_buf_item.h"
44#include "xfs_inode_item.h" 34#include "xfs_inode_item.h"
45#include "xfs_rw.h"
46#include "xfs_quota.h" 35#include "xfs_quota.h"
47#include "xfs_trace.h" 36#include "xfs_trace.h"
48 37
@@ -144,6 +133,41 @@ restart:
144 return last_error; 133 return last_error;
145} 134}
146 135
136/*
137 * Select the next per-ag structure to iterate during the walk. The reclaim
138 * walk is optimised only to walk AGs with reclaimable inodes in them.
139 */
140static struct xfs_perag *
141xfs_inode_ag_iter_next_pag(
142 struct xfs_mount *mp,
143 xfs_agnumber_t *first,
144 int tag)
145{
146 struct xfs_perag *pag = NULL;
147
148 if (tag == XFS_ICI_RECLAIM_TAG) {
149 int found;
150 int ref;
151
152 spin_lock(&mp->m_perag_lock);
153 found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
154 (void **)&pag, *first, 1, tag);
155 if (found <= 0) {
156 spin_unlock(&mp->m_perag_lock);
157 return NULL;
158 }
159 *first = pag->pag_agno + 1;
160 /* open coded pag reference increment */
161 ref = atomic_inc_return(&pag->pag_ref);
162 spin_unlock(&mp->m_perag_lock);
163 trace_xfs_perag_get_reclaim(mp, pag->pag_agno, ref, _RET_IP_);
164 } else {
165 pag = xfs_perag_get(mp, *first);
166 (*first)++;
167 }
168 return pag;
169}
170
147int 171int
148xfs_inode_ag_iterator( 172xfs_inode_ag_iterator(
149 struct xfs_mount *mp, 173 struct xfs_mount *mp,
@@ -154,16 +178,15 @@ xfs_inode_ag_iterator(
154 int exclusive, 178 int exclusive,
155 int *nr_to_scan) 179 int *nr_to_scan)
156{ 180{
181 struct xfs_perag *pag;
157 int error = 0; 182 int error = 0;
158 int last_error = 0; 183 int last_error = 0;
159 xfs_agnumber_t ag; 184 xfs_agnumber_t ag;
160 int nr; 185 int nr;
161 186
162 nr = nr_to_scan ? *nr_to_scan : INT_MAX; 187 nr = nr_to_scan ? *nr_to_scan : INT_MAX;
163 for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) { 188 ag = 0;
164 struct xfs_perag *pag; 189 while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag, tag))) {
165
166 pag = xfs_perag_get(mp, ag);
167 error = xfs_inode_ag_walk(mp, pag, execute, flags, tag, 190 error = xfs_inode_ag_walk(mp, pag, execute, flags, tag,
168 exclusive, &nr); 191 exclusive, &nr);
169 xfs_perag_put(pag); 192 xfs_perag_put(pag);
@@ -285,7 +308,7 @@ xfs_sync_inode_attr(
285/* 308/*
286 * Write out pagecache data for the whole filesystem. 309 * Write out pagecache data for the whole filesystem.
287 */ 310 */
288int 311STATIC int
289xfs_sync_data( 312xfs_sync_data(
290 struct xfs_mount *mp, 313 struct xfs_mount *mp,
291 int flags) 314 int flags)
@@ -306,7 +329,7 @@ xfs_sync_data(
306/* 329/*
307 * Write out inode metadata (attributes) for the whole filesystem. 330 * Write out inode metadata (attributes) for the whole filesystem.
308 */ 331 */
309int 332STATIC int
310xfs_sync_attr( 333xfs_sync_attr(
311 struct xfs_mount *mp, 334 struct xfs_mount *mp,
312 int flags) 335 int flags)
@@ -339,8 +362,7 @@ xfs_commit_dummy_trans(
339 362
340 xfs_ilock(ip, XFS_ILOCK_EXCL); 363 xfs_ilock(ip, XFS_ILOCK_EXCL);
341 364
342 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 365 xfs_trans_ijoin(tp, ip);
343 xfs_trans_ihold(tp, ip);
344 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 366 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
345 error = xfs_trans_commit(tp, 0); 367 error = xfs_trans_commit(tp, 0);
346 xfs_iunlock(ip, XFS_ILOCK_EXCL); 368 xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -640,6 +662,17 @@ __xfs_inode_set_reclaim_tag(
640 radix_tree_tag_set(&pag->pag_ici_root, 662 radix_tree_tag_set(&pag->pag_ici_root,
641 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), 663 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
642 XFS_ICI_RECLAIM_TAG); 664 XFS_ICI_RECLAIM_TAG);
665
666 if (!pag->pag_ici_reclaimable) {
667 /* propagate the reclaim tag up into the perag radix tree */
668 spin_lock(&ip->i_mount->m_perag_lock);
669 radix_tree_tag_set(&ip->i_mount->m_perag_tree,
670 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
671 XFS_ICI_RECLAIM_TAG);
672 spin_unlock(&ip->i_mount->m_perag_lock);
673 trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
674 -1, _RET_IP_);
675 }
643 pag->pag_ici_reclaimable++; 676 pag->pag_ici_reclaimable++;
644} 677}
645 678
@@ -674,6 +707,16 @@ __xfs_inode_clear_reclaim_tag(
674 radix_tree_tag_clear(&pag->pag_ici_root, 707 radix_tree_tag_clear(&pag->pag_ici_root,
675 XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); 708 XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
676 pag->pag_ici_reclaimable--; 709 pag->pag_ici_reclaimable--;
710 if (!pag->pag_ici_reclaimable) {
711 /* clear the reclaim tag from the perag radix tree */
712 spin_lock(&ip->i_mount->m_perag_lock);
713 radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
714 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
715 XFS_ICI_RECLAIM_TAG);
716 spin_unlock(&ip->i_mount->m_perag_lock);
717 trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
718 -1, _RET_IP_);
719 }
677} 720}
678 721
679/* 722/*
@@ -812,7 +855,36 @@ out:
812reclaim: 855reclaim:
813 xfs_ifunlock(ip); 856 xfs_ifunlock(ip);
814 xfs_iunlock(ip, XFS_ILOCK_EXCL); 857 xfs_iunlock(ip, XFS_ILOCK_EXCL);
815 xfs_ireclaim(ip); 858
859 XFS_STATS_INC(xs_ig_reclaims);
860 /*
861 * Remove the inode from the per-AG radix tree.
862 *
863 * Because radix_tree_delete won't complain even if the item was never
864 * added to the tree, assert that it's been there before to catch
865 * problems with the inode lifetime early on.
866 */
867 write_lock(&pag->pag_ici_lock);
868 if (!radix_tree_delete(&pag->pag_ici_root,
869 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
870 ASSERT(0);
871 write_unlock(&pag->pag_ici_lock);
872
873 /*
874 * Here we do an (almost) spurious inode lock in order to coordinate
875 * with inode cache radix tree lookups. This is because the lookup
876 * can reference the inodes in the cache without taking references.
877 *
878 * We make that OK here by ensuring that we wait until the inode is
879 * unlocked after the lookup before we go ahead and free it. We get
880 * both the ilock and the iolock because the code may need to drop the
881 * ilock, but will still hold the iolock.
882 */
883 xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
884 xfs_qm_dqdetach(ip);
885 xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
886
887 xfs_inode_free(ip);
816 return error; 888 return error;
817 889
818} 890}
@@ -828,83 +900,52 @@ xfs_reclaim_inodes(
828 900
829/* 901/*
830 * Shrinker infrastructure. 902 * Shrinker infrastructure.
831 *
832 * This is all far more complex than it needs to be. It adds a global list of
833 * mounts because the shrinkers can only call a global context. We need to make
834 * the shrinkers pass a context to avoid the need for global state.
835 */ 903 */
836static LIST_HEAD(xfs_mount_list);
837static struct rw_semaphore xfs_mount_list_lock;
838
839static int 904static int
840xfs_reclaim_inode_shrink( 905xfs_reclaim_inode_shrink(
906 struct shrinker *shrink,
841 int nr_to_scan, 907 int nr_to_scan,
842 gfp_t gfp_mask) 908 gfp_t gfp_mask)
843{ 909{
844 struct xfs_mount *mp; 910 struct xfs_mount *mp;
845 struct xfs_perag *pag; 911 struct xfs_perag *pag;
846 xfs_agnumber_t ag; 912 xfs_agnumber_t ag;
847 int reclaimable = 0; 913 int reclaimable;
848 914
915 mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
849 if (nr_to_scan) { 916 if (nr_to_scan) {
850 if (!(gfp_mask & __GFP_FS)) 917 if (!(gfp_mask & __GFP_FS))
851 return -1; 918 return -1;
852 919
853 down_read(&xfs_mount_list_lock); 920 xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
854 list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
855 xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
856 XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan); 921 XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan);
857 if (nr_to_scan <= 0) 922 /* if we don't exhaust the scan, don't bother coming back */
858 break; 923 if (nr_to_scan > 0)
859 } 924 return -1;
860 up_read(&xfs_mount_list_lock); 925 }
861 }
862 926
863 down_read(&xfs_mount_list_lock); 927 reclaimable = 0;
864 list_for_each_entry(mp, &xfs_mount_list, m_mplist) { 928 ag = 0;
865 for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) { 929 while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag,
866 pag = xfs_perag_get(mp, ag); 930 XFS_ICI_RECLAIM_TAG))) {
867 reclaimable += pag->pag_ici_reclaimable; 931 reclaimable += pag->pag_ici_reclaimable;
868 xfs_perag_put(pag); 932 xfs_perag_put(pag);
869 }
870 } 933 }
871 up_read(&xfs_mount_list_lock);
872 return reclaimable; 934 return reclaimable;
873} 935}
874 936
875static struct shrinker xfs_inode_shrinker = {
876 .shrink = xfs_reclaim_inode_shrink,
877 .seeks = DEFAULT_SEEKS,
878};
879
880void __init
881xfs_inode_shrinker_init(void)
882{
883 init_rwsem(&xfs_mount_list_lock);
884 register_shrinker(&xfs_inode_shrinker);
885}
886
887void
888xfs_inode_shrinker_destroy(void)
889{
890 ASSERT(list_empty(&xfs_mount_list));
891 unregister_shrinker(&xfs_inode_shrinker);
892}
893
894void 937void
895xfs_inode_shrinker_register( 938xfs_inode_shrinker_register(
896 struct xfs_mount *mp) 939 struct xfs_mount *mp)
897{ 940{
898 down_write(&xfs_mount_list_lock); 941 mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
899 list_add_tail(&mp->m_mplist, &xfs_mount_list); 942 mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
900 up_write(&xfs_mount_list_lock); 943 register_shrinker(&mp->m_inode_shrink);
901} 944}
902 945
903void 946void
904xfs_inode_shrinker_unregister( 947xfs_inode_shrinker_unregister(
905 struct xfs_mount *mp) 948 struct xfs_mount *mp)
906{ 949{
907 down_write(&xfs_mount_list_lock); 950 unregister_shrinker(&mp->m_inode_shrink);
908 list_del(&mp->m_mplist);
909 up_write(&xfs_mount_list_lock);
910} 951}
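
The hunks above drop the global xfs_mount_list and its rw_semaphore: each mount now embeds its own shrinker (m_inode_shrink), and the callback receives the struct shrinker pointer and recovers the owning mount with container_of(), so no global state is needed. The following stand-alone C sketch only illustrates that container_of pattern; the structure and field names are invented for the example and are not the kernel's.

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct shrink_handle {			/* stands in for struct shrinker */
		int seeks;
	};

	struct mount_ctx {			/* stands in for struct xfs_mount */
		const char		*name;
		int			reclaimable;
		struct shrink_handle	shrink;	/* embedded, like m_inode_shrink */
	};

	/* The callback gets only the embedded handle, never a global list. */
	static int shrink_cb(struct shrink_handle *sh)
	{
		struct mount_ctx *mp = container_of(sh, struct mount_ctx, shrink);

		return mp->reclaimable;
	}

	int main(void)
	{
		struct mount_ctx mp = { .name = "m0", .reclaimable = 42 };

		printf("%s: %d reclaimable\n", mp.name, shrink_cb(&mp.shrink));
		return 0;
	}

Registering one shrinker per mount also means unregister_shrinker() on unmount tears down exactly that mount's callback, with no list locking.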
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index cdcbaaca9880..fe78726196f8 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -35,9 +35,6 @@ typedef struct xfs_sync_work {
35int xfs_syncd_init(struct xfs_mount *mp); 35int xfs_syncd_init(struct xfs_mount *mp);
36void xfs_syncd_stop(struct xfs_mount *mp); 36void xfs_syncd_stop(struct xfs_mount *mp);
37 37
38int xfs_sync_attr(struct xfs_mount *mp, int flags);
39int xfs_sync_data(struct xfs_mount *mp, int flags);
40
41int xfs_quiesce_data(struct xfs_mount *mp); 38int xfs_quiesce_data(struct xfs_mount *mp);
42void xfs_quiesce_attr(struct xfs_mount *mp); 39void xfs_quiesce_attr(struct xfs_mount *mp);
43 40
@@ -55,8 +52,6 @@ int xfs_inode_ag_iterator(struct xfs_mount *mp,
55 int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags), 52 int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
56 int flags, int tag, int write_lock, int *nr_to_scan); 53 int flags, int tag, int write_lock, int *nr_to_scan);
57 54
58void xfs_inode_shrinker_init(void);
59void xfs_inode_shrinker_destroy(void);
60void xfs_inode_shrinker_register(struct xfs_mount *mp); 55void xfs_inode_shrinker_register(struct xfs_mount *mp);
61void xfs_inode_shrinker_unregister(struct xfs_mount *mp); 56void xfs_inode_shrinker_unregister(struct xfs_mount *mp);
62 57
diff --git a/fs/xfs/linux-2.6/xfs_trace.c b/fs/xfs/linux-2.6/xfs_trace.c
index d12be8470cba..88d25d4aa56e 100644
--- a/fs/xfs/linux-2.6/xfs_trace.c
+++ b/fs/xfs/linux-2.6/xfs_trace.c
@@ -24,17 +24,13 @@
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_da_btree.h" 27#include "xfs_da_btree.h"
29#include "xfs_bmap_btree.h" 28#include "xfs_bmap_btree.h"
30#include "xfs_alloc_btree.h" 29#include "xfs_alloc_btree.h"
31#include "xfs_ialloc_btree.h" 30#include "xfs_ialloc_btree.h"
32#include "xfs_dir2_sf.h"
33#include "xfs_attr_sf.h"
34#include "xfs_dinode.h" 31#include "xfs_dinode.h"
35#include "xfs_inode.h" 32#include "xfs_inode.h"
36#include "xfs_btree.h" 33#include "xfs_btree.h"
37#include "xfs_dmapi.h"
38#include "xfs_mount.h" 34#include "xfs_mount.h"
39#include "xfs_ialloc.h" 35#include "xfs_ialloc.h"
40#include "xfs_itable.h" 36#include "xfs_itable.h"
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index 73d5aa117384..c657cdca2cd2 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -124,7 +124,10 @@ DEFINE_EVENT(xfs_perag_class, name, \
124 unsigned long caller_ip), \ 124 unsigned long caller_ip), \
125 TP_ARGS(mp, agno, refcount, caller_ip)) 125 TP_ARGS(mp, agno, refcount, caller_ip))
126DEFINE_PERAG_REF_EVENT(xfs_perag_get); 126DEFINE_PERAG_REF_EVENT(xfs_perag_get);
127DEFINE_PERAG_REF_EVENT(xfs_perag_get_reclaim);
127DEFINE_PERAG_REF_EVENT(xfs_perag_put); 128DEFINE_PERAG_REF_EVENT(xfs_perag_put);
129DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim);
130DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim);
128 131
129TRACE_EVENT(xfs_attr_list_node_descend, 132TRACE_EVENT(xfs_attr_list_node_descend,
130 TP_PROTO(struct xfs_attr_list_context *ctx, 133 TP_PROTO(struct xfs_attr_list_context *ctx,
@@ -314,8 +317,6 @@ DEFINE_BUF_EVENT(xfs_buf_init);
314DEFINE_BUF_EVENT(xfs_buf_free); 317DEFINE_BUF_EVENT(xfs_buf_free);
315DEFINE_BUF_EVENT(xfs_buf_hold); 318DEFINE_BUF_EVENT(xfs_buf_hold);
316DEFINE_BUF_EVENT(xfs_buf_rele); 319DEFINE_BUF_EVENT(xfs_buf_rele);
317DEFINE_BUF_EVENT(xfs_buf_pin);
318DEFINE_BUF_EVENT(xfs_buf_unpin);
319DEFINE_BUF_EVENT(xfs_buf_iodone); 320DEFINE_BUF_EVENT(xfs_buf_iodone);
320DEFINE_BUF_EVENT(xfs_buf_iorequest); 321DEFINE_BUF_EVENT(xfs_buf_iorequest);
321DEFINE_BUF_EVENT(xfs_buf_bawrite); 322DEFINE_BUF_EVENT(xfs_buf_bawrite);
@@ -538,7 +539,7 @@ DEFINE_LOCK_EVENT(xfs_ilock_nowait);
538DEFINE_LOCK_EVENT(xfs_ilock_demote); 539DEFINE_LOCK_EVENT(xfs_ilock_demote);
539DEFINE_LOCK_EVENT(xfs_iunlock); 540DEFINE_LOCK_EVENT(xfs_iunlock);
540 541
541DECLARE_EVENT_CLASS(xfs_iget_class, 542DECLARE_EVENT_CLASS(xfs_inode_class,
542 TP_PROTO(struct xfs_inode *ip), 543 TP_PROTO(struct xfs_inode *ip),
543 TP_ARGS(ip), 544 TP_ARGS(ip),
544 TP_STRUCT__entry( 545 TP_STRUCT__entry(
@@ -554,16 +555,38 @@ DECLARE_EVENT_CLASS(xfs_iget_class,
554 __entry->ino) 555 __entry->ino)
555) 556)
556 557
557#define DEFINE_IGET_EVENT(name) \ 558#define DEFINE_INODE_EVENT(name) \
558DEFINE_EVENT(xfs_iget_class, name, \ 559DEFINE_EVENT(xfs_inode_class, name, \
559 TP_PROTO(struct xfs_inode *ip), \ 560 TP_PROTO(struct xfs_inode *ip), \
560 TP_ARGS(ip)) 561 TP_ARGS(ip))
561DEFINE_IGET_EVENT(xfs_iget_skip); 562DEFINE_INODE_EVENT(xfs_iget_skip);
562DEFINE_IGET_EVENT(xfs_iget_reclaim); 563DEFINE_INODE_EVENT(xfs_iget_reclaim);
563DEFINE_IGET_EVENT(xfs_iget_found); 564DEFINE_INODE_EVENT(xfs_iget_reclaim_fail);
564DEFINE_IGET_EVENT(xfs_iget_alloc); 565DEFINE_INODE_EVENT(xfs_iget_hit);
565 566DEFINE_INODE_EVENT(xfs_iget_miss);
566DECLARE_EVENT_CLASS(xfs_inode_class, 567
568DEFINE_INODE_EVENT(xfs_getattr);
569DEFINE_INODE_EVENT(xfs_setattr);
570DEFINE_INODE_EVENT(xfs_readlink);
571DEFINE_INODE_EVENT(xfs_alloc_file_space);
572DEFINE_INODE_EVENT(xfs_free_file_space);
573DEFINE_INODE_EVENT(xfs_readdir);
574#ifdef CONFIG_XFS_POSIX_ACL
575DEFINE_INODE_EVENT(xfs_check_acl);
576#endif
577DEFINE_INODE_EVENT(xfs_vm_bmap);
578DEFINE_INODE_EVENT(xfs_file_ioctl);
579DEFINE_INODE_EVENT(xfs_file_compat_ioctl);
580DEFINE_INODE_EVENT(xfs_ioctl_setattr);
581DEFINE_INODE_EVENT(xfs_file_fsync);
582DEFINE_INODE_EVENT(xfs_destroy_inode);
583DEFINE_INODE_EVENT(xfs_write_inode);
584DEFINE_INODE_EVENT(xfs_clear_inode);
585
586DEFINE_INODE_EVENT(xfs_dquot_dqalloc);
587DEFINE_INODE_EVENT(xfs_dquot_dqdetach);
588
589DECLARE_EVENT_CLASS(xfs_iref_class,
567 TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), 590 TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
568 TP_ARGS(ip, caller_ip), 591 TP_ARGS(ip, caller_ip),
569 TP_STRUCT__entry( 592 TP_STRUCT__entry(
@@ -588,20 +611,71 @@ DECLARE_EVENT_CLASS(xfs_inode_class,
588 (char *)__entry->caller_ip) 611 (char *)__entry->caller_ip)
589) 612)
590 613
591#define DEFINE_INODE_EVENT(name) \ 614#define DEFINE_IREF_EVENT(name) \
592DEFINE_EVENT(xfs_inode_class, name, \ 615DEFINE_EVENT(xfs_iref_class, name, \
593 TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \ 616 TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
594 TP_ARGS(ip, caller_ip)) 617 TP_ARGS(ip, caller_ip))
595DEFINE_INODE_EVENT(xfs_ihold); 618DEFINE_IREF_EVENT(xfs_ihold);
596DEFINE_INODE_EVENT(xfs_irele); 619DEFINE_IREF_EVENT(xfs_irele);
597DEFINE_INODE_EVENT(xfs_inode_pin); 620DEFINE_IREF_EVENT(xfs_inode_pin);
598DEFINE_INODE_EVENT(xfs_inode_unpin); 621DEFINE_IREF_EVENT(xfs_inode_unpin);
599DEFINE_INODE_EVENT(xfs_inode_unpin_nowait); 622DEFINE_IREF_EVENT(xfs_inode_unpin_nowait);
623
624DECLARE_EVENT_CLASS(xfs_namespace_class,
625 TP_PROTO(struct xfs_inode *dp, struct xfs_name *name),
626 TP_ARGS(dp, name),
627 TP_STRUCT__entry(
628 __field(dev_t, dev)
629 __field(xfs_ino_t, dp_ino)
630 __dynamic_array(char, name, name->len)
631 ),
632 TP_fast_assign(
633 __entry->dev = VFS_I(dp)->i_sb->s_dev;
634 __entry->dp_ino = dp->i_ino;
635 memcpy(__get_str(name), name->name, name->len);
636 ),
637 TP_printk("dev %d:%d dp ino 0x%llx name %s",
638 MAJOR(__entry->dev), MINOR(__entry->dev),
639 __entry->dp_ino,
640 __get_str(name))
641)
600 642
601/* the old xfs_itrace_entry tracer - to be replaced by s.th. in the VFS */ 643#define DEFINE_NAMESPACE_EVENT(name) \
602DEFINE_INODE_EVENT(xfs_inode); 644DEFINE_EVENT(xfs_namespace_class, name, \
603#define xfs_itrace_entry(ip) \ 645 TP_PROTO(struct xfs_inode *dp, struct xfs_name *name), \
604 trace_xfs_inode(ip, _THIS_IP_) 646 TP_ARGS(dp, name))
647DEFINE_NAMESPACE_EVENT(xfs_remove);
648DEFINE_NAMESPACE_EVENT(xfs_link);
649DEFINE_NAMESPACE_EVENT(xfs_lookup);
650DEFINE_NAMESPACE_EVENT(xfs_create);
651DEFINE_NAMESPACE_EVENT(xfs_symlink);
652
653TRACE_EVENT(xfs_rename,
654 TP_PROTO(struct xfs_inode *src_dp, struct xfs_inode *target_dp,
655 struct xfs_name *src_name, struct xfs_name *target_name),
656 TP_ARGS(src_dp, target_dp, src_name, target_name),
657 TP_STRUCT__entry(
658 __field(dev_t, dev)
659 __field(xfs_ino_t, src_dp_ino)
660 __field(xfs_ino_t, target_dp_ino)
661 __dynamic_array(char, src_name, src_name->len)
662 __dynamic_array(char, target_name, target_name->len)
663 ),
664 TP_fast_assign(
665 __entry->dev = VFS_I(src_dp)->i_sb->s_dev;
666 __entry->src_dp_ino = src_dp->i_ino;
667 __entry->target_dp_ino = target_dp->i_ino;
668 memcpy(__get_str(src_name), src_name->name, src_name->len);
669 memcpy(__get_str(target_name), target_name->name, target_name->len);
670 ),
671 TP_printk("dev %d:%d src dp ino 0x%llx target dp ino 0x%llx"
672 " src name %s target name %s",
673 MAJOR(__entry->dev), MINOR(__entry->dev),
674 __entry->src_dp_ino,
675 __entry->target_dp_ino,
676 __get_str(src_name),
677 __get_str(target_name))
678)
605 679
606DECLARE_EVENT_CLASS(xfs_dquot_class, 680DECLARE_EVENT_CLASS(xfs_dquot_class,
607 TP_PROTO(struct xfs_dquot *dqp), 681 TP_PROTO(struct xfs_dquot *dqp),
@@ -681,9 +755,6 @@ DEFINE_DQUOT_EVENT(xfs_dqrele);
681DEFINE_DQUOT_EVENT(xfs_dqflush); 755DEFINE_DQUOT_EVENT(xfs_dqflush);
682DEFINE_DQUOT_EVENT(xfs_dqflush_force); 756DEFINE_DQUOT_EVENT(xfs_dqflush_force);
683DEFINE_DQUOT_EVENT(xfs_dqflush_done); 757DEFINE_DQUOT_EVENT(xfs_dqflush_done);
684/* not really iget events, but we re-use the format */
685DEFINE_IGET_EVENT(xfs_dquot_dqalloc);
686DEFINE_IGET_EVENT(xfs_dquot_dqdetach);
687 758
688DECLARE_EVENT_CLASS(xfs_loggrant_class, 759DECLARE_EVENT_CLASS(xfs_loggrant_class,
689 TP_PROTO(struct log *log, struct xlog_ticket *tic), 760 TP_PROTO(struct log *log, struct xlog_ticket *tic),
@@ -831,33 +902,29 @@ DECLARE_EVENT_CLASS(xfs_page_class,
831 __field(loff_t, size) 902 __field(loff_t, size)
832 __field(unsigned long, offset) 903 __field(unsigned long, offset)
833 __field(int, delalloc) 904 __field(int, delalloc)
834 __field(int, unmapped)
835 __field(int, unwritten) 905 __field(int, unwritten)
836 ), 906 ),
837 TP_fast_assign( 907 TP_fast_assign(
838 int delalloc = -1, unmapped = -1, unwritten = -1; 908 int delalloc = -1, unwritten = -1;
839 909
840 if (page_has_buffers(page)) 910 if (page_has_buffers(page))
841 xfs_count_page_state(page, &delalloc, 911 xfs_count_page_state(page, &delalloc, &unwritten);
842 &unmapped, &unwritten);
843 __entry->dev = inode->i_sb->s_dev; 912 __entry->dev = inode->i_sb->s_dev;
844 __entry->ino = XFS_I(inode)->i_ino; 913 __entry->ino = XFS_I(inode)->i_ino;
845 __entry->pgoff = page_offset(page); 914 __entry->pgoff = page_offset(page);
846 __entry->size = i_size_read(inode); 915 __entry->size = i_size_read(inode);
847 __entry->offset = off; 916 __entry->offset = off;
848 __entry->delalloc = delalloc; 917 __entry->delalloc = delalloc;
849 __entry->unmapped = unmapped;
850 __entry->unwritten = unwritten; 918 __entry->unwritten = unwritten;
851 ), 919 ),
852 TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx " 920 TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
853 "delalloc %d unmapped %d unwritten %d", 921 "delalloc %d unwritten %d",
854 MAJOR(__entry->dev), MINOR(__entry->dev), 922 MAJOR(__entry->dev), MINOR(__entry->dev),
855 __entry->ino, 923 __entry->ino,
856 __entry->pgoff, 924 __entry->pgoff,
857 __entry->size, 925 __entry->size,
858 __entry->offset, 926 __entry->offset,
859 __entry->delalloc, 927 __entry->delalloc,
860 __entry->unmapped,
861 __entry->unwritten) 928 __entry->unwritten)
862) 929)
863 930
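
The xfs_trace.h changes above fold the old xfs_iget_class into a generic xfs_inode_class, add an xfs_namespace_class, and define many events against those shared classes, so one field layout and print format serves a whole family of tracepoints. The sketch below is only a user-space analogy for that DECLARE_EVENT_CLASS/DEFINE_EVENT split (one shared record and formatter, thin per-event wrappers); the names are illustrative and none of this is the real tracepoint machinery.

	#include <stdio.h>

	/* The shared "class": field layout and print format live here once. */
	struct inode_class_rec {
		unsigned int		dev;
		unsigned long long	ino;
	};

	static void inode_class_print(const char *event, struct inode_class_rec *r)
	{
		printf("%s: dev %u ino 0x%llx\n", event, r->dev, r->ino);
	}

	/* Each DEFINE_INODE_EVENT(name) becomes a thin wrapper on the class. */
	#define DEFINE_INODE_EVENT(name)					\
	static void trace_##name(unsigned int dev, unsigned long long ino)	\
	{									\
		struct inode_class_rec r = { .dev = dev, .ino = ino };		\
		inode_class_print(#name, &r);					\
	}

	DEFINE_INODE_EVENT(xfs_iget_hit)
	DEFINE_INODE_EVENT(xfs_iget_miss)

	int main(void)
	{
		trace_xfs_iget_hit(8, 0x80);
		trace_xfs_iget_miss(8, 0x81);
		return 0;
	}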
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 585e7633dfc7..e1a2f6800e01 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -23,25 +23,15 @@
23#include "xfs_trans.h" 23#include "xfs_trans.h"
24#include "xfs_sb.h" 24#include "xfs_sb.h"
25#include "xfs_ag.h" 25#include "xfs_ag.h"
26#include "xfs_dir2.h"
27#include "xfs_alloc.h" 26#include "xfs_alloc.h"
28#include "xfs_dmapi.h"
29#include "xfs_quota.h" 27#include "xfs_quota.h"
30#include "xfs_mount.h" 28#include "xfs_mount.h"
31#include "xfs_bmap_btree.h" 29#include "xfs_bmap_btree.h"
32#include "xfs_alloc_btree.h"
33#include "xfs_ialloc_btree.h"
34#include "xfs_dir2_sf.h"
35#include "xfs_attr_sf.h"
36#include "xfs_dinode.h"
37#include "xfs_inode.h" 30#include "xfs_inode.h"
38#include "xfs_btree.h"
39#include "xfs_ialloc.h"
40#include "xfs_bmap.h" 31#include "xfs_bmap.h"
41#include "xfs_rtalloc.h" 32#include "xfs_rtalloc.h"
42#include "xfs_error.h" 33#include "xfs_error.h"
43#include "xfs_itable.h" 34#include "xfs_itable.h"
44#include "xfs_rw.h"
45#include "xfs_attr.h" 35#include "xfs_attr.h"
46#include "xfs_buf_item.h" 36#include "xfs_buf_item.h"
47#include "xfs_trans_space.h" 37#include "xfs_trans_space.h"
@@ -64,8 +54,6 @@
64 flush lock - ditto. 54 flush lock - ditto.
65*/ 55*/
66 56
67STATIC void xfs_qm_dqflush_done(xfs_buf_t *, xfs_dq_logitem_t *);
68
69#ifdef DEBUG 57#ifdef DEBUG
70xfs_buftarg_t *xfs_dqerror_target; 58xfs_buftarg_t *xfs_dqerror_target;
71int xfs_do_dqerror; 59int xfs_do_dqerror;
@@ -390,21 +378,14 @@ xfs_qm_dqalloc(
390 return (ESRCH); 378 return (ESRCH);
391 } 379 }
392 380
393 /* 381 xfs_trans_ijoin_ref(tp, quotip, XFS_ILOCK_EXCL);
394 * xfs_trans_commit normally decrements the vnode ref count
395 * when it unlocks the inode. Since we want to keep the quota
396 * inode around, we bump the vnode ref count now.
397 */
398 IHOLD(quotip);
399
400 xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
401 nmaps = 1; 382 nmaps = 1;
402 if ((error = xfs_bmapi(tp, quotip, 383 if ((error = xfs_bmapi(tp, quotip,
403 offset_fsb, XFS_DQUOT_CLUSTER_SIZE_FSB, 384 offset_fsb, XFS_DQUOT_CLUSTER_SIZE_FSB,
404 XFS_BMAPI_METADATA | XFS_BMAPI_WRITE, 385 XFS_BMAPI_METADATA | XFS_BMAPI_WRITE,
405 &firstblock, 386 &firstblock,
406 XFS_QM_DQALLOC_SPACE_RES(mp), 387 XFS_QM_DQALLOC_SPACE_RES(mp),
407 &map, &nmaps, &flist, NULL))) { 388 &map, &nmaps, &flist))) {
408 goto error0; 389 goto error0;
409 } 390 }
410 ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB); 391 ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
@@ -520,7 +501,7 @@ xfs_qm_dqtobp(
520 error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset, 501 error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset,
521 XFS_DQUOT_CLUSTER_SIZE_FSB, 502 XFS_DQUOT_CLUSTER_SIZE_FSB,
522 XFS_BMAPI_METADATA, 503 XFS_BMAPI_METADATA,
523 NULL, 0, &map, &nmaps, NULL, NULL); 504 NULL, 0, &map, &nmaps, NULL);
524 505
525 xfs_iunlock(quotip, XFS_ILOCK_SHARED); 506 xfs_iunlock(quotip, XFS_ILOCK_SHARED);
526 if (error) 507 if (error)
@@ -1141,6 +1122,46 @@ xfs_qm_dqrele(
1141 xfs_qm_dqput(dqp); 1122 xfs_qm_dqput(dqp);
1142} 1123}
1143 1124
1125/*
1126 * This is the dquot flushing I/O completion routine. It is called
1127 * from interrupt level when the buffer containing the dquot is
1128 * flushed to disk. It is responsible for removing the dquot logitem
1129 * from the AIL if it has not been re-logged, and unlocking the dquot's
1130 * flush lock. This behavior is very similar to that of inodes.
1131 */
1132STATIC void
1133xfs_qm_dqflush_done(
1134 struct xfs_buf *bp,
1135 struct xfs_log_item *lip)
1136{
1137 xfs_dq_logitem_t *qip = (struct xfs_dq_logitem *)lip;
1138 xfs_dquot_t *dqp = qip->qli_dquot;
1139 struct xfs_ail *ailp = lip->li_ailp;
1140
1141 /*
1142 * We only want to pull the item from the AIL if its
1143 * location in the log has not changed since we started the flush.
1144 * Thus, we only bother if the dquot's lsn has
1145 * not changed. First we check the lsn outside the lock
1146 * since it's cheaper, and then we recheck while
1147 * holding the lock before removing the dquot from the AIL.
1148 */
1149 if ((lip->li_flags & XFS_LI_IN_AIL) &&
1150 lip->li_lsn == qip->qli_flush_lsn) {
1151
1152 /* xfs_trans_ail_delete() drops the AIL lock. */
1153 spin_lock(&ailp->xa_lock);
1154 if (lip->li_lsn == qip->qli_flush_lsn)
1155 xfs_trans_ail_delete(ailp, lip);
1156 else
1157 spin_unlock(&ailp->xa_lock);
1158 }
1159
1160 /*
1161 * Release the dq's flush lock since we're done with it.
1162 */
1163 xfs_dqfunlock(dqp);
1164}
1144 1165
1145/* 1166/*
1146 * Write a modified dquot to disk. 1167 * Write a modified dquot to disk.
@@ -1222,8 +1243,9 @@ xfs_qm_dqflush(
1222 * Attach an iodone routine so that we can remove this dquot from the 1243 * Attach an iodone routine so that we can remove this dquot from the
1223 * AIL and release the flush lock once the dquot is synced to disk. 1244 * AIL and release the flush lock once the dquot is synced to disk.
1224 */ 1245 */
1225 xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t *, xfs_log_item_t *)) 1246 xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
1226 xfs_qm_dqflush_done, &(dqp->q_logitem.qli_item)); 1247 &dqp->q_logitem.qli_item);
1248
1227 /* 1249 /*
1228 * If the buffer is pinned then push on the log so we won't 1250 * If the buffer is pinned then push on the log so we won't
1229 * get stuck waiting in the write for too long. 1251 * get stuck waiting in the write for too long.
@@ -1247,50 +1269,6 @@ xfs_qm_dqflush(
1247 1269
1248} 1270}
1249 1271
1250/*
1251 * This is the dquot flushing I/O completion routine. It is called
1252 * from interrupt level when the buffer containing the dquot is
1253 * flushed to disk. It is responsible for removing the dquot logitem
1254 * from the AIL if it has not been re-logged, and unlocking the dquot's
1255 * flush lock. This behavior is very similar to that of inodes..
1256 */
1257/*ARGSUSED*/
1258STATIC void
1259xfs_qm_dqflush_done(
1260 xfs_buf_t *bp,
1261 xfs_dq_logitem_t *qip)
1262{
1263 xfs_dquot_t *dqp;
1264 struct xfs_ail *ailp;
1265
1266 dqp = qip->qli_dquot;
1267 ailp = qip->qli_item.li_ailp;
1268
1269 /*
1270 * We only want to pull the item from the AIL if its
1271 * location in the log has not changed since we started the flush.
1272 * Thus, we only bother if the dquot's lsn has
1273 * not changed. First we check the lsn outside the lock
1274 * since it's cheaper, and then we recheck while
1275 * holding the lock before removing the dquot from the AIL.
1276 */
1277 if ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
1278 qip->qli_item.li_lsn == qip->qli_flush_lsn) {
1279
1280 /* xfs_trans_ail_delete() drops the AIL lock. */
1281 spin_lock(&ailp->xa_lock);
1282 if (qip->qli_item.li_lsn == qip->qli_flush_lsn)
1283 xfs_trans_ail_delete(ailp, (xfs_log_item_t*)qip);
1284 else
1285 spin_unlock(&ailp->xa_lock);
1286 }
1287
1288 /*
1289 * Release the dq's flush lock since we're done with it.
1290 */
1291 xfs_dqfunlock(dqp);
1292}
1293
1294int 1272int
1295xfs_qm_dqlock_nowait( 1273xfs_qm_dqlock_nowait(
1296 xfs_dquot_t *dqp) 1274 xfs_dquot_t *dqp)
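
In the xfs_dquot.c hunks above, xfs_qm_dqflush_done() moves ahead of its caller and takes a struct xfs_log_item *, so xfs_buf_attach_iodone() can be passed the function directly without a cast. It also keeps the usual double-check: test the flush LSN cheaply outside the AIL lock, then recheck under the lock before deleting the item. A rough user-space sketch of that check/recheck shape follows; the pthread mutex and field names are stand-ins, and unlike the real xfs_trans_ail_delete() the helper here does not drop the lock itself.

	#include <pthread.h>
	#include <stdio.h>

	struct ail {
		pthread_mutex_t lock;
	};

	struct log_item {
		long long	lsn;		/* current position in the log */
		int		in_ail;
	};

	static struct ail ail = { .lock = PTHREAD_MUTEX_INITIALIZER };

	static void ail_delete_locked(struct log_item *li)
	{
		li->in_ail = 0;			/* caller holds ail.lock */
	}

	static void flush_done(struct log_item *li, long long flush_lsn)
	{
		/* Cheap unlocked check: bail if the item was re-logged meanwhile. */
		if (!li->in_ail || li->lsn != flush_lsn)
			return;

		pthread_mutex_lock(&ail.lock);
		if (li->lsn == flush_lsn)	/* recheck now that we hold the lock */
			ail_delete_locked(li);
		pthread_mutex_unlock(&ail.lock);
	}

	int main(void)
	{
		struct log_item li = { .lsn = 100, .in_ail = 1 };

		flush_done(&li, 100);
		printf("in_ail after flush_done: %d\n", li.in_ail);
		return 0;
	}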
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index 8d89a24ae324..2a1f3dc10a02 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -23,42 +23,36 @@
23#include "xfs_trans.h" 23#include "xfs_trans.h"
24#include "xfs_sb.h" 24#include "xfs_sb.h"
25#include "xfs_ag.h" 25#include "xfs_ag.h"
26#include "xfs_dir2.h"
27#include "xfs_alloc.h" 26#include "xfs_alloc.h"
28#include "xfs_dmapi.h"
29#include "xfs_quota.h" 27#include "xfs_quota.h"
30#include "xfs_mount.h" 28#include "xfs_mount.h"
31#include "xfs_bmap_btree.h" 29#include "xfs_bmap_btree.h"
32#include "xfs_alloc_btree.h"
33#include "xfs_ialloc_btree.h"
34#include "xfs_dir2_sf.h"
35#include "xfs_attr_sf.h"
36#include "xfs_dinode.h"
37#include "xfs_inode.h" 30#include "xfs_inode.h"
38#include "xfs_bmap.h" 31#include "xfs_bmap.h"
39#include "xfs_btree.h"
40#include "xfs_ialloc.h"
41#include "xfs_rtalloc.h" 32#include "xfs_rtalloc.h"
42#include "xfs_error.h" 33#include "xfs_error.h"
43#include "xfs_itable.h" 34#include "xfs_itable.h"
44#include "xfs_rw.h"
45#include "xfs_attr.h" 35#include "xfs_attr.h"
46#include "xfs_buf_item.h" 36#include "xfs_buf_item.h"
47#include "xfs_trans_priv.h" 37#include "xfs_trans_priv.h"
48#include "xfs_qm.h" 38#include "xfs_qm.h"
49 39
40static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip)
41{
42 return container_of(lip, struct xfs_dq_logitem, qli_item);
43}
44
50/* 45/*
51 * returns the number of iovecs needed to log the given dquot item. 46 * returns the number of iovecs needed to log the given dquot item.
52 */ 47 */
53/* ARGSUSED */
54STATIC uint 48STATIC uint
55xfs_qm_dquot_logitem_size( 49xfs_qm_dquot_logitem_size(
56 xfs_dq_logitem_t *logitem) 50 struct xfs_log_item *lip)
57{ 51{
58 /* 52 /*
59 * we need only two iovecs, one for the format, one for the real thing 53 * we need only two iovecs, one for the format, one for the real thing
60 */ 54 */
61 return (2); 55 return 2;
62} 56}
63 57
64/* 58/*
@@ -66,22 +60,21 @@ xfs_qm_dquot_logitem_size(
66 */ 60 */
67STATIC void 61STATIC void
68xfs_qm_dquot_logitem_format( 62xfs_qm_dquot_logitem_format(
69 xfs_dq_logitem_t *logitem, 63 struct xfs_log_item *lip,
70 xfs_log_iovec_t *logvec) 64 struct xfs_log_iovec *logvec)
71{ 65{
72 ASSERT(logitem); 66 struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip);
73 ASSERT(logitem->qli_dquot);
74 67
75 logvec->i_addr = (xfs_caddr_t)&logitem->qli_format; 68 logvec->i_addr = &qlip->qli_format;
76 logvec->i_len = sizeof(xfs_dq_logformat_t); 69 logvec->i_len = sizeof(xfs_dq_logformat_t);
77 logvec->i_type = XLOG_REG_TYPE_QFORMAT; 70 logvec->i_type = XLOG_REG_TYPE_QFORMAT;
78 logvec++; 71 logvec++;
79 logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core; 72 logvec->i_addr = &qlip->qli_dquot->q_core;
80 logvec->i_len = sizeof(xfs_disk_dquot_t); 73 logvec->i_len = sizeof(xfs_disk_dquot_t);
81 logvec->i_type = XLOG_REG_TYPE_DQUOT; 74 logvec->i_type = XLOG_REG_TYPE_DQUOT;
82 75
83 ASSERT(2 == logitem->qli_item.li_desc->lid_size); 76 ASSERT(2 == lip->li_desc->lid_size);
84 logitem->qli_format.qlf_size = 2; 77 qlip->qli_format.qlf_size = 2;
85 78
86} 79}
87 80
@@ -90,9 +83,9 @@ xfs_qm_dquot_logitem_format(
90 */ 83 */
91STATIC void 84STATIC void
92xfs_qm_dquot_logitem_pin( 85xfs_qm_dquot_logitem_pin(
93 xfs_dq_logitem_t *logitem) 86 struct xfs_log_item *lip)
94{ 87{
95 xfs_dquot_t *dqp = logitem->qli_dquot; 88 struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
96 89
97 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 90 ASSERT(XFS_DQ_IS_LOCKED(dqp));
98 atomic_inc(&dqp->q_pincount); 91 atomic_inc(&dqp->q_pincount);
@@ -104,27 +97,18 @@ xfs_qm_dquot_logitem_pin(
104 * dquot must have been previously pinned with a call to 97 * dquot must have been previously pinned with a call to
105 * xfs_qm_dquot_logitem_pin(). 98 * xfs_qm_dquot_logitem_pin().
106 */ 99 */
107/* ARGSUSED */
108STATIC void 100STATIC void
109xfs_qm_dquot_logitem_unpin( 101xfs_qm_dquot_logitem_unpin(
110 xfs_dq_logitem_t *logitem) 102 struct xfs_log_item *lip,
103 int remove)
111{ 104{
112 xfs_dquot_t *dqp = logitem->qli_dquot; 105 struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
113 106
114 ASSERT(atomic_read(&dqp->q_pincount) > 0); 107 ASSERT(atomic_read(&dqp->q_pincount) > 0);
115 if (atomic_dec_and_test(&dqp->q_pincount)) 108 if (atomic_dec_and_test(&dqp->q_pincount))
116 wake_up(&dqp->q_pinwait); 109 wake_up(&dqp->q_pinwait);
117} 110}
118 111
119/* ARGSUSED */
120STATIC void
121xfs_qm_dquot_logitem_unpin_remove(
122 xfs_dq_logitem_t *logitem,
123 xfs_trans_t *tp)
124{
125 xfs_qm_dquot_logitem_unpin(logitem);
126}
127
128/* 112/*
129 * Given the logitem, this writes the corresponding dquot entry to disk 113 * Given the logitem, this writes the corresponding dquot entry to disk
130 * asynchronously. This is called with the dquot entry securely locked; 114 * asynchronously. This is called with the dquot entry securely locked;
@@ -133,12 +117,10 @@ xfs_qm_dquot_logitem_unpin_remove(
133 */ 117 */
134STATIC void 118STATIC void
135xfs_qm_dquot_logitem_push( 119xfs_qm_dquot_logitem_push(
136 xfs_dq_logitem_t *logitem) 120 struct xfs_log_item *lip)
137{ 121{
138 xfs_dquot_t *dqp; 122 struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
139 int error; 123 int error;
140
141 dqp = logitem->qli_dquot;
142 124
143 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 125 ASSERT(XFS_DQ_IS_LOCKED(dqp));
144 ASSERT(!completion_done(&dqp->q_flush)); 126 ASSERT(!completion_done(&dqp->q_flush));
@@ -160,27 +142,25 @@ xfs_qm_dquot_logitem_push(
160 xfs_dqunlock(dqp); 142 xfs_dqunlock(dqp);
161} 143}
162 144
163/*ARGSUSED*/
164STATIC xfs_lsn_t 145STATIC xfs_lsn_t
165xfs_qm_dquot_logitem_committed( 146xfs_qm_dquot_logitem_committed(
166 xfs_dq_logitem_t *l, 147 struct xfs_log_item *lip,
167 xfs_lsn_t lsn) 148 xfs_lsn_t lsn)
168{ 149{
169 /* 150 /*
170 * We always re-log the entire dquot when it becomes dirty, 151 * We always re-log the entire dquot when it becomes dirty,
171 * so, the latest copy _is_ the only one that matters. 152 * so, the latest copy _is_ the only one that matters.
172 */ 153 */
173 return (lsn); 154 return lsn;
174} 155}
175 156
176
177/* 157/*
178 * This is called to wait for the given dquot to be unpinned. 158 * This is called to wait for the given dquot to be unpinned.
179 * Most of these pin/unpin routines are plagiarized from inode code. 159 * Most of these pin/unpin routines are plagiarized from inode code.
180 */ 160 */
181void 161void
182xfs_qm_dqunpin_wait( 162xfs_qm_dqunpin_wait(
183 xfs_dquot_t *dqp) 163 struct xfs_dquot *dqp)
184{ 164{
185 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 165 ASSERT(XFS_DQ_IS_LOCKED(dqp));
186 if (atomic_read(&dqp->q_pincount) == 0) 166 if (atomic_read(&dqp->q_pincount) == 0)
@@ -206,13 +186,12 @@ xfs_qm_dqunpin_wait(
206 */ 186 */
207STATIC void 187STATIC void
208xfs_qm_dquot_logitem_pushbuf( 188xfs_qm_dquot_logitem_pushbuf(
209 xfs_dq_logitem_t *qip) 189 struct xfs_log_item *lip)
210{ 190{
211 xfs_dquot_t *dqp; 191 struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip);
212 xfs_mount_t *mp; 192 struct xfs_dquot *dqp = qlip->qli_dquot;
213 xfs_buf_t *bp; 193 struct xfs_buf *bp;
214 194
215 dqp = qip->qli_dquot;
216 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 195 ASSERT(XFS_DQ_IS_LOCKED(dqp));
217 196
218 /* 197 /*
@@ -220,22 +199,20 @@ xfs_qm_dquot_logitem_pushbuf(
220 * inode flush completed and the inode was taken off the AIL. 199 * inode flush completed and the inode was taken off the AIL.
221 * So, just get out. 200 * So, just get out.
222 */ 201 */
223 if (completion_done(&dqp->q_flush) || 202 if (completion_done(&dqp->q_flush) ||
224 ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) { 203 !(lip->li_flags & XFS_LI_IN_AIL)) {
225 xfs_dqunlock(dqp); 204 xfs_dqunlock(dqp);
226 return; 205 return;
227 } 206 }
228 mp = dqp->q_mount; 207
229 bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno, 208 bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
230 mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK); 209 dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
231 xfs_dqunlock(dqp); 210 xfs_dqunlock(dqp);
232 if (!bp) 211 if (!bp)
233 return; 212 return;
234 if (XFS_BUF_ISDELAYWRITE(bp)) 213 if (XFS_BUF_ISDELAYWRITE(bp))
235 xfs_buf_delwri_promote(bp); 214 xfs_buf_delwri_promote(bp);
236 xfs_buf_relse(bp); 215 xfs_buf_relse(bp);
237 return;
238
239} 216}
240 217
241/* 218/*
@@ -250,15 +227,14 @@ xfs_qm_dquot_logitem_pushbuf(
250 */ 227 */
251STATIC uint 228STATIC uint
252xfs_qm_dquot_logitem_trylock( 229xfs_qm_dquot_logitem_trylock(
253 xfs_dq_logitem_t *qip) 230 struct xfs_log_item *lip)
254{ 231{
255 xfs_dquot_t *dqp; 232 struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
256 233
257 dqp = qip->qli_dquot;
258 if (atomic_read(&dqp->q_pincount) > 0) 234 if (atomic_read(&dqp->q_pincount) > 0)
259 return XFS_ITEM_PINNED; 235 return XFS_ITEM_PINNED;
260 236
261 if (! xfs_qm_dqlock_nowait(dqp)) 237 if (!xfs_qm_dqlock_nowait(dqp))
262 return XFS_ITEM_LOCKED; 238 return XFS_ITEM_LOCKED;
263 239
264 if (!xfs_dqflock_nowait(dqp)) { 240 if (!xfs_dqflock_nowait(dqp)) {
@@ -269,11 +245,10 @@ xfs_qm_dquot_logitem_trylock(
269 return XFS_ITEM_PUSHBUF; 245 return XFS_ITEM_PUSHBUF;
270 } 246 }
271 247
272 ASSERT(qip->qli_item.li_flags & XFS_LI_IN_AIL); 248 ASSERT(lip->li_flags & XFS_LI_IN_AIL);
273 return XFS_ITEM_SUCCESS; 249 return XFS_ITEM_SUCCESS;
274} 250}
275 251
276
277/* 252/*
278 * Unlock the dquot associated with the log item. 253 * Unlock the dquot associated with the log item.
279 * Clear the fields of the dquot and dquot log item that 254 * Clear the fields of the dquot and dquot log item that
@@ -282,12 +257,10 @@ xfs_qm_dquot_logitem_trylock(
282 */ 257 */
283STATIC void 258STATIC void
284xfs_qm_dquot_logitem_unlock( 259xfs_qm_dquot_logitem_unlock(
285 xfs_dq_logitem_t *ql) 260 struct xfs_log_item *lip)
286{ 261{
287 xfs_dquot_t *dqp; 262 struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
288 263
289 ASSERT(ql != NULL);
290 dqp = ql->qli_dquot;
291 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 264 ASSERT(XFS_DQ_IS_LOCKED(dqp));
292 265
293 /* 266 /*
@@ -304,43 +277,32 @@ xfs_qm_dquot_logitem_unlock(
304 xfs_dqunlock(dqp); 277 xfs_dqunlock(dqp);
305} 278}
306 279
307
308/* 280/*
309 * this needs to stamp an lsn into the dquot, I think. 281 * this needs to stamp an lsn into the dquot, I think.
310 * rpc's that look at user dquot's would then have to 282 * rpc's that look at user dquot's would then have to
311 * push on the dependency recorded in the dquot 283 * push on the dependency recorded in the dquot
312 */ 284 */
313/* ARGSUSED */
314STATIC void 285STATIC void
315xfs_qm_dquot_logitem_committing( 286xfs_qm_dquot_logitem_committing(
316 xfs_dq_logitem_t *l, 287 struct xfs_log_item *lip,
317 xfs_lsn_t lsn) 288 xfs_lsn_t lsn)
318{ 289{
319 return;
320} 290}
321 291
322
323/* 292/*
324 * This is the ops vector for dquots 293 * This is the ops vector for dquots
325 */ 294 */
326static struct xfs_item_ops xfs_dquot_item_ops = { 295static struct xfs_item_ops xfs_dquot_item_ops = {
327 .iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_size, 296 .iop_size = xfs_qm_dquot_logitem_size,
328 .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) 297 .iop_format = xfs_qm_dquot_logitem_format,
329 xfs_qm_dquot_logitem_format, 298 .iop_pin = xfs_qm_dquot_logitem_pin,
330 .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_pin, 299 .iop_unpin = xfs_qm_dquot_logitem_unpin,
331 .iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unpin, 300 .iop_trylock = xfs_qm_dquot_logitem_trylock,
332 .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*)) 301 .iop_unlock = xfs_qm_dquot_logitem_unlock,
333 xfs_qm_dquot_logitem_unpin_remove, 302 .iop_committed = xfs_qm_dquot_logitem_committed,
334 .iop_trylock = (uint(*)(xfs_log_item_t*)) 303 .iop_push = xfs_qm_dquot_logitem_push,
335 xfs_qm_dquot_logitem_trylock, 304 .iop_pushbuf = xfs_qm_dquot_logitem_pushbuf,
336 .iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unlock, 305 .iop_committing = xfs_qm_dquot_logitem_committing
337 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
338 xfs_qm_dquot_logitem_committed,
339 .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_push,
340 .iop_pushbuf = (void(*)(xfs_log_item_t*))
341 xfs_qm_dquot_logitem_pushbuf,
342 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
343 xfs_qm_dquot_logitem_committing
344}; 306};
345 307
346/* 308/*
@@ -350,10 +312,9 @@ static struct xfs_item_ops xfs_dquot_item_ops = {
350 */ 312 */
351void 313void
352xfs_qm_dquot_logitem_init( 314xfs_qm_dquot_logitem_init(
353 struct xfs_dquot *dqp) 315 struct xfs_dquot *dqp)
354{ 316{
355 xfs_dq_logitem_t *lp; 317 struct xfs_dq_logitem *lp = &dqp->q_logitem;
356 lp = &dqp->q_logitem;
357 318
358 xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT, 319 xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT,
359 &xfs_dquot_item_ops); 320 &xfs_dquot_item_ops);
@@ -374,16 +335,22 @@ xfs_qm_dquot_logitem_init(
374 335
375/*------------------ QUOTAOFF LOG ITEMS -------------------*/ 336/*------------------ QUOTAOFF LOG ITEMS -------------------*/
376 337
338static inline struct xfs_qoff_logitem *QOFF_ITEM(struct xfs_log_item *lip)
339{
340 return container_of(lip, struct xfs_qoff_logitem, qql_item);
341}
342
343
377/* 344/*
378 * This returns the number of iovecs needed to log the given quotaoff item. 345 * This returns the number of iovecs needed to log the given quotaoff item.
379 * We only need 1 iovec for an quotaoff item. It just logs the 346 * We only need 1 iovec for an quotaoff item. It just logs the
380 * quotaoff_log_format structure. 347 * quotaoff_log_format structure.
381 */ 348 */
382/*ARGSUSED*/
383STATIC uint 349STATIC uint
384xfs_qm_qoff_logitem_size(xfs_qoff_logitem_t *qf) 350xfs_qm_qoff_logitem_size(
351 struct xfs_log_item *lip)
385{ 352{
386 return (1); 353 return 1;
387} 354}
388 355
389/* 356/*
@@ -394,53 +361,46 @@ xfs_qm_qoff_logitem_size(xfs_qoff_logitem_t *qf)
394 * slots in the quotaoff item have been filled. 361 * slots in the quotaoff item have been filled.
395 */ 362 */
396STATIC void 363STATIC void
397xfs_qm_qoff_logitem_format(xfs_qoff_logitem_t *qf, 364xfs_qm_qoff_logitem_format(
398 xfs_log_iovec_t *log_vector) 365 struct xfs_log_item *lip,
366 struct xfs_log_iovec *log_vector)
399{ 367{
400 ASSERT(qf->qql_format.qf_type == XFS_LI_QUOTAOFF); 368 struct xfs_qoff_logitem *qflip = QOFF_ITEM(lip);
369
370 ASSERT(qflip->qql_format.qf_type == XFS_LI_QUOTAOFF);
401 371
402 log_vector->i_addr = (xfs_caddr_t)&(qf->qql_format); 372 log_vector->i_addr = &qflip->qql_format;
403 log_vector->i_len = sizeof(xfs_qoff_logitem_t); 373 log_vector->i_len = sizeof(xfs_qoff_logitem_t);
404 log_vector->i_type = XLOG_REG_TYPE_QUOTAOFF; 374 log_vector->i_type = XLOG_REG_TYPE_QUOTAOFF;
405 qf->qql_format.qf_size = 1; 375 qflip->qql_format.qf_size = 1;
406} 376}
407 377
408
409/* 378/*
410 * Pinning has no meaning for an quotaoff item, so just return. 379 * Pinning has no meaning for an quotaoff item, so just return.
411 */ 380 */
412/*ARGSUSED*/
413STATIC void 381STATIC void
414xfs_qm_qoff_logitem_pin(xfs_qoff_logitem_t *qf) 382xfs_qm_qoff_logitem_pin(
383 struct xfs_log_item *lip)
415{ 384{
416 return;
417} 385}
418 386
419
420/* 387/*
421 * Since pinning has no meaning for an quotaoff item, unpinning does 388 * Since pinning has no meaning for an quotaoff item, unpinning does
422 * not either. 389 * not either.
423 */ 390 */
424/*ARGSUSED*/
425STATIC void 391STATIC void
426xfs_qm_qoff_logitem_unpin(xfs_qoff_logitem_t *qf) 392xfs_qm_qoff_logitem_unpin(
393 struct xfs_log_item *lip,
394 int remove)
427{ 395{
428 return;
429}
430
431/*ARGSUSED*/
432STATIC void
433xfs_qm_qoff_logitem_unpin_remove(xfs_qoff_logitem_t *qf, xfs_trans_t *tp)
434{
435 return;
436} 396}
437 397
438/* 398/*
439 * Quotaoff items have no locking, so just return success. 399 * Quotaoff items have no locking, so just return success.
440 */ 400 */
441/*ARGSUSED*/
442STATIC uint 401STATIC uint
443xfs_qm_qoff_logitem_trylock(xfs_qoff_logitem_t *qf) 402xfs_qm_qoff_logitem_trylock(
403 struct xfs_log_item *lip)
444{ 404{
445 return XFS_ITEM_LOCKED; 405 return XFS_ITEM_LOCKED;
446} 406}
@@ -449,53 +409,51 @@ xfs_qm_qoff_logitem_trylock(xfs_qoff_logitem_t *qf)
449 * Quotaoff items have no locking or pushing, so return failure 409 * Quotaoff items have no locking or pushing, so return failure
450 * so that the caller doesn't bother with us. 410 * so that the caller doesn't bother with us.
451 */ 411 */
452/*ARGSUSED*/
453STATIC void 412STATIC void
454xfs_qm_qoff_logitem_unlock(xfs_qoff_logitem_t *qf) 413xfs_qm_qoff_logitem_unlock(
414 struct xfs_log_item *lip)
455{ 415{
456 return;
457} 416}
458 417
459/* 418/*
460 * The quotaoff-start-item is logged only once and cannot be moved in the log, 419 * The quotaoff-start-item is logged only once and cannot be moved in the log,
461 * so simply return the lsn at which it's been logged. 420 * so simply return the lsn at which it's been logged.
462 */ 421 */
463/*ARGSUSED*/
464STATIC xfs_lsn_t 422STATIC xfs_lsn_t
465xfs_qm_qoff_logitem_committed(xfs_qoff_logitem_t *qf, xfs_lsn_t lsn) 423xfs_qm_qoff_logitem_committed(
424 struct xfs_log_item *lip,
425 xfs_lsn_t lsn)
466{ 426{
467 return (lsn); 427 return lsn;
468} 428}
469 429
470/* 430/*
471 * There isn't much you can do to push on an quotaoff item. It is simply 431 * There isn't much you can do to push on an quotaoff item. It is simply
472 * stuck waiting for the log to be flushed to disk. 432 * stuck waiting for the log to be flushed to disk.
473 */ 433 */
474/*ARGSUSED*/
475STATIC void 434STATIC void
476xfs_qm_qoff_logitem_push(xfs_qoff_logitem_t *qf) 435xfs_qm_qoff_logitem_push(
436 struct xfs_log_item *lip)
477{ 437{
478 return;
479} 438}
480 439
481 440
482/*ARGSUSED*/
483STATIC xfs_lsn_t 441STATIC xfs_lsn_t
484xfs_qm_qoffend_logitem_committed( 442xfs_qm_qoffend_logitem_committed(
485 xfs_qoff_logitem_t *qfe, 443 struct xfs_log_item *lip,
486 xfs_lsn_t lsn) 444 xfs_lsn_t lsn)
487{ 445{
488 xfs_qoff_logitem_t *qfs; 446 struct xfs_qoff_logitem *qfe = QOFF_ITEM(lip);
489 struct xfs_ail *ailp; 447 struct xfs_qoff_logitem *qfs = qfe->qql_start_lip;
448 struct xfs_ail *ailp = qfs->qql_item.li_ailp;
490 449
491 qfs = qfe->qql_start_lip;
492 ailp = qfs->qql_item.li_ailp;
493 spin_lock(&ailp->xa_lock);
494 /* 450 /*
495 * Delete the qoff-start logitem from the AIL. 451 * Delete the qoff-start logitem from the AIL.
496 * xfs_trans_ail_delete() drops the AIL lock. 452 * xfs_trans_ail_delete() drops the AIL lock.
497 */ 453 */
454 spin_lock(&ailp->xa_lock);
498 xfs_trans_ail_delete(ailp, (xfs_log_item_t *)qfs); 455 xfs_trans_ail_delete(ailp, (xfs_log_item_t *)qfs);
456
499 kmem_free(qfs); 457 kmem_free(qfs);
500 kmem_free(qfe); 458 kmem_free(qfe);
501 return (xfs_lsn_t)-1; 459 return (xfs_lsn_t)-1;
@@ -515,71 +473,52 @@ xfs_qm_qoffend_logitem_committed(
515 * (truly makes the quotaoff irrevocable). If we do something else, 473 * (truly makes the quotaoff irrevocable). If we do something else,
516 * then maybe we don't need two. 474 * then maybe we don't need two.
517 */ 475 */
518/* ARGSUSED */
519STATIC void
520xfs_qm_qoff_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn)
521{
522 return;
523}
524
525/* ARGSUSED */
526STATIC void 476STATIC void
527xfs_qm_qoffend_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn) 477xfs_qm_qoff_logitem_committing(
478 struct xfs_log_item *lip,
479 xfs_lsn_t commit_lsn)
528{ 480{
529 return;
530} 481}
531 482
532static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = { 483static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
533 .iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size, 484 .iop_size = xfs_qm_qoff_logitem_size,
534 .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) 485 .iop_format = xfs_qm_qoff_logitem_format,
535 xfs_qm_qoff_logitem_format, 486 .iop_pin = xfs_qm_qoff_logitem_pin,
536 .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin, 487 .iop_unpin = xfs_qm_qoff_logitem_unpin,
537 .iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unpin, 488 .iop_trylock = xfs_qm_qoff_logitem_trylock,
538 .iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*)) 489 .iop_unlock = xfs_qm_qoff_logitem_unlock,
539 xfs_qm_qoff_logitem_unpin_remove, 490 .iop_committed = xfs_qm_qoffend_logitem_committed,
540 .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock, 491 .iop_push = xfs_qm_qoff_logitem_push,
541 .iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock, 492 .iop_committing = xfs_qm_qoff_logitem_committing
542 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
543 xfs_qm_qoffend_logitem_committed,
544 .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push,
545 .iop_pushbuf = NULL,
546 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
547 xfs_qm_qoffend_logitem_committing
548}; 493};
549 494
550/* 495/*
551 * This is the ops vector shared by all quotaoff-start log items. 496 * This is the ops vector shared by all quotaoff-start log items.
552 */ 497 */
553static struct xfs_item_ops xfs_qm_qoff_logitem_ops = { 498static struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
554 .iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size, 499 .iop_size = xfs_qm_qoff_logitem_size,
555 .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) 500 .iop_format = xfs_qm_qoff_logitem_format,
556 xfs_qm_qoff_logitem_format, 501 .iop_pin = xfs_qm_qoff_logitem_pin,
557 .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin, 502 .iop_unpin = xfs_qm_qoff_logitem_unpin,
558 .iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unpin, 503 .iop_trylock = xfs_qm_qoff_logitem_trylock,
559 .iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*)) 504 .iop_unlock = xfs_qm_qoff_logitem_unlock,
560 xfs_qm_qoff_logitem_unpin_remove, 505 .iop_committed = xfs_qm_qoff_logitem_committed,
561 .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock, 506 .iop_push = xfs_qm_qoff_logitem_push,
562 .iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock, 507 .iop_committing = xfs_qm_qoff_logitem_committing
563 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
564 xfs_qm_qoff_logitem_committed,
565 .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push,
566 .iop_pushbuf = NULL,
567 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
568 xfs_qm_qoff_logitem_committing
569}; 508};
570 509
571/* 510/*
572 * Allocate and initialize an quotaoff item of the correct quota type(s). 511 * Allocate and initialize an quotaoff item of the correct quota type(s).
573 */ 512 */
574xfs_qoff_logitem_t * 513struct xfs_qoff_logitem *
575xfs_qm_qoff_logitem_init( 514xfs_qm_qoff_logitem_init(
576 struct xfs_mount *mp, 515 struct xfs_mount *mp,
577 xfs_qoff_logitem_t *start, 516 struct xfs_qoff_logitem *start,
578 uint flags) 517 uint flags)
579{ 518{
580 xfs_qoff_logitem_t *qf; 519 struct xfs_qoff_logitem *qf;
581 520
582 qf = (xfs_qoff_logitem_t*) kmem_zalloc(sizeof(xfs_qoff_logitem_t), KM_SLEEP); 521 qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), KM_SLEEP);
583 522
584 xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ? 523 xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ?
585 &xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops); 524 &xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops);
@@ -587,5 +526,5 @@ xfs_qm_qoff_logitem_init(
587 qf->qql_format.qf_type = XFS_LI_QUOTAOFF; 526 qf->qql_format.qf_type = XFS_LI_QUOTAOFF;
588 qf->qql_format.qf_flags = flags; 527 qf->qql_format.qf_flags = flags;
589 qf->qql_start_lip = start; 528 qf->qql_start_lip = start;
590 return (qf); 529 return qf;
591} 530}
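
The dquot and quotaoff ops vectors above now point at functions that all take struct xfs_log_item * and downcast with the DQUOT_ITEM()/QOFF_ITEM() container_of helpers, instead of casting every function pointer to the generic prototype. The self-contained sketch below shows that shape with made-up structure names; it is an illustration of the pattern, not the XFS types.

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct log_item;

	struct item_ops {
		unsigned int (*iop_size)(struct log_item *);
	};

	struct log_item {
		const struct item_ops *ops;
	};

	struct dq_logitem {
		struct log_item	item;		/* embedded generic part */
		int		nvecs;
	};

	static inline struct dq_logitem *DQUOT_ITEM(struct log_item *lip)
	{
		return container_of(lip, struct dq_logitem, item);
	}

	static unsigned int dquot_item_size(struct log_item *lip)
	{
		return DQUOT_ITEM(lip)->nvecs;	/* typed access, no casts */
	}

	static const struct item_ops dquot_ops = {
		.iop_size = dquot_item_size,	/* no function-pointer cast needed */
	};

	int main(void)
	{
		struct dq_logitem dqi = { .item.ops = &dquot_ops, .nvecs = 2 };

		printf("size = %u\n", dqi.item.ops->iop_size(&dqi.item));
		return 0;
	}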
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 2d8b7bc792c9..9a92407109a1 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -23,25 +23,18 @@
23#include "xfs_trans.h" 23#include "xfs_trans.h"
24#include "xfs_sb.h" 24#include "xfs_sb.h"
25#include "xfs_ag.h" 25#include "xfs_ag.h"
26#include "xfs_dir2.h"
27#include "xfs_alloc.h" 26#include "xfs_alloc.h"
28#include "xfs_dmapi.h"
29#include "xfs_quota.h" 27#include "xfs_quota.h"
30#include "xfs_mount.h" 28#include "xfs_mount.h"
31#include "xfs_bmap_btree.h" 29#include "xfs_bmap_btree.h"
32#include "xfs_alloc_btree.h"
33#include "xfs_ialloc_btree.h" 30#include "xfs_ialloc_btree.h"
34#include "xfs_dir2_sf.h"
35#include "xfs_attr_sf.h"
36#include "xfs_dinode.h" 31#include "xfs_dinode.h"
37#include "xfs_inode.h" 32#include "xfs_inode.h"
38#include "xfs_btree.h"
39#include "xfs_ialloc.h" 33#include "xfs_ialloc.h"
40#include "xfs_itable.h" 34#include "xfs_itable.h"
41#include "xfs_rtalloc.h" 35#include "xfs_rtalloc.h"
42#include "xfs_error.h" 36#include "xfs_error.h"
43#include "xfs_bmap.h" 37#include "xfs_bmap.h"
44#include "xfs_rw.h"
45#include "xfs_attr.h" 38#include "xfs_attr.h"
46#include "xfs_buf_item.h" 39#include "xfs_buf_item.h"
47#include "xfs_trans_space.h" 40#include "xfs_trans_space.h"
@@ -69,7 +62,7 @@ STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
69 62
70STATIC int xfs_qm_init_quotainos(xfs_mount_t *); 63STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
71STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); 64STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
72STATIC int xfs_qm_shake(int, gfp_t); 65STATIC int xfs_qm_shake(struct shrinker *, int, gfp_t);
73 66
74static struct shrinker xfs_qm_shaker = { 67static struct shrinker xfs_qm_shaker = {
75 .shrink = xfs_qm_shake, 68 .shrink = xfs_qm_shake,
@@ -1497,7 +1490,7 @@ xfs_qm_dqiterate(
1497 maxlblkcnt - lblkno, 1490 maxlblkcnt - lblkno,
1498 XFS_BMAPI_METADATA, 1491 XFS_BMAPI_METADATA,
1499 NULL, 1492 NULL,
1500 0, map, &nmaps, NULL, NULL); 1493 0, map, &nmaps, NULL);
1501 xfs_iunlock(qip, XFS_ILOCK_SHARED); 1494 xfs_iunlock(qip, XFS_ILOCK_SHARED);
1502 if (error) 1495 if (error)
1503 break; 1496 break;
@@ -1632,10 +1625,7 @@ xfs_qm_dqusage_adjust(
1632 xfs_ino_t ino, /* inode number to get data for */ 1625 xfs_ino_t ino, /* inode number to get data for */
1633 void __user *buffer, /* not used */ 1626 void __user *buffer, /* not used */
1634 int ubsize, /* not used */ 1627 int ubsize, /* not used */
1635 void *private_data, /* not used */
1636 xfs_daddr_t bno, /* starting block of inode cluster */
1637 int *ubused, /* not used */ 1628 int *ubused, /* not used */
1638 void *dip, /* on-disk inode pointer (not used) */
1639 int *res) /* result code value */ 1629 int *res) /* result code value */
1640{ 1630{
1641 xfs_inode_t *ip; 1631 xfs_inode_t *ip;
@@ -1660,7 +1650,7 @@ xfs_qm_dqusage_adjust(
1660 * the case in all other instances. It's OK that we do this because 1650 * the case in all other instances. It's OK that we do this because
1661 * quotacheck is done only at mount time. 1651 * quotacheck is done only at mount time.
1662 */ 1652 */
1663 if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip, bno))) { 1653 if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip))) {
1664 *res = BULKSTAT_RV_NOTHING; 1654 *res = BULKSTAT_RV_NOTHING;
1665 return error; 1655 return error;
1666 } 1656 }
@@ -1672,7 +1662,8 @@ xfs_qm_dqusage_adjust(
1672 * making us disable quotas for the file system. 1662 * making us disable quotas for the file system.
1673 */ 1663 */
1674 if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) { 1664 if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) {
1675 xfs_iput(ip, XFS_ILOCK_EXCL); 1665 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1666 IRELE(ip);
1676 *res = BULKSTAT_RV_GIVEUP; 1667 *res = BULKSTAT_RV_GIVEUP;
1677 return error; 1668 return error;
1678 } 1669 }
@@ -1685,7 +1676,8 @@ xfs_qm_dqusage_adjust(
1685 * Walk thru the extent list and count the realtime blocks. 1676 * Walk thru the extent list and count the realtime blocks.
1686 */ 1677 */
1687 if ((error = xfs_qm_get_rtblks(ip, &rtblks))) { 1678 if ((error = xfs_qm_get_rtblks(ip, &rtblks))) {
1688 xfs_iput(ip, XFS_ILOCK_EXCL); 1679 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1680 IRELE(ip);
1689 if (udqp) 1681 if (udqp)
1690 xfs_qm_dqput(udqp); 1682 xfs_qm_dqput(udqp);
1691 if (gdqp) 1683 if (gdqp)
@@ -1796,12 +1788,13 @@ xfs_qm_quotacheck(
1796 * Iterate thru all the inodes in the file system, 1788 * Iterate thru all the inodes in the file system,
1797 * adjusting the corresponding dquot counters in core. 1789 * adjusting the corresponding dquot counters in core.
1798 */ 1790 */
1799 if ((error = xfs_bulkstat(mp, &lastino, &count, 1791 error = xfs_bulkstat(mp, &lastino, &count,
1800 xfs_qm_dqusage_adjust, NULL, 1792 xfs_qm_dqusage_adjust,
1801 structsz, NULL, BULKSTAT_FG_IGET, &done))) 1793 structsz, NULL, &done);
1794 if (error)
1802 break; 1795 break;
1803 1796
1804 } while (! done); 1797 } while (!done);
1805 1798
1806 /* 1799 /*
1807 * We've made all the changes that we need to make incore. 1800 * We've made all the changes that we need to make incore.
@@ -1889,14 +1882,14 @@ xfs_qm_init_quotainos(
1889 mp->m_sb.sb_uquotino != NULLFSINO) { 1882 mp->m_sb.sb_uquotino != NULLFSINO) {
1890 ASSERT(mp->m_sb.sb_uquotino > 0); 1883 ASSERT(mp->m_sb.sb_uquotino > 0);
1891 if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 1884 if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1892 0, 0, &uip, 0))) 1885 0, 0, &uip)))
1893 return XFS_ERROR(error); 1886 return XFS_ERROR(error);
1894 } 1887 }
1895 if (XFS_IS_OQUOTA_ON(mp) && 1888 if (XFS_IS_OQUOTA_ON(mp) &&
1896 mp->m_sb.sb_gquotino != NULLFSINO) { 1889 mp->m_sb.sb_gquotino != NULLFSINO) {
1897 ASSERT(mp->m_sb.sb_gquotino > 0); 1890 ASSERT(mp->m_sb.sb_gquotino > 0);
1898 if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 1891 if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1899 0, 0, &gip, 0))) { 1892 0, 0, &gip))) {
1900 if (uip) 1893 if (uip)
1901 IRELE(uip); 1894 IRELE(uip);
1902 return XFS_ERROR(error); 1895 return XFS_ERROR(error);
@@ -2119,7 +2112,10 @@ xfs_qm_shake_freelist(
2119 */ 2112 */
2120/* ARGSUSED */ 2113/* ARGSUSED */
2121STATIC int 2114STATIC int
2122xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask) 2115xfs_qm_shake(
2116 struct shrinker *shrink,
2117 int nr_to_scan,
2118 gfp_t gfp_mask)
2123{ 2119{
2124 int ndqused, nfree, n; 2120 int ndqused, nfree, n;
2125 2121
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index 97b410c12794..bea02d786c5d 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -23,25 +23,15 @@
23#include "xfs_trans.h" 23#include "xfs_trans.h"
24#include "xfs_sb.h" 24#include "xfs_sb.h"
25#include "xfs_ag.h" 25#include "xfs_ag.h"
26#include "xfs_dir2.h"
27#include "xfs_alloc.h" 26#include "xfs_alloc.h"
28#include "xfs_dmapi.h"
29#include "xfs_quota.h" 27#include "xfs_quota.h"
30#include "xfs_mount.h" 28#include "xfs_mount.h"
31#include "xfs_bmap_btree.h" 29#include "xfs_bmap_btree.h"
32#include "xfs_alloc_btree.h"
33#include "xfs_ialloc_btree.h"
34#include "xfs_dir2_sf.h"
35#include "xfs_attr_sf.h"
36#include "xfs_dinode.h"
37#include "xfs_inode.h" 30#include "xfs_inode.h"
38#include "xfs_ialloc.h"
39#include "xfs_itable.h" 31#include "xfs_itable.h"
40#include "xfs_btree.h"
41#include "xfs_bmap.h" 32#include "xfs_bmap.h"
42#include "xfs_rtalloc.h" 33#include "xfs_rtalloc.h"
43#include "xfs_error.h" 34#include "xfs_error.h"
44#include "xfs_rw.h"
45#include "xfs_attr.h" 35#include "xfs_attr.h"
46#include "xfs_buf_item.h" 36#include "xfs_buf_item.h"
47#include "xfs_qm.h" 37#include "xfs_qm.h"
diff --git a/fs/xfs/quota/xfs_qm_stats.c b/fs/xfs/quota/xfs_qm_stats.c
index 3d1fc79532e2..8671a0b32644 100644
--- a/fs/xfs/quota/xfs_qm_stats.c
+++ b/fs/xfs/quota/xfs_qm_stats.c
@@ -23,25 +23,15 @@
23#include "xfs_trans.h" 23#include "xfs_trans.h"
24#include "xfs_sb.h" 24#include "xfs_sb.h"
25#include "xfs_ag.h" 25#include "xfs_ag.h"
26#include "xfs_dir2.h"
27#include "xfs_alloc.h" 26#include "xfs_alloc.h"
28#include "xfs_dmapi.h"
29#include "xfs_quota.h" 27#include "xfs_quota.h"
30#include "xfs_mount.h" 28#include "xfs_mount.h"
31#include "xfs_bmap_btree.h" 29#include "xfs_bmap_btree.h"
32#include "xfs_alloc_btree.h"
33#include "xfs_ialloc_btree.h"
34#include "xfs_dir2_sf.h"
35#include "xfs_attr_sf.h"
36#include "xfs_dinode.h"
37#include "xfs_inode.h" 30#include "xfs_inode.h"
38#include "xfs_ialloc.h"
39#include "xfs_itable.h" 31#include "xfs_itable.h"
40#include "xfs_bmap.h" 32#include "xfs_bmap.h"
41#include "xfs_btree.h"
42#include "xfs_rtalloc.h" 33#include "xfs_rtalloc.h"
43#include "xfs_error.h" 34#include "xfs_error.h"
44#include "xfs_rw.h"
45#include "xfs_attr.h" 35#include "xfs_attr.h"
46#include "xfs_buf_item.h" 36#include "xfs_buf_item.h"
47#include "xfs_qm.h" 37#include "xfs_qm.h"
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 92b002f1805f..d257eb8557c4 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -26,25 +26,15 @@
26#include "xfs_trans.h" 26#include "xfs_trans.h"
27#include "xfs_sb.h" 27#include "xfs_sb.h"
28#include "xfs_ag.h" 28#include "xfs_ag.h"
29#include "xfs_dir2.h"
30#include "xfs_alloc.h" 29#include "xfs_alloc.h"
31#include "xfs_dmapi.h"
32#include "xfs_quota.h" 30#include "xfs_quota.h"
33#include "xfs_mount.h" 31#include "xfs_mount.h"
34#include "xfs_bmap_btree.h" 32#include "xfs_bmap_btree.h"
35#include "xfs_alloc_btree.h"
36#include "xfs_ialloc_btree.h"
37#include "xfs_dir2_sf.h"
38#include "xfs_attr_sf.h"
39#include "xfs_dinode.h"
40#include "xfs_inode.h" 33#include "xfs_inode.h"
41#include "xfs_ialloc.h"
42#include "xfs_itable.h" 34#include "xfs_itable.h"
43#include "xfs_bmap.h" 35#include "xfs_bmap.h"
44#include "xfs_btree.h"
45#include "xfs_rtalloc.h" 36#include "xfs_rtalloc.h"
46#include "xfs_error.h" 37#include "xfs_error.h"
47#include "xfs_rw.h"
48#include "xfs_attr.h" 38#include "xfs_attr.h"
49#include "xfs_buf_item.h" 39#include "xfs_buf_item.h"
50#include "xfs_utils.h" 40#include "xfs_utils.h"
@@ -248,40 +238,74 @@ out_unlock:
248 return error; 238 return error;
249} 239}
250 240
241STATIC int
242xfs_qm_scall_trunc_qfile(
243 struct xfs_mount *mp,
244 xfs_ino_t ino)
245{
246 struct xfs_inode *ip;
247 struct xfs_trans *tp;
248 int error;
249
250 if (ino == NULLFSINO)
251 return 0;
252
253 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
254 if (error)
255 return error;
256
257 xfs_ilock(ip, XFS_IOLOCK_EXCL);
258
259 tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
260 error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
261 XFS_TRANS_PERM_LOG_RES,
262 XFS_ITRUNCATE_LOG_COUNT);
263 if (error) {
264 xfs_trans_cancel(tp, 0);
265 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
266 goto out_put;
267 }
268
269 xfs_ilock(ip, XFS_ILOCK_EXCL);
270 xfs_trans_ijoin(tp, ip);
271
272 error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK, 1);
273 if (error) {
274 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
275 XFS_TRANS_ABORT);
276 goto out_unlock;
277 }
278
279 xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
280 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
281
282out_unlock:
283 xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
284out_put:
285 IRELE(ip);
286 return error;
287}
288
251int 289int
252xfs_qm_scall_trunc_qfiles( 290xfs_qm_scall_trunc_qfiles(
253 xfs_mount_t *mp, 291 xfs_mount_t *mp,
254 uint flags) 292 uint flags)
255{ 293{
256 int error = 0, error2 = 0; 294 int error = 0, error2 = 0;
257 xfs_inode_t *qip;
258 295
259 if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) { 296 if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
260 qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags); 297 qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags);
261 return XFS_ERROR(EINVAL); 298 return XFS_ERROR(EINVAL);
262 } 299 }
263 300
264 if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) { 301 if (flags & XFS_DQ_USER)
265 error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip, 0); 302 error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
266 if (!error) { 303 if (flags & (XFS_DQ_GROUP|XFS_DQ_PROJ))
267 error = xfs_truncate_file(mp, qip); 304 error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
268 IRELE(qip);
269 }
270 }
271
272 if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) &&
273 mp->m_sb.sb_gquotino != NULLFSINO) {
274 error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0);
275 if (!error2) {
276 error2 = xfs_truncate_file(mp, qip);
277 IRELE(qip);
278 }
279 }
280 305
281 return error ? error : error2; 306 return error ? error : error2;
282} 307}
283 308
284
285/* 309/*
286 * Switch on (a given) quota enforcement for a filesystem. This takes 310 * Switch on (a given) quota enforcement for a filesystem. This takes
287 * effect immediately. 311 * effect immediately.
@@ -417,12 +441,12 @@ xfs_qm_scall_getqstat(
417 } 441 }
418 if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { 442 if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
419 if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 443 if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
420 0, 0, &uip, 0) == 0) 444 0, 0, &uip) == 0)
421 tempuqip = B_TRUE; 445 tempuqip = B_TRUE;
422 } 446 }
423 if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) { 447 if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
424 if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 448 if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
425 0, 0, &gip, 0) == 0) 449 0, 0, &gip) == 0)
426 tempgqip = B_TRUE; 450 tempgqip = B_TRUE;
427 } 451 }
428 if (uip) { 452 if (uip) {
@@ -875,8 +899,9 @@ xfs_dqrele_inode(
875 xfs_qm_dqrele(ip->i_gdquot); 899 xfs_qm_dqrele(ip->i_gdquot);
876 ip->i_gdquot = NULL; 900 ip->i_gdquot = NULL;
877 } 901 }
878 xfs_iput(ip, XFS_ILOCK_EXCL); 902 xfs_iunlock(ip, XFS_ILOCK_EXCL);
879 903
904 IRELE(ip);
880 return 0; 905 return 0;
881} 906}
882 907
@@ -1109,10 +1134,7 @@ xfs_qm_internalqcheck_adjust(
1109 xfs_ino_t ino, /* inode number to get data for */ 1134 xfs_ino_t ino, /* inode number to get data for */
1110 void __user *buffer, /* not used */ 1135 void __user *buffer, /* not used */
1111 int ubsize, /* not used */ 1136 int ubsize, /* not used */
1112 void *private_data, /* not used */
1113 xfs_daddr_t bno, /* starting block of inode cluster */
1114 int *ubused, /* not used */ 1137 int *ubused, /* not used */
1115 void *dip, /* not used */
1116 int *res) /* bulkstat result code */ 1138 int *res) /* bulkstat result code */
1117{ 1139{
1118 xfs_inode_t *ip; 1140 xfs_inode_t *ip;
@@ -1134,7 +1156,7 @@ xfs_qm_internalqcheck_adjust(
1134 ipreleased = B_FALSE; 1156 ipreleased = B_FALSE;
1135 again: 1157 again:
1136 lock_flags = XFS_ILOCK_SHARED; 1158 lock_flags = XFS_ILOCK_SHARED;
1137 if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip, bno))) { 1159 if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip))) {
1138 *res = BULKSTAT_RV_NOTHING; 1160 *res = BULKSTAT_RV_NOTHING;
1139 return (error); 1161 return (error);
1140 } 1162 }
@@ -1146,7 +1168,8 @@ xfs_qm_internalqcheck_adjust(
1146 * of those now. 1168 * of those now.
1147 */ 1169 */
1148 if (! ipreleased) { 1170 if (! ipreleased) {
1149 xfs_iput(ip, lock_flags); 1171 xfs_iunlock(ip, lock_flags);
1172 IRELE(ip);
1150 ipreleased = B_TRUE; 1173 ipreleased = B_TRUE;
1151 goto again; 1174 goto again;
1152 } 1175 }
@@ -1163,7 +1186,8 @@ xfs_qm_internalqcheck_adjust(
1163 ASSERT(gd); 1186 ASSERT(gd);
1164 xfs_qm_internalqcheck_dqadjust(ip, gd); 1187 xfs_qm_internalqcheck_dqadjust(ip, gd);
1165 } 1188 }
1166 xfs_iput(ip, lock_flags); 1189 xfs_iunlock(ip, lock_flags);
1190 IRELE(ip);
1167 *res = BULKSTAT_RV_DIDONE; 1191 *res = BULKSTAT_RV_DIDONE;
1168 return (0); 1192 return (0);
1169} 1193}
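Several hunks in this file replace a single xfs_iput(ip, flags) call with an explicit xfs_iunlock(ip, flags) followed by IRELE(ip). A small self-contained model of that unlock-then-release ordering is sketched below; the inode structure and helpers are invented purely for illustration.

	#include <assert.h>
	#include <stdio.h>

	/* illustrative inode stand-in: a lock bit plus a reference count */
	struct fake_inode {
		int locked;
		int refcount;
	};

	static void iunlock(struct fake_inode *ip)	/* models xfs_iunlock() */
	{
		assert(ip->locked);
		ip->locked = 0;
	}

	static void irele(struct fake_inode *ip)	/* models IRELE() */
	{
		assert(ip->refcount > 0);
		if (--ip->refcount == 0)
			printf("last reference dropped\n");
	}

	int main(void)
	{
		struct fake_inode inode = { .locked = 1, .refcount = 1 };

		/* what the old combined helper did in one call: drop the lock,
		 * then the reference -- the diff simply open-codes both steps */
		iunlock(&inode);
		irele(&inode);
		return 0;
	}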
@@ -1205,15 +1229,15 @@ xfs_qm_internalqcheck(
1205 * Iterate thru all the inodes in the file system, 1229 * Iterate thru all the inodes in the file system,
1206 * adjusting the corresponding dquot counters 1230 * adjusting the corresponding dquot counters
1207 */ 1231 */
1208 if ((error = xfs_bulkstat(mp, &lastino, &count, 1232 error = xfs_bulkstat(mp, &lastino, &count,
1209 xfs_qm_internalqcheck_adjust, NULL, 1233 xfs_qm_internalqcheck_adjust,
1210 0, NULL, BULKSTAT_FG_IGET, &done))) { 1234 0, NULL, &done);
1235 if (error) {
1236 cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error);
1211 break; 1237 break;
1212 } 1238 }
1213 } while (! done); 1239 } while (!done);
1214 if (error) { 1240
1215 cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error);
1216 }
1217 cmn_err(CE_DEBUG, "Checking results against system dquots"); 1241 cmn_err(CE_DEBUG, "Checking results against system dquots");
1218 for (i = 0; i < qmtest_hashmask; i++) { 1242 for (i = 0; i < qmtest_hashmask; i++) {
1219 xfs_dqtest_t *d, *n; 1243 xfs_dqtest_t *d, *n;
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c
index 061d827da33c..7de91d1b75c0 100644
--- a/fs/xfs/quota/xfs_trans_dquot.c
+++ b/fs/xfs/quota/xfs_trans_dquot.c
@@ -23,25 +23,15 @@
23#include "xfs_trans.h" 23#include "xfs_trans.h"
24#include "xfs_sb.h" 24#include "xfs_sb.h"
25#include "xfs_ag.h" 25#include "xfs_ag.h"
26#include "xfs_dir2.h"
27#include "xfs_alloc.h" 26#include "xfs_alloc.h"
28#include "xfs_dmapi.h"
29#include "xfs_quota.h" 27#include "xfs_quota.h"
30#include "xfs_mount.h" 28#include "xfs_mount.h"
31#include "xfs_bmap_btree.h" 29#include "xfs_bmap_btree.h"
32#include "xfs_alloc_btree.h"
33#include "xfs_ialloc_btree.h"
34#include "xfs_attr_sf.h"
35#include "xfs_dir2_sf.h"
36#include "xfs_dinode.h"
37#include "xfs_inode.h" 30#include "xfs_inode.h"
38#include "xfs_ialloc.h"
39#include "xfs_itable.h" 31#include "xfs_itable.h"
40#include "xfs_btree.h"
41#include "xfs_bmap.h" 32#include "xfs_bmap.h"
42#include "xfs_rtalloc.h" 33#include "xfs_rtalloc.h"
43#include "xfs_error.h" 34#include "xfs_error.h"
44#include "xfs_rw.h"
45#include "xfs_attr.h" 35#include "xfs_attr.h"
46#include "xfs_buf_item.h" 36#include "xfs_buf_item.h"
47#include "xfs_trans_priv.h" 37#include "xfs_trans_priv.h"
@@ -59,16 +49,14 @@ xfs_trans_dqjoin(
59 xfs_trans_t *tp, 49 xfs_trans_t *tp,
60 xfs_dquot_t *dqp) 50 xfs_dquot_t *dqp)
61{ 51{
62 xfs_dq_logitem_t *lp = &dqp->q_logitem;
63
64 ASSERT(dqp->q_transp != tp); 52 ASSERT(dqp->q_transp != tp);
65 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 53 ASSERT(XFS_DQ_IS_LOCKED(dqp));
66 ASSERT(lp->qli_dquot == dqp); 54 ASSERT(dqp->q_logitem.qli_dquot == dqp);
67 55
68 /* 56 /*
69 * Get a log_item_desc to point at the new item. 57 * Get a log_item_desc to point at the new item.
70 */ 58 */
71 (void) xfs_trans_add_item(tp, (xfs_log_item_t*)(lp)); 59 xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
72 60
73 /* 61 /*
74 * Initialize i_transp so we can later determine if this dquot is 62 * Initialize i_transp so we can later determine if this dquot is
@@ -93,16 +81,11 @@ xfs_trans_log_dquot(
93 xfs_trans_t *tp, 81 xfs_trans_t *tp,
94 xfs_dquot_t *dqp) 82 xfs_dquot_t *dqp)
95{ 83{
96 xfs_log_item_desc_t *lidp;
97
98 ASSERT(dqp->q_transp == tp); 84 ASSERT(dqp->q_transp == tp);
99 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 85 ASSERT(XFS_DQ_IS_LOCKED(dqp));
100 86
101 lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(&dqp->q_logitem));
102 ASSERT(lidp != NULL);
103
104 tp->t_flags |= XFS_TRANS_DIRTY; 87 tp->t_flags |= XFS_TRANS_DIRTY;
105 lidp->lid_flags |= XFS_LID_DIRTY; 88 dqp->q_logitem.qli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
106} 89}
107 90
108/* 91/*
@@ -874,9 +857,8 @@ xfs_trans_get_qoff_item(
874 /* 857 /*
875 * Get a log_item_desc to point at the new item. 858 * Get a log_item_desc to point at the new item.
876 */ 859 */
877 (void) xfs_trans_add_item(tp, (xfs_log_item_t*)q); 860 xfs_trans_add_item(tp, &q->qql_item);
878 861 return q;
879 return (q);
880} 862}
881 863
882 864
@@ -890,13 +872,8 @@ xfs_trans_log_quotaoff_item(
890 xfs_trans_t *tp, 872 xfs_trans_t *tp,
891 xfs_qoff_logitem_t *qlp) 873 xfs_qoff_logitem_t *qlp)
892{ 874{
893 xfs_log_item_desc_t *lidp;
894
895 lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)qlp);
896 ASSERT(lidp != NULL);
897
898 tp->t_flags |= XFS_TRANS_DIRTY; 875 tp->t_flags |= XFS_TRANS_DIRTY;
899 lidp->lid_flags |= XFS_LID_DIRTY; 876 qlp->qql_item.li_desc->lid_flags |= XFS_LID_DIRTY;
900} 877}
901 878
902STATIC void 879STATIC void
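The xfs_trans_log_dquot() and xfs_trans_log_quotaoff_item() hunks above drop the xfs_trans_find_item() lookup: once an item has been added to a transaction, its log item carries a pointer to its descriptor, so the dirty flags can be set directly. A rough standalone model of that shortcut follows; all structures below are simplified stand-ins, not the kernel definitions.

	#include <stdio.h>

	#define LID_DIRTY	0x1
	#define TRANS_DIRTY	0x1

	/* simplified stand-ins for log item descriptor, log item, transaction */
	struct item_desc {
		unsigned int lid_flags;
	};

	struct log_item {
		struct item_desc *li_desc;	/* set when the item joins a trans */
	};

	struct trans {
		unsigned int t_flags;
	};

	/* models the new logging path: no list search, just follow the
	 * descriptor pointer stored in the log item itself */
	static void log_item_dirty(struct trans *tp, struct log_item *lip)
	{
		tp->t_flags |= TRANS_DIRTY;
		lip->li_desc->lid_flags |= LID_DIRTY;
	}

	int main(void)
	{
		struct item_desc desc = { 0 };
		struct log_item item = { .li_desc = &desc };
		struct trans tp = { 0 };

		log_item_dirty(&tp, &item);
		printf("lid_flags=%#x t_flags=%#x\n", desc.lid_flags, tp.t_flags);
		return 0;
	}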
diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c
index 3f3610a7ee05..975aa10e1a47 100644
--- a/fs/xfs/support/debug.c
+++ b/fs/xfs/support/debug.c
@@ -22,7 +22,6 @@
22#include "xfs_sb.h" 22#include "xfs_sb.h"
23#include "xfs_inum.h" 23#include "xfs_inum.h"
24#include "xfs_ag.h" 24#include "xfs_ag.h"
25#include "xfs_dmapi.h"
26#include "xfs_mount.h" 25#include "xfs_mount.h"
27#include "xfs_error.h" 26#include "xfs_error.h"
28 27
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index a7fbe8a99b12..af168faccc7a 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -24,18 +24,13 @@
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 27#include "xfs_mount.h"
30#include "xfs_bmap_btree.h" 28#include "xfs_bmap_btree.h"
31#include "xfs_alloc_btree.h" 29#include "xfs_alloc_btree.h"
32#include "xfs_ialloc_btree.h" 30#include "xfs_ialloc_btree.h"
33#include "xfs_dir2_sf.h"
34#include "xfs_attr_sf.h"
35#include "xfs_dinode.h" 31#include "xfs_dinode.h"
36#include "xfs_inode.h" 32#include "xfs_inode.h"
37#include "xfs_btree.h" 33#include "xfs_btree.h"
38#include "xfs_ialloc.h"
39#include "xfs_alloc.h" 34#include "xfs_alloc.h"
40#include "xfs_error.h" 35#include "xfs_error.h"
41#include "xfs_trace.h" 36#include "xfs_trace.h"
@@ -688,8 +683,6 @@ xfs_alloc_ag_vextent_near(
688 xfs_agblock_t ltbno; /* start bno of left side entry */ 683 xfs_agblock_t ltbno; /* start bno of left side entry */
689 xfs_agblock_t ltbnoa; /* aligned ... */ 684 xfs_agblock_t ltbnoa; /* aligned ... */
690 xfs_extlen_t ltdiff; /* difference to left side entry */ 685 xfs_extlen_t ltdiff; /* difference to left side entry */
691 /*REFERENCED*/
692 xfs_agblock_t ltend; /* end bno of left side entry */
693 xfs_extlen_t ltlen; /* length of left side entry */ 686 xfs_extlen_t ltlen; /* length of left side entry */
694 xfs_extlen_t ltlena; /* aligned ... */ 687 xfs_extlen_t ltlena; /* aligned ... */
695 xfs_agblock_t ltnew; /* useful start bno of left side */ 688 xfs_agblock_t ltnew; /* useful start bno of left side */
@@ -814,8 +807,7 @@ xfs_alloc_ag_vextent_near(
814 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i))) 807 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
815 goto error0; 808 goto error0;
816 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 809 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
817 ltend = ltbno + ltlen; 810 ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
818 ASSERT(ltend <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
819 args->len = blen; 811 args->len = blen;
820 if (!xfs_alloc_fix_minleft(args)) { 812 if (!xfs_alloc_fix_minleft(args)) {
821 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 813 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
@@ -828,7 +820,7 @@ xfs_alloc_ag_vextent_near(
828 */ 820 */
829 args->agbno = bnew; 821 args->agbno = bnew;
830 ASSERT(bnew >= ltbno); 822 ASSERT(bnew >= ltbno);
831 ASSERT(bnew + blen <= ltend); 823 ASSERT(bnew + blen <= ltbno + ltlen);
832 /* 824 /*
833 * Set up a cursor for the by-bno tree. 825 * Set up a cursor for the by-bno tree.
834 */ 826 */
@@ -1157,7 +1149,6 @@ xfs_alloc_ag_vextent_near(
1157 /* 1149 /*
1158 * Fix up the length and compute the useful address. 1150 * Fix up the length and compute the useful address.
1159 */ 1151 */
1160 ltend = ltbno + ltlen;
1161 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); 1152 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1162 xfs_alloc_fix_len(args); 1153 xfs_alloc_fix_len(args);
1163 if (!xfs_alloc_fix_minleft(args)) { 1154 if (!xfs_alloc_fix_minleft(args)) {
@@ -1170,7 +1161,7 @@ xfs_alloc_ag_vextent_near(
1170 (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, ltbno, 1161 (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, ltbno,
1171 ltlen, &ltnew); 1162 ltlen, &ltnew);
1172 ASSERT(ltnew >= ltbno); 1163 ASSERT(ltnew >= ltbno);
1173 ASSERT(ltnew + rlen <= ltend); 1164 ASSERT(ltnew + rlen <= ltbno + ltlen);
1174 ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); 1165 ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
1175 args->agbno = ltnew; 1166 args->agbno = ltnew;
1176 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen, 1167 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index 6d05199b667c..895009a97271 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -27,16 +27,16 @@ struct xfs_busy_extent;
27/* 27/*
28 * Freespace allocation types. Argument to xfs_alloc_[v]extent. 28 * Freespace allocation types. Argument to xfs_alloc_[v]extent.
29 */ 29 */
30typedef enum xfs_alloctype 30#define XFS_ALLOCTYPE_ANY_AG 0x01 /* allocate anywhere, use rotor */
31{ 31#define XFS_ALLOCTYPE_FIRST_AG 0x02 /* ... start at ag 0 */
32 XFS_ALLOCTYPE_ANY_AG, /* allocate anywhere, use rotor */ 32#define XFS_ALLOCTYPE_START_AG 0x04 /* anywhere, start in this a.g. */
33 XFS_ALLOCTYPE_FIRST_AG, /* ... start at ag 0 */ 33#define XFS_ALLOCTYPE_THIS_AG 0x08 /* anywhere in this a.g. */
34 XFS_ALLOCTYPE_START_AG, /* anywhere, start in this a.g. */ 34#define XFS_ALLOCTYPE_START_BNO 0x10 /* near this block else anywhere */
35 XFS_ALLOCTYPE_THIS_AG, /* anywhere in this a.g. */ 35#define XFS_ALLOCTYPE_NEAR_BNO 0x20 /* in this a.g. and near this block */
36 XFS_ALLOCTYPE_START_BNO, /* near this block else anywhere */ 36#define XFS_ALLOCTYPE_THIS_BNO 0x40 /* at exactly this block */
37 XFS_ALLOCTYPE_NEAR_BNO, /* in this a.g. and near this block */ 37
38 XFS_ALLOCTYPE_THIS_BNO /* at exactly this block */ 38/* this should become an enum again when the tracing code is fixed */
39} xfs_alloctype_t; 39typedef unsigned int xfs_alloctype_t;
40 40
41#define XFS_ALLOC_TYPES \ 41#define XFS_ALLOC_TYPES \
42 { XFS_ALLOCTYPE_ANY_AG, "ANY_AG" }, \ 42 { XFS_ALLOCTYPE_ANY_AG, "ANY_AG" }, \
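The xfs_alloc.h hunk turns the xfs_alloctype enum into plain #defines with a typedef'd unsigned int; the in-tree comment says this is temporary until the tracing code is fixed, presumably because value/name tables like XFS_ALLOC_TYPES want preprocessor-visible constants. A minimal standalone sketch of pairing such defines with a name table; the table and lookup below are illustrative only, not the kernel's tracing macros.

	#include <stdio.h>

	/* same shape as the XFS_ALLOCTYPE_* defines above: plain macros whose
	 * values can be paired with strings in a lookup table */
	#define ALLOCTYPE_ANY_AG	0x01
	#define ALLOCTYPE_THIS_AG	0x08
	#define ALLOCTYPE_NEAR_BNO	0x20

	static const struct {
		unsigned int	value;
		const char	*name;
	} alloctype_names[] = {
		{ ALLOCTYPE_ANY_AG,   "ANY_AG" },
		{ ALLOCTYPE_THIS_AG,  "THIS_AG" },
		{ ALLOCTYPE_NEAR_BNO, "NEAR_BNO" },
	};

	static const char *alloctype_name(unsigned int type)
	{
		unsigned int i;

		for (i = 0; i < sizeof(alloctype_names) / sizeof(alloctype_names[0]); i++)
			if (alloctype_names[i].value == type)
				return alloctype_names[i].name;
		return "?";
	}

	int main(void)
	{
		printf("%s\n", alloctype_name(ALLOCTYPE_NEAR_BNO));
		return 0;
	}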
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 83f494218759..97f7328967fd 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -24,19 +24,14 @@
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 27#include "xfs_mount.h"
30#include "xfs_bmap_btree.h" 28#include "xfs_bmap_btree.h"
31#include "xfs_alloc_btree.h" 29#include "xfs_alloc_btree.h"
32#include "xfs_ialloc_btree.h" 30#include "xfs_ialloc_btree.h"
33#include "xfs_dir2_sf.h"
34#include "xfs_attr_sf.h"
35#include "xfs_dinode.h" 31#include "xfs_dinode.h"
36#include "xfs_inode.h" 32#include "xfs_inode.h"
37#include "xfs_btree.h" 33#include "xfs_btree.h"
38#include "xfs_btree_trace.h" 34#include "xfs_btree_trace.h"
39#include "xfs_ialloc.h"
40#include "xfs_alloc.h" 35#include "xfs_alloc.h"
41#include "xfs_error.h" 36#include "xfs_error.h"
42#include "xfs_trace.h" 37#include "xfs_trace.h"
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index b9c196a53c42..c2568242a901 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -25,19 +25,13 @@
25#include "xfs_trans.h" 25#include "xfs_trans.h"
26#include "xfs_sb.h" 26#include "xfs_sb.h"
27#include "xfs_ag.h" 27#include "xfs_ag.h"
28#include "xfs_dir2.h"
29#include "xfs_dmapi.h"
30#include "xfs_mount.h" 28#include "xfs_mount.h"
31#include "xfs_da_btree.h" 29#include "xfs_da_btree.h"
32#include "xfs_bmap_btree.h" 30#include "xfs_bmap_btree.h"
33#include "xfs_alloc_btree.h"
34#include "xfs_ialloc_btree.h"
35#include "xfs_dir2_sf.h"
36#include "xfs_attr_sf.h" 31#include "xfs_attr_sf.h"
37#include "xfs_dinode.h" 32#include "xfs_dinode.h"
38#include "xfs_inode.h" 33#include "xfs_inode.h"
39#include "xfs_alloc.h" 34#include "xfs_alloc.h"
40#include "xfs_btree.h"
41#include "xfs_inode_item.h" 35#include "xfs_inode_item.h"
42#include "xfs_bmap.h" 36#include "xfs_bmap.h"
43#include "xfs_attr.h" 37#include "xfs_attr.h"
@@ -325,8 +319,7 @@ xfs_attr_set_int(
325 return (error); 319 return (error);
326 } 320 }
327 321
328 xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL); 322 xfs_trans_ijoin(args.trans, dp);
329 xfs_trans_ihold(args.trans, dp);
330 323
331 /* 324 /*
332 * If the attribute list is non-existent or a shortform list, 325 * If the attribute list is non-existent or a shortform list,
@@ -396,10 +389,8 @@ xfs_attr_set_int(
396 * bmap_finish() may have committed the last trans and started 389 * bmap_finish() may have committed the last trans and started
397 * a new one. We need the inode to be in all transactions. 390 * a new one. We need the inode to be in all transactions.
398 */ 391 */
399 if (committed) { 392 if (committed)
400 xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL); 393 xfs_trans_ijoin(args.trans, dp);
401 xfs_trans_ihold(args.trans, dp);
402 }
403 394
404 /* 395 /*
405 * Commit the leaf transformation. We'll need another (linked) 396 * Commit the leaf transformation. We'll need another (linked)
@@ -544,8 +535,7 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
544 * No need to make quota reservations here. We expect to release some 535 * No need to make quota reservations here. We expect to release some
545 * blocks not allocate in the common case. 536 * blocks not allocate in the common case.
546 */ 537 */
547 xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL); 538 xfs_trans_ijoin(args.trans, dp);
548 xfs_trans_ihold(args.trans, dp);
549 539
550 /* 540 /*
551 * Decide on what work routines to call based on the inode size. 541 * Decide on what work routines to call based on the inode size.
@@ -821,8 +811,7 @@ xfs_attr_inactive(xfs_inode_t *dp)
821 * No need to make quota reservations here. We expect to release some 811 * No need to make quota reservations here. We expect to release some
822 * blocks, not allocate, in the common case. 812 * blocks, not allocate, in the common case.
823 */ 813 */
824 xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL); 814 xfs_trans_ijoin(trans, dp);
825 xfs_trans_ihold(trans, dp);
826 815
827 /* 816 /*
828 * Decide on what work routines to call based on the inode size. 817 * Decide on what work routines to call based on the inode size.
@@ -981,10 +970,8 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
981 * bmap_finish() may have committed the last trans and started 970 * bmap_finish() may have committed the last trans and started
982 * a new one. We need the inode to be in all transactions. 971 * a new one. We need the inode to be in all transactions.
983 */ 972 */
984 if (committed) { 973 if (committed)
985 xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); 974 xfs_trans_ijoin(args->trans, dp);
986 xfs_trans_ihold(args->trans, dp);
987 }
988 975
989 /* 976 /*
990 * Commit the current trans (including the inode) and start 977 * Commit the current trans (including the inode) and start
@@ -1085,10 +1072,8 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
1085 * and started a new one. We need the inode to be 1072 * and started a new one. We need the inode to be
1086 * in all transactions. 1073 * in all transactions.
1087 */ 1074 */
1088 if (committed) { 1075 if (committed)
1089 xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); 1076 xfs_trans_ijoin(args->trans, dp);
1090 xfs_trans_ihold(args->trans, dp);
1091 }
1092 } else 1077 } else
1093 xfs_da_buf_done(bp); 1078 xfs_da_buf_done(bp);
1094 1079
@@ -1161,10 +1146,8 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
1161 * bmap_finish() may have committed the last trans and started 1146 * bmap_finish() may have committed the last trans and started
1162 * a new one. We need the inode to be in all transactions. 1147 * a new one. We need the inode to be in all transactions.
1163 */ 1148 */
1164 if (committed) { 1149 if (committed)
1165 xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); 1150 xfs_trans_ijoin(args->trans, dp);
1166 xfs_trans_ihold(args->trans, dp);
1167 }
1168 } else 1151 } else
1169 xfs_da_buf_done(bp); 1152 xfs_da_buf_done(bp);
1170 return(0); 1153 return(0);
@@ -1317,10 +1300,8 @@ restart:
1317 * and started a new one. We need the inode to be 1300 * and started a new one. We need the inode to be
1318 * in all transactions. 1301 * in all transactions.
1319 */ 1302 */
1320 if (committed) { 1303 if (committed)
1321 xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); 1304 xfs_trans_ijoin(args->trans, dp);
1322 xfs_trans_ihold(args->trans, dp);
1323 }
1324 1305
1325 /* 1306 /*
1326 * Commit the node conversion and start the next 1307 * Commit the node conversion and start the next
@@ -1356,10 +1337,8 @@ restart:
1356 * bmap_finish() may have committed the last trans and started 1337 * bmap_finish() may have committed the last trans and started
1357 * a new one. We need the inode to be in all transactions. 1338 * a new one. We need the inode to be in all transactions.
1358 */ 1339 */
1359 if (committed) { 1340 if (committed)
1360 xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); 1341 xfs_trans_ijoin(args->trans, dp);
1361 xfs_trans_ihold(args->trans, dp);
1362 }
1363 } else { 1342 } else {
1364 /* 1343 /*
1365 * Addition succeeded, update Btree hashvals. 1344 * Addition succeeded, update Btree hashvals.
@@ -1470,10 +1449,8 @@ restart:
1470 * and started a new one. We need the inode to be 1449 * and started a new one. We need the inode to be
1471 * in all transactions. 1450 * in all transactions.
1472 */ 1451 */
1473 if (committed) { 1452 if (committed)
1474 xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); 1453 xfs_trans_ijoin(args->trans, dp);
1475 xfs_trans_ihold(args->trans, dp);
1476 }
1477 } 1454 }
1478 1455
1479 /* 1456 /*
@@ -1604,10 +1581,8 @@ xfs_attr_node_removename(xfs_da_args_t *args)
1604 * bmap_finish() may have committed the last trans and started 1581 * bmap_finish() may have committed the last trans and started
1605 * a new one. We need the inode to be in all transactions. 1582 * a new one. We need the inode to be in all transactions.
1606 */ 1583 */
1607 if (committed) { 1584 if (committed)
1608 xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); 1585 xfs_trans_ijoin(args->trans, dp);
1609 xfs_trans_ihold(args->trans, dp);
1610 }
1611 1586
1612 /* 1587 /*
1613 * Commit the Btree join operation and start a new trans. 1588 * Commit the Btree join operation and start a new trans.
@@ -1658,10 +1633,8 @@ xfs_attr_node_removename(xfs_da_args_t *args)
1658 * and started a new one. We need the inode to be 1633 * and started a new one. We need the inode to be
1659 * in all transactions. 1634 * in all transactions.
1660 */ 1635 */
1661 if (committed) { 1636 if (committed)
1662 xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); 1637 xfs_trans_ijoin(args->trans, dp);
1663 xfs_trans_ihold(args->trans, dp);
1664 }
1665 } else 1638 } else
1666 xfs_da_brelse(args->trans, bp); 1639 xfs_da_brelse(args->trans, bp);
1667 } 1640 }
@@ -2004,7 +1977,7 @@ xfs_attr_rmtval_get(xfs_da_args_t *args)
2004 error = xfs_bmapi(args->trans, args->dp, (xfs_fileoff_t)lblkno, 1977 error = xfs_bmapi(args->trans, args->dp, (xfs_fileoff_t)lblkno,
2005 args->rmtblkcnt, 1978 args->rmtblkcnt,
2006 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, 1979 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
2007 NULL, 0, map, &nmap, NULL, NULL); 1980 NULL, 0, map, &nmap, NULL);
2008 if (error) 1981 if (error)
2009 return(error); 1982 return(error);
2010 ASSERT(nmap >= 1); 1983 ASSERT(nmap >= 1);
@@ -2083,7 +2056,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
2083 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA | 2056 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA |
2084 XFS_BMAPI_WRITE, 2057 XFS_BMAPI_WRITE,
2085 args->firstblock, args->total, &map, &nmap, 2058 args->firstblock, args->total, &map, &nmap,
2086 args->flist, NULL); 2059 args->flist);
2087 if (!error) { 2060 if (!error) {
2088 error = xfs_bmap_finish(&args->trans, args->flist, 2061 error = xfs_bmap_finish(&args->trans, args->flist,
2089 &committed); 2062 &committed);
@@ -2099,10 +2072,8 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
2099 * bmap_finish() may have committed the last trans and started 2072 * bmap_finish() may have committed the last trans and started
2100 * a new one. We need the inode to be in all transactions. 2073 * a new one. We need the inode to be in all transactions.
2101 */ 2074 */
2102 if (committed) { 2075 if (committed)
2103 xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); 2076 xfs_trans_ijoin(args->trans, dp);
2104 xfs_trans_ihold(args->trans, dp);
2105 }
2106 2077
2107 ASSERT(nmap == 1); 2078 ASSERT(nmap == 1);
2108 ASSERT((map.br_startblock != DELAYSTARTBLOCK) && 2079 ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
@@ -2136,7 +2107,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
2136 args->rmtblkcnt, 2107 args->rmtblkcnt,
2137 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, 2108 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
2138 args->firstblock, 0, &map, &nmap, 2109 args->firstblock, 0, &map, &nmap,
2139 NULL, NULL); 2110 NULL);
2140 if (error) { 2111 if (error) {
2141 return(error); 2112 return(error);
2142 } 2113 }
@@ -2201,7 +2172,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
2201 args->rmtblkcnt, 2172 args->rmtblkcnt,
2202 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, 2173 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
2203 args->firstblock, 0, &map, &nmap, 2174 args->firstblock, 0, &map, &nmap,
2204 args->flist, NULL); 2175 args->flist);
2205 if (error) { 2176 if (error) {
2206 return(error); 2177 return(error);
2207 } 2178 }
@@ -2239,7 +2210,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
2239 error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt, 2210 error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
2240 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, 2211 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
2241 1, args->firstblock, args->flist, 2212 1, args->firstblock, args->flist,
2242 NULL, &done); 2213 &done);
2243 if (!error) { 2214 if (!error) {
2244 error = xfs_bmap_finish(&args->trans, args->flist, 2215 error = xfs_bmap_finish(&args->trans, args->flist,
2245 &committed); 2216 &committed);
@@ -2255,10 +2226,8 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
2255 * bmap_finish() may have committed the last trans and started 2226 * bmap_finish() may have committed the last trans and started
2256 * a new one. We need the inode to be in all transactions. 2227 * a new one. We need the inode to be in all transactions.
2257 */ 2228 */
2258 if (committed) { 2229 if (committed)
2259 xfs_trans_ijoin(args->trans, args->dp, XFS_ILOCK_EXCL); 2230 xfs_trans_ijoin(args->trans, args->dp);
2260 xfs_trans_ihold(args->trans, args->dp);
2261 }
2262 2231
2263 /* 2232 /*
2264 * Close out trans and start the next one in the chain. 2233 * Close out trans and start the next one in the chain.
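Throughout xfs_attr.c the committed-path boilerplate shrinks from an xfs_trans_ijoin()+xfs_trans_ihold() pair to a single two-argument xfs_trans_ijoin(trans, ip) call. The surrounding kernel comments give the reasoning: xfs_bmap_finish() may have committed the last transaction and started a new one, and the inode must be in every transaction of the chain. A compact standalone model of that "rejoin if a commit happened" control flow; every name below is a stub used only for illustration.

	#include <stdio.h>

	struct fake_trans { int id; };
	struct fake_inode { int joined_to; };

	/* models the new two-argument xfs_trans_ijoin(tp, ip) call */
	static void trans_ijoin(struct fake_trans *tp, struct fake_inode *ip)
	{
		ip->joined_to = tp->id;
	}

	/* models xfs_bmap_finish(): may commit the current transaction and
	 * hand back a new one, reporting that through *committed */
	static void bmap_finish(struct fake_trans *tp, int *committed)
	{
		tp->id++;		/* pretend a new transaction was started */
		*committed = 1;
	}

	int main(void)
	{
		struct fake_trans tp = { .id = 1 };
		struct fake_inode ip = { 0 };
		int committed = 0;

		trans_ijoin(&tp, &ip);
		bmap_finish(&tp, &committed);

		/* the pattern repeated throughout the diff: if the helper committed
		 * the old transaction, join the inode to the new one */
		if (committed)
			trans_ijoin(&tp, &ip);

		printf("inode joined to transaction %d\n", ip.joined_to);
		return 0;
	}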
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index a90ce74fc256..a6cff8edcdb6 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -24,8 +24,6 @@
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 27#include "xfs_mount.h"
30#include "xfs_da_btree.h" 28#include "xfs_da_btree.h"
31#include "xfs_bmap_btree.h" 29#include "xfs_bmap_btree.h"
@@ -33,7 +31,6 @@
33#include "xfs_ialloc_btree.h" 31#include "xfs_ialloc_btree.h"
34#include "xfs_alloc.h" 32#include "xfs_alloc.h"
35#include "xfs_btree.h" 33#include "xfs_btree.h"
36#include "xfs_dir2_sf.h"
37#include "xfs_attr_sf.h" 34#include "xfs_attr_sf.h"
38#include "xfs_dinode.h" 35#include "xfs_dinode.h"
39#include "xfs_inode.h" 36#include "xfs_inode.h"
@@ -2931,7 +2928,7 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
2931 nmap = 1; 2928 nmap = 1;
2932 error = xfs_bmapi(*trans, dp, (xfs_fileoff_t)tblkno, tblkcnt, 2929 error = xfs_bmapi(*trans, dp, (xfs_fileoff_t)tblkno, tblkcnt,
2933 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, 2930 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
2934 NULL, 0, &map, &nmap, NULL, NULL); 2931 NULL, 0, &map, &nmap, NULL);
2935 if (error) { 2932 if (error) {
2936 return(error); 2933 return(error);
2937 } 2934 }
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 99587ded043f..23f14e595c18 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -30,13 +30,10 @@
30#include "xfs_alloc_btree.h" 30#include "xfs_alloc_btree.h"
31#include "xfs_ialloc_btree.h" 31#include "xfs_ialloc_btree.h"
32#include "xfs_dir2_sf.h" 32#include "xfs_dir2_sf.h"
33#include "xfs_attr_sf.h"
34#include "xfs_dinode.h" 33#include "xfs_dinode.h"
35#include "xfs_inode.h" 34#include "xfs_inode.h"
36#include "xfs_btree.h" 35#include "xfs_btree.h"
37#include "xfs_dmapi.h"
38#include "xfs_mount.h" 36#include "xfs_mount.h"
39#include "xfs_ialloc.h"
40#include "xfs_itable.h" 37#include "xfs_itable.h"
41#include "xfs_dir2_data.h" 38#include "xfs_dir2_data.h"
42#include "xfs_dir2_leaf.h" 39#include "xfs_dir2_leaf.h"
@@ -104,7 +101,6 @@ xfs_bmap_add_extent(
104 xfs_fsblock_t *first, /* pointer to firstblock variable */ 101 xfs_fsblock_t *first, /* pointer to firstblock variable */
105 xfs_bmap_free_t *flist, /* list of extents to be freed */ 102 xfs_bmap_free_t *flist, /* list of extents to be freed */
106 int *logflagsp, /* inode logging flags */ 103 int *logflagsp, /* inode logging flags */
107 xfs_extdelta_t *delta, /* Change made to incore extents */
108 int whichfork, /* data or attr fork */ 104 int whichfork, /* data or attr fork */
109 int rsvd); /* OK to allocate reserved blocks */ 105 int rsvd); /* OK to allocate reserved blocks */
110 106
@@ -122,7 +118,6 @@ xfs_bmap_add_extent_delay_real(
122 xfs_fsblock_t *first, /* pointer to firstblock variable */ 118 xfs_fsblock_t *first, /* pointer to firstblock variable */
123 xfs_bmap_free_t *flist, /* list of extents to be freed */ 119 xfs_bmap_free_t *flist, /* list of extents to be freed */
124 int *logflagsp, /* inode logging flags */ 120 int *logflagsp, /* inode logging flags */
125 xfs_extdelta_t *delta, /* Change made to incore extents */
126 int rsvd); /* OK to allocate reserved blocks */ 121 int rsvd); /* OK to allocate reserved blocks */
127 122
128/* 123/*
@@ -135,7 +130,6 @@ xfs_bmap_add_extent_hole_delay(
135 xfs_extnum_t idx, /* extent number to update/insert */ 130 xfs_extnum_t idx, /* extent number to update/insert */
136 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 131 xfs_bmbt_irec_t *new, /* new data to add to file extents */
137 int *logflagsp,/* inode logging flags */ 132 int *logflagsp,/* inode logging flags */
138 xfs_extdelta_t *delta, /* Change made to incore extents */
139 int rsvd); /* OK to allocate reserved blocks */ 133 int rsvd); /* OK to allocate reserved blocks */
140 134
141/* 135/*
@@ -149,7 +143,6 @@ xfs_bmap_add_extent_hole_real(
149 xfs_btree_cur_t *cur, /* if null, not a btree */ 143 xfs_btree_cur_t *cur, /* if null, not a btree */
150 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 144 xfs_bmbt_irec_t *new, /* new data to add to file extents */
151 int *logflagsp, /* inode logging flags */ 145 int *logflagsp, /* inode logging flags */
152 xfs_extdelta_t *delta, /* Change made to incore extents */
153 int whichfork); /* data or attr fork */ 146 int whichfork); /* data or attr fork */
154 147
155/* 148/*
@@ -162,8 +155,7 @@ xfs_bmap_add_extent_unwritten_real(
162 xfs_extnum_t idx, /* extent number to update/insert */ 155 xfs_extnum_t idx, /* extent number to update/insert */
163 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 156 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
164 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 157 xfs_bmbt_irec_t *new, /* new data to add to file extents */
165 int *logflagsp, /* inode logging flags */ 158 int *logflagsp); /* inode logging flags */
166 xfs_extdelta_t *delta); /* Change made to incore extents */
167 159
168/* 160/*
169 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. 161 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
@@ -200,7 +192,6 @@ xfs_bmap_del_extent(
200 xfs_btree_cur_t *cur, /* if null, not a btree */ 192 xfs_btree_cur_t *cur, /* if null, not a btree */
201 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 193 xfs_bmbt_irec_t *new, /* new data to add to file extents */
202 int *logflagsp,/* inode logging flags */ 194 int *logflagsp,/* inode logging flags */
203 xfs_extdelta_t *delta, /* Change made to incore extents */
204 int whichfork, /* data or attr fork */ 195 int whichfork, /* data or attr fork */
205 int rsvd); /* OK to allocate reserved blocks */ 196 int rsvd); /* OK to allocate reserved blocks */
206 197
@@ -489,7 +480,6 @@ xfs_bmap_add_extent(
489 xfs_fsblock_t *first, /* pointer to firstblock variable */ 480 xfs_fsblock_t *first, /* pointer to firstblock variable */
490 xfs_bmap_free_t *flist, /* list of extents to be freed */ 481 xfs_bmap_free_t *flist, /* list of extents to be freed */
491 int *logflagsp, /* inode logging flags */ 482 int *logflagsp, /* inode logging flags */
492 xfs_extdelta_t *delta, /* Change made to incore extents */
493 int whichfork, /* data or attr fork */ 483 int whichfork, /* data or attr fork */
494 int rsvd) /* OK to use reserved data blocks */ 484 int rsvd) /* OK to use reserved data blocks */
495{ 485{
@@ -524,15 +514,6 @@ xfs_bmap_add_extent(
524 logflags = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 514 logflags = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
525 } else 515 } else
526 logflags = 0; 516 logflags = 0;
527 /* DELTA: single new extent */
528 if (delta) {
529 if (delta->xed_startoff > new->br_startoff)
530 delta->xed_startoff = new->br_startoff;
531 if (delta->xed_blockcount <
532 new->br_startoff + new->br_blockcount)
533 delta->xed_blockcount = new->br_startoff +
534 new->br_blockcount;
535 }
536 } 517 }
537 /* 518 /*
538 * Any kind of new delayed allocation goes here. 519 * Any kind of new delayed allocation goes here.
@@ -542,7 +523,7 @@ xfs_bmap_add_extent(
542 ASSERT((cur->bc_private.b.flags & 523 ASSERT((cur->bc_private.b.flags &
543 XFS_BTCUR_BPRV_WASDEL) == 0); 524 XFS_BTCUR_BPRV_WASDEL) == 0);
544 if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, new, 525 if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, new,
545 &logflags, delta, rsvd))) 526 &logflags, rsvd)))
546 goto done; 527 goto done;
547 } 528 }
548 /* 529 /*
@@ -553,7 +534,7 @@ xfs_bmap_add_extent(
553 ASSERT((cur->bc_private.b.flags & 534 ASSERT((cur->bc_private.b.flags &
554 XFS_BTCUR_BPRV_WASDEL) == 0); 535 XFS_BTCUR_BPRV_WASDEL) == 0);
555 if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new, 536 if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new,
556 &logflags, delta, whichfork))) 537 &logflags, whichfork)))
557 goto done; 538 goto done;
558 } else { 539 } else {
559 xfs_bmbt_irec_t prev; /* old extent at offset idx */ 540 xfs_bmbt_irec_t prev; /* old extent at offset idx */
@@ -578,17 +559,17 @@ xfs_bmap_add_extent(
578 XFS_BTCUR_BPRV_WASDEL); 559 XFS_BTCUR_BPRV_WASDEL);
579 if ((error = xfs_bmap_add_extent_delay_real(ip, 560 if ((error = xfs_bmap_add_extent_delay_real(ip,
580 idx, &cur, new, &da_new, first, flist, 561 idx, &cur, new, &da_new, first, flist,
581 &logflags, delta, rsvd))) 562 &logflags, rsvd)))
582 goto done; 563 goto done;
583 } else if (new->br_state == XFS_EXT_NORM) { 564 } else if (new->br_state == XFS_EXT_NORM) {
584 ASSERT(new->br_state == XFS_EXT_NORM); 565 ASSERT(new->br_state == XFS_EXT_NORM);
585 if ((error = xfs_bmap_add_extent_unwritten_real( 566 if ((error = xfs_bmap_add_extent_unwritten_real(
586 ip, idx, &cur, new, &logflags, delta))) 567 ip, idx, &cur, new, &logflags)))
587 goto done; 568 goto done;
588 } else { 569 } else {
589 ASSERT(new->br_state == XFS_EXT_UNWRITTEN); 570 ASSERT(new->br_state == XFS_EXT_UNWRITTEN);
590 if ((error = xfs_bmap_add_extent_unwritten_real( 571 if ((error = xfs_bmap_add_extent_unwritten_real(
591 ip, idx, &cur, new, &logflags, delta))) 572 ip, idx, &cur, new, &logflags)))
592 goto done; 573 goto done;
593 } 574 }
594 ASSERT(*curp == cur || *curp == NULL); 575 ASSERT(*curp == cur || *curp == NULL);
@@ -601,7 +582,7 @@ xfs_bmap_add_extent(
601 ASSERT((cur->bc_private.b.flags & 582 ASSERT((cur->bc_private.b.flags &
602 XFS_BTCUR_BPRV_WASDEL) == 0); 583 XFS_BTCUR_BPRV_WASDEL) == 0);
603 if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, 584 if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur,
604 new, &logflags, delta, whichfork))) 585 new, &logflags, whichfork)))
605 goto done; 586 goto done;
606 } 587 }
607 } 588 }
@@ -666,7 +647,6 @@ xfs_bmap_add_extent_delay_real(
666 xfs_fsblock_t *first, /* pointer to firstblock variable */ 647 xfs_fsblock_t *first, /* pointer to firstblock variable */
667 xfs_bmap_free_t *flist, /* list of extents to be freed */ 648 xfs_bmap_free_t *flist, /* list of extents to be freed */
668 int *logflagsp, /* inode logging flags */ 649 int *logflagsp, /* inode logging flags */
669 xfs_extdelta_t *delta, /* Change made to incore extents */
670 int rsvd) /* OK to use reserved data block allocation */ 650 int rsvd) /* OK to use reserved data block allocation */
671{ 651{
672 xfs_btree_cur_t *cur; /* btree cursor */ 652 xfs_btree_cur_t *cur; /* btree cursor */
@@ -797,11 +777,6 @@ xfs_bmap_add_extent_delay_real(
797 goto done; 777 goto done;
798 } 778 }
799 *dnew = 0; 779 *dnew = 0;
800 /* DELTA: Three in-core extents are replaced by one. */
801 temp = LEFT.br_startoff;
802 temp2 = LEFT.br_blockcount +
803 PREV.br_blockcount +
804 RIGHT.br_blockcount;
805 break; 780 break;
806 781
807 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 782 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
@@ -832,10 +807,6 @@ xfs_bmap_add_extent_delay_real(
832 goto done; 807 goto done;
833 } 808 }
834 *dnew = 0; 809 *dnew = 0;
835 /* DELTA: Two in-core extents are replaced by one. */
836 temp = LEFT.br_startoff;
837 temp2 = LEFT.br_blockcount +
838 PREV.br_blockcount;
839 break; 810 break;
840 811
841 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 812 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@@ -867,10 +838,6 @@ xfs_bmap_add_extent_delay_real(
867 goto done; 838 goto done;
868 } 839 }
869 *dnew = 0; 840 *dnew = 0;
870 /* DELTA: Two in-core extents are replaced by one. */
871 temp = PREV.br_startoff;
872 temp2 = PREV.br_blockcount +
873 RIGHT.br_blockcount;
874 break; 841 break;
875 842
876 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 843 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
@@ -900,9 +867,6 @@ xfs_bmap_add_extent_delay_real(
900 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 867 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
901 } 868 }
902 *dnew = 0; 869 *dnew = 0;
903 /* DELTA: The in-core extent described by new changed type. */
904 temp = new->br_startoff;
905 temp2 = new->br_blockcount;
906 break; 870 break;
907 871
908 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: 872 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
@@ -942,10 +906,6 @@ xfs_bmap_add_extent_delay_real(
942 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 906 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
943 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 907 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
944 *dnew = temp; 908 *dnew = temp;
945 /* DELTA: The boundary between two in-core extents moved. */
946 temp = LEFT.br_startoff;
947 temp2 = LEFT.br_blockcount +
948 PREV.br_blockcount;
949 break; 909 break;
950 910
951 case BMAP_LEFT_FILLING: 911 case BMAP_LEFT_FILLING:
@@ -990,9 +950,6 @@ xfs_bmap_add_extent_delay_real(
990 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 950 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
991 trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_); 951 trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
992 *dnew = temp; 952 *dnew = temp;
993 /* DELTA: One in-core extent is split in two. */
994 temp = PREV.br_startoff;
995 temp2 = PREV.br_blockcount;
996 break; 953 break;
997 954
998 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 955 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@@ -1031,10 +988,6 @@ xfs_bmap_add_extent_delay_real(
1031 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 988 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1032 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 989 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1033 *dnew = temp; 990 *dnew = temp;
1034 /* DELTA: The boundary between two in-core extents moved. */
1035 temp = PREV.br_startoff;
1036 temp2 = PREV.br_blockcount +
1037 RIGHT.br_blockcount;
1038 break; 991 break;
1039 992
1040 case BMAP_RIGHT_FILLING: 993 case BMAP_RIGHT_FILLING:
@@ -1078,9 +1031,6 @@ xfs_bmap_add_extent_delay_real(
1078 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 1031 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1079 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 1032 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1080 *dnew = temp; 1033 *dnew = temp;
1081 /* DELTA: One in-core extent is split in two. */
1082 temp = PREV.br_startoff;
1083 temp2 = PREV.br_blockcount;
1084 break; 1034 break;
1085 1035
1086 case 0: 1036 case 0:
@@ -1161,9 +1111,6 @@ xfs_bmap_add_extent_delay_real(
1161 nullstartblock((int)temp2)); 1111 nullstartblock((int)temp2));
1162 trace_xfs_bmap_post_update(ip, idx + 2, state, _THIS_IP_); 1112 trace_xfs_bmap_post_update(ip, idx + 2, state, _THIS_IP_);
1163 *dnew = temp + temp2; 1113 *dnew = temp + temp2;
1164 /* DELTA: One in-core extent is split in three. */
1165 temp = PREV.br_startoff;
1166 temp2 = PREV.br_blockcount;
1167 break; 1114 break;
1168 1115
1169 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1116 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
@@ -1179,13 +1126,6 @@ xfs_bmap_add_extent_delay_real(
1179 ASSERT(0); 1126 ASSERT(0);
1180 } 1127 }
1181 *curp = cur; 1128 *curp = cur;
1182 if (delta) {
1183 temp2 += temp;
1184 if (delta->xed_startoff > temp)
1185 delta->xed_startoff = temp;
1186 if (delta->xed_blockcount < temp2)
1187 delta->xed_blockcount = temp2;
1188 }
1189done: 1129done:
1190 *logflagsp = rval; 1130 *logflagsp = rval;
1191 return error; 1131 return error;
@@ -1204,8 +1144,7 @@ xfs_bmap_add_extent_unwritten_real(
1204 xfs_extnum_t idx, /* extent number to update/insert */ 1144 xfs_extnum_t idx, /* extent number to update/insert */
1205 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 1145 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
1206 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 1146 xfs_bmbt_irec_t *new, /* new data to add to file extents */
1207 int *logflagsp, /* inode logging flags */ 1147 int *logflagsp) /* inode logging flags */
1208 xfs_extdelta_t *delta) /* Change made to incore extents */
1209{ 1148{
1210 xfs_btree_cur_t *cur; /* btree cursor */ 1149 xfs_btree_cur_t *cur; /* btree cursor */
1211 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */ 1150 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
@@ -1219,8 +1158,6 @@ xfs_bmap_add_extent_unwritten_real(
1219 /* left is 0, right is 1, prev is 2 */ 1158 /* left is 0, right is 1, prev is 2 */
1220 int rval=0; /* return value (logging flags) */ 1159 int rval=0; /* return value (logging flags) */
1221 int state = 0;/* state bits, accessed thru macros */ 1160 int state = 0;/* state bits, accessed thru macros */
1222 xfs_filblks_t temp=0;
1223 xfs_filblks_t temp2=0;
1224 1161
1225#define LEFT r[0] 1162#define LEFT r[0]
1226#define RIGHT r[1] 1163#define RIGHT r[1]
@@ -1341,11 +1278,6 @@ xfs_bmap_add_extent_unwritten_real(
1341 RIGHT.br_blockcount, LEFT.br_state))) 1278 RIGHT.br_blockcount, LEFT.br_state)))
1342 goto done; 1279 goto done;
1343 } 1280 }
1344 /* DELTA: Three in-core extents are replaced by one. */
1345 temp = LEFT.br_startoff;
1346 temp2 = LEFT.br_blockcount +
1347 PREV.br_blockcount +
1348 RIGHT.br_blockcount;
1349 break; 1281 break;
1350 1282
1351 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: 1283 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
@@ -1382,10 +1314,6 @@ xfs_bmap_add_extent_unwritten_real(
1382 LEFT.br_state))) 1314 LEFT.br_state)))
1383 goto done; 1315 goto done;
1384 } 1316 }
1385 /* DELTA: Two in-core extents are replaced by one. */
1386 temp = LEFT.br_startoff;
1387 temp2 = LEFT.br_blockcount +
1388 PREV.br_blockcount;
1389 break; 1317 break;
1390 1318
1391 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 1319 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@@ -1422,10 +1350,6 @@ xfs_bmap_add_extent_unwritten_real(
1422 newext))) 1350 newext)))
1423 goto done; 1351 goto done;
1424 } 1352 }
1425 /* DELTA: Two in-core extents are replaced by one. */
1426 temp = PREV.br_startoff;
1427 temp2 = PREV.br_blockcount +
1428 RIGHT.br_blockcount;
1429 break; 1353 break;
1430 1354
1431 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 1355 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
@@ -1453,9 +1377,6 @@ xfs_bmap_add_extent_unwritten_real(
1453 newext))) 1377 newext)))
1454 goto done; 1378 goto done;
1455 } 1379 }
1456 /* DELTA: The in-core extent described by new changed type. */
1457 temp = new->br_startoff;
1458 temp2 = new->br_blockcount;
1459 break; 1380 break;
1460 1381
1461 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: 1382 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
@@ -1501,10 +1422,6 @@ xfs_bmap_add_extent_unwritten_real(
1501 LEFT.br_state)) 1422 LEFT.br_state))
1502 goto done; 1423 goto done;
1503 } 1424 }
1504 /* DELTA: The boundary between two in-core extents moved. */
1505 temp = LEFT.br_startoff;
1506 temp2 = LEFT.br_blockcount +
1507 PREV.br_blockcount;
1508 break; 1425 break;
1509 1426
1510 case BMAP_LEFT_FILLING: 1427 case BMAP_LEFT_FILLING:
@@ -1544,9 +1461,6 @@ xfs_bmap_add_extent_unwritten_real(
1544 goto done; 1461 goto done;
1545 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1462 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1546 } 1463 }
1547 /* DELTA: One in-core extent is split in two. */
1548 temp = PREV.br_startoff;
1549 temp2 = PREV.br_blockcount;
1550 break; 1464 break;
1551 1465
1552 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: 1466 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@@ -1587,10 +1501,6 @@ xfs_bmap_add_extent_unwritten_real(
1587 newext))) 1501 newext)))
1588 goto done; 1502 goto done;
1589 } 1503 }
1590 /* DELTA: The boundary between two in-core extents moved. */
1591 temp = PREV.br_startoff;
1592 temp2 = PREV.br_blockcount +
1593 RIGHT.br_blockcount;
1594 break; 1504 break;
1595 1505
1596 case BMAP_RIGHT_FILLING: 1506 case BMAP_RIGHT_FILLING:
@@ -1630,9 +1540,6 @@ xfs_bmap_add_extent_unwritten_real(
1630 goto done; 1540 goto done;
1631 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1541 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1632 } 1542 }
1633 /* DELTA: One in-core extent is split in two. */
1634 temp = PREV.br_startoff;
1635 temp2 = PREV.br_blockcount;
1636 break; 1543 break;
1637 1544
1638 case 0: 1545 case 0:
@@ -1692,9 +1599,6 @@ xfs_bmap_add_extent_unwritten_real(
1692 goto done; 1599 goto done;
1693 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1600 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1694 } 1601 }
1695 /* DELTA: One in-core extent is split in three. */
1696 temp = PREV.br_startoff;
1697 temp2 = PREV.br_blockcount;
1698 break; 1602 break;
1699 1603
1700 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 1604 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
@@ -1710,13 +1614,6 @@ xfs_bmap_add_extent_unwritten_real(
1710 ASSERT(0); 1614 ASSERT(0);
1711 } 1615 }
1712 *curp = cur; 1616 *curp = cur;
1713 if (delta) {
1714 temp2 += temp;
1715 if (delta->xed_startoff > temp)
1716 delta->xed_startoff = temp;
1717 if (delta->xed_blockcount < temp2)
1718 delta->xed_blockcount = temp2;
1719 }
1720done: 1617done:
1721 *logflagsp = rval; 1618 *logflagsp = rval;
1722 return error; 1619 return error;
@@ -1736,7 +1633,6 @@ xfs_bmap_add_extent_hole_delay(
1736 xfs_extnum_t idx, /* extent number to update/insert */ 1633 xfs_extnum_t idx, /* extent number to update/insert */
1737 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 1634 xfs_bmbt_irec_t *new, /* new data to add to file extents */
1738 int *logflagsp, /* inode logging flags */ 1635 int *logflagsp, /* inode logging flags */
1739 xfs_extdelta_t *delta, /* Change made to incore extents */
1740 int rsvd) /* OK to allocate reserved blocks */ 1636 int rsvd) /* OK to allocate reserved blocks */
1741{ 1637{
1742 xfs_bmbt_rec_host_t *ep; /* extent record for idx */ 1638 xfs_bmbt_rec_host_t *ep; /* extent record for idx */
@@ -1747,7 +1643,6 @@ xfs_bmap_add_extent_hole_delay(
1747 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 1643 xfs_bmbt_irec_t right; /* right neighbor extent entry */
1748 int state; /* state bits, accessed thru macros */ 1644 int state; /* state bits, accessed thru macros */
1749 xfs_filblks_t temp=0; /* temp for indirect calculations */ 1645 xfs_filblks_t temp=0; /* temp for indirect calculations */
1750 xfs_filblks_t temp2=0;
1751 1646
1752 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); 1647 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1753 ep = xfs_iext_get_ext(ifp, idx); 1648 ep = xfs_iext_get_ext(ifp, idx);
@@ -1819,9 +1714,6 @@ xfs_bmap_add_extent_hole_delay(
1819 1714
1820 xfs_iext_remove(ip, idx, 1, state); 1715 xfs_iext_remove(ip, idx, 1, state);
1821 ip->i_df.if_lastex = idx - 1; 1716 ip->i_df.if_lastex = idx - 1;
1822 /* DELTA: Two in-core extents were replaced by one. */
1823 temp2 = temp;
1824 temp = left.br_startoff;
1825 break; 1717 break;
1826 1718
1827 case BMAP_LEFT_CONTIG: 1719 case BMAP_LEFT_CONTIG:
@@ -1841,9 +1733,6 @@ xfs_bmap_add_extent_hole_delay(
1841 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); 1733 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1842 1734
1843 ip->i_df.if_lastex = idx - 1; 1735 ip->i_df.if_lastex = idx - 1;
1844 /* DELTA: One in-core extent grew into a hole. */
1845 temp2 = temp;
1846 temp = left.br_startoff;
1847 break; 1736 break;
1848 1737
1849 case BMAP_RIGHT_CONTIG: 1738 case BMAP_RIGHT_CONTIG:
@@ -1862,9 +1751,6 @@ xfs_bmap_add_extent_hole_delay(
1862 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 1751 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1863 1752
1864 ip->i_df.if_lastex = idx; 1753 ip->i_df.if_lastex = idx;
1865 /* DELTA: One in-core extent grew into a hole. */
1866 temp2 = temp;
1867 temp = new->br_startoff;
1868 break; 1754 break;
1869 1755
1870 case 0: 1756 case 0:
@@ -1876,9 +1762,6 @@ xfs_bmap_add_extent_hole_delay(
1876 oldlen = newlen = 0; 1762 oldlen = newlen = 0;
1877 xfs_iext_insert(ip, idx, 1, new, state); 1763 xfs_iext_insert(ip, idx, 1, new, state);
1878 ip->i_df.if_lastex = idx; 1764 ip->i_df.if_lastex = idx;
1879 /* DELTA: A new in-core extent was added in a hole. */
1880 temp2 = new->br_blockcount;
1881 temp = new->br_startoff;
1882 break; 1765 break;
1883 } 1766 }
1884 if (oldlen != newlen) { 1767 if (oldlen != newlen) {
@@ -1889,13 +1772,6 @@ xfs_bmap_add_extent_hole_delay(
1889 * Nothing to do for disk quota accounting here. 1772 * Nothing to do for disk quota accounting here.
1890 */ 1773 */
1891 } 1774 }
1892 if (delta) {
1893 temp2 += temp;
1894 if (delta->xed_startoff > temp)
1895 delta->xed_startoff = temp;
1896 if (delta->xed_blockcount < temp2)
1897 delta->xed_blockcount = temp2;
1898 }
1899 *logflagsp = 0; 1775 *logflagsp = 0;
1900 return 0; 1776 return 0;
1901} 1777}
@@ -1911,7 +1787,6 @@ xfs_bmap_add_extent_hole_real(
1911 xfs_btree_cur_t *cur, /* if null, not a btree */ 1787 xfs_btree_cur_t *cur, /* if null, not a btree */
1912 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 1788 xfs_bmbt_irec_t *new, /* new data to add to file extents */
1913 int *logflagsp, /* inode logging flags */ 1789 int *logflagsp, /* inode logging flags */
1914 xfs_extdelta_t *delta, /* Change made to incore extents */
1915 int whichfork) /* data or attr fork */ 1790 int whichfork) /* data or attr fork */
1916{ 1791{
1917 xfs_bmbt_rec_host_t *ep; /* pointer to extent entry ins. point */ 1792 xfs_bmbt_rec_host_t *ep; /* pointer to extent entry ins. point */
@@ -1922,8 +1797,6 @@ xfs_bmap_add_extent_hole_real(
1922 xfs_bmbt_irec_t right; /* right neighbor extent entry */ 1797 xfs_bmbt_irec_t right; /* right neighbor extent entry */
1923 int rval=0; /* return value (logging flags) */ 1798 int rval=0; /* return value (logging flags) */
1924 int state; /* state bits, accessed thru macros */ 1799 int state; /* state bits, accessed thru macros */
1925 xfs_filblks_t temp=0;
1926 xfs_filblks_t temp2=0;
1927 1800
1928 ifp = XFS_IFORK_PTR(ip, whichfork); 1801 ifp = XFS_IFORK_PTR(ip, whichfork);
1929 ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); 1802 ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
@@ -2020,11 +1893,6 @@ xfs_bmap_add_extent_hole_real(
2020 left.br_state))) 1893 left.br_state)))
2021 goto done; 1894 goto done;
2022 } 1895 }
2023 /* DELTA: Two in-core extents were replaced by one. */
2024 temp = left.br_startoff;
2025 temp2 = left.br_blockcount +
2026 new->br_blockcount +
2027 right.br_blockcount;
2028 break; 1896 break;
2029 1897
2030 case BMAP_LEFT_CONTIG: 1898 case BMAP_LEFT_CONTIG:
@@ -2056,10 +1924,6 @@ xfs_bmap_add_extent_hole_real(
2056 left.br_state))) 1924 left.br_state)))
2057 goto done; 1925 goto done;
2058 } 1926 }
2059 /* DELTA: One in-core extent grew. */
2060 temp = left.br_startoff;
2061 temp2 = left.br_blockcount +
2062 new->br_blockcount;
2063 break; 1927 break;
2064 1928
2065 case BMAP_RIGHT_CONTIG: 1929 case BMAP_RIGHT_CONTIG:
@@ -2092,10 +1956,6 @@ xfs_bmap_add_extent_hole_real(
2092 right.br_state))) 1956 right.br_state)))
2093 goto done; 1957 goto done;
2094 } 1958 }
2095 /* DELTA: One in-core extent grew. */
2096 temp = new->br_startoff;
2097 temp2 = new->br_blockcount +
2098 right.br_blockcount;
2099 break; 1959 break;
2100 1960
2101 case 0: 1961 case 0:
@@ -2123,18 +1983,8 @@ xfs_bmap_add_extent_hole_real(
2123 goto done; 1983 goto done;
2124 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 1984 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2125 } 1985 }
2126 /* DELTA: A new extent was added in a hole. */
2127 temp = new->br_startoff;
2128 temp2 = new->br_blockcount;
2129 break; 1986 break;
2130 } 1987 }
2131 if (delta) {
2132 temp2 += temp;
2133 if (delta->xed_startoff > temp)
2134 delta->xed_startoff = temp;
2135 if (delta->xed_blockcount < temp2)
2136 delta->xed_blockcount = temp2;
2137 }
2138done: 1988done:
2139 *logflagsp = rval; 1989 *logflagsp = rval;
2140 return error; 1990 return error;
@@ -2959,7 +2809,6 @@ xfs_bmap_del_extent(
2959 xfs_btree_cur_t *cur, /* if null, not a btree */ 2809 xfs_btree_cur_t *cur, /* if null, not a btree */
2960 xfs_bmbt_irec_t *del, /* data to remove from extents */ 2810 xfs_bmbt_irec_t *del, /* data to remove from extents */
2961 int *logflagsp, /* inode logging flags */ 2811 int *logflagsp, /* inode logging flags */
2962 xfs_extdelta_t *delta, /* Change made to incore extents */
2963 int whichfork, /* data or attr fork */ 2812 int whichfork, /* data or attr fork */
2964 int rsvd) /* OK to allocate reserved blocks */ 2813 int rsvd) /* OK to allocate reserved blocks */
2965{ 2814{
@@ -3265,14 +3114,6 @@ xfs_bmap_del_extent(
3265 if (da_old > da_new) 3114 if (da_old > da_new)
3266 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int64_t)(da_old - da_new), 3115 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int64_t)(da_old - da_new),
3267 rsvd); 3116 rsvd);
3268 if (delta) {
3269 /* DELTA: report the original extent. */
3270 if (delta->xed_startoff > got.br_startoff)
3271 delta->xed_startoff = got.br_startoff;
3272 if (delta->xed_blockcount < got.br_startoff+got.br_blockcount)
3273 delta->xed_blockcount = got.br_startoff +
3274 got.br_blockcount;
3275 }
3276done: 3117done:
3277 *logflagsp = flags; 3118 *logflagsp = flags;
3278 return error; 3119 return error;
@@ -3754,9 +3595,10 @@ xfs_bmap_add_attrfork(
3754 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; 3595 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
3755 } 3596 }
3756 ASSERT(ip->i_d.di_anextents == 0); 3597 ASSERT(ip->i_d.di_anextents == 0);
3757 IHOLD(ip); 3598
3758 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 3599 xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
3759 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 3600 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
3601
3760 switch (ip->i_d.di_format) { 3602 switch (ip->i_d.di_format) {
3761 case XFS_DINODE_FMT_DEV: 3603 case XFS_DINODE_FMT_DEV:
3762 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3; 3604 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
@@ -4483,8 +4325,7 @@ xfs_bmapi(
4483 xfs_extlen_t total, /* total blocks needed */ 4325 xfs_extlen_t total, /* total blocks needed */
4484 xfs_bmbt_irec_t *mval, /* output: map values */ 4326 xfs_bmbt_irec_t *mval, /* output: map values */
4485 int *nmap, /* i/o: mval size/count */ 4327 int *nmap, /* i/o: mval size/count */
4486 xfs_bmap_free_t *flist, /* i/o: list extents to free */ 4328 xfs_bmap_free_t *flist) /* i/o: list extents to free */
4487 xfs_extdelta_t *delta) /* o: change made to incore extents */
4488{ 4329{
4489 xfs_fsblock_t abno; /* allocated block number */ 4330 xfs_fsblock_t abno; /* allocated block number */
4490 xfs_extlen_t alen; /* allocated extent length */ 4331 xfs_extlen_t alen; /* allocated extent length */
@@ -4596,10 +4437,7 @@ xfs_bmapi(
4596 end = bno + len; 4437 end = bno + len;
4597 obno = bno; 4438 obno = bno;
4598 bma.ip = NULL; 4439 bma.ip = NULL;
4599 if (delta) { 4440
4600 delta->xed_startoff = NULLFILEOFF;
4601 delta->xed_blockcount = 0;
4602 }
4603 while (bno < end && n < *nmap) { 4441 while (bno < end && n < *nmap) {
4604 /* 4442 /*
4605 * Reading past eof, act as though there's a hole 4443 * Reading past eof, act as though there's a hole
@@ -4620,19 +4458,13 @@ xfs_bmapi(
4620 * allocate the stuff asked for in this bmap call 4458 * allocate the stuff asked for in this bmap call
4621 * but that wouldn't be as good. 4459 * but that wouldn't be as good.
4622 */ 4460 */
4623 if (wasdelay && !(flags & XFS_BMAPI_EXACT)) { 4461 if (wasdelay) {
4624 alen = (xfs_extlen_t)got.br_blockcount; 4462 alen = (xfs_extlen_t)got.br_blockcount;
4625 aoff = got.br_startoff; 4463 aoff = got.br_startoff;
4626 if (lastx != NULLEXTNUM && lastx) { 4464 if (lastx != NULLEXTNUM && lastx) {
4627 ep = xfs_iext_get_ext(ifp, lastx - 1); 4465 ep = xfs_iext_get_ext(ifp, lastx - 1);
4628 xfs_bmbt_get_all(ep, &prev); 4466 xfs_bmbt_get_all(ep, &prev);
4629 } 4467 }
4630 } else if (wasdelay) {
4631 alen = (xfs_extlen_t)
4632 XFS_FILBLKS_MIN(len,
4633 (got.br_startoff +
4634 got.br_blockcount) - bno);
4635 aoff = bno;
4636 } else { 4468 } else {
4637 alen = (xfs_extlen_t) 4469 alen = (xfs_extlen_t)
4638 XFS_FILBLKS_MIN(len, MAXEXTLEN); 4470 XFS_FILBLKS_MIN(len, MAXEXTLEN);
@@ -4831,7 +4663,7 @@ xfs_bmapi(
4831 got.br_state = XFS_EXT_UNWRITTEN; 4663 got.br_state = XFS_EXT_UNWRITTEN;
4832 } 4664 }
4833 error = xfs_bmap_add_extent(ip, lastx, &cur, &got, 4665 error = xfs_bmap_add_extent(ip, lastx, &cur, &got,
4834 firstblock, flist, &tmp_logflags, delta, 4666 firstblock, flist, &tmp_logflags,
4835 whichfork, (flags & XFS_BMAPI_RSVBLOCKS)); 4667 whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
4836 logflags |= tmp_logflags; 4668 logflags |= tmp_logflags;
4837 if (error) 4669 if (error)
@@ -4927,7 +4759,7 @@ xfs_bmapi(
4927 } 4759 }
4928 mval->br_state = XFS_EXT_NORM; 4760 mval->br_state = XFS_EXT_NORM;
4929 error = xfs_bmap_add_extent(ip, lastx, &cur, mval, 4761 error = xfs_bmap_add_extent(ip, lastx, &cur, mval,
4930 firstblock, flist, &tmp_logflags, delta, 4762 firstblock, flist, &tmp_logflags,
4931 whichfork, (flags & XFS_BMAPI_RSVBLOCKS)); 4763 whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
4932 logflags |= tmp_logflags; 4764 logflags |= tmp_logflags;
4933 if (error) 4765 if (error)
@@ -5017,14 +4849,6 @@ xfs_bmapi(
5017 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE || 4849 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
5018 XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max); 4850 XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max);
5019 error = 0; 4851 error = 0;
5020 if (delta && delta->xed_startoff != NULLFILEOFF) {
5021 /* A change was actually made.
5022 * Note that delta->xed_blockcount is an offset at this
5023 * point and needs to be converted to a block count.
5024 */
5025 ASSERT(delta->xed_blockcount > delta->xed_startoff);
5026 delta->xed_blockcount -= delta->xed_startoff;
5027 }
5028error0: 4852error0:
5029 /* 4853 /*
5030 * Log everything. Do this after conversion, there's no point in 4854 * Log everything. Do this after conversion, there's no point in
@@ -5136,8 +4960,6 @@ xfs_bunmapi(
5136 xfs_fsblock_t *firstblock, /* first allocated block 4960 xfs_fsblock_t *firstblock, /* first allocated block
5137 controls a.g. for allocs */ 4961 controls a.g. for allocs */
5138 xfs_bmap_free_t *flist, /* i/o: list extents to free */ 4962 xfs_bmap_free_t *flist, /* i/o: list extents to free */
5139 xfs_extdelta_t *delta, /* o: change made to incore
5140 extents */
5141 int *done) /* set if not done yet */ 4963 int *done) /* set if not done yet */
5142{ 4964{
5143 xfs_btree_cur_t *cur; /* bmap btree cursor */ 4965 xfs_btree_cur_t *cur; /* bmap btree cursor */
@@ -5196,10 +5018,7 @@ xfs_bunmapi(
5196 bno = start + len - 1; 5018 bno = start + len - 1;
5197 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, 5019 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
5198 &prev); 5020 &prev);
5199 if (delta) { 5021
5200 delta->xed_startoff = NULLFILEOFF;
5201 delta->xed_blockcount = 0;
5202 }
5203 /* 5022 /*
5204 * Check to see if the given block number is past the end of the 5023 * Check to see if the given block number is past the end of the
5205 * file, back up to the last block if so... 5024 * file, back up to the last block if so...
@@ -5297,7 +5116,7 @@ xfs_bunmapi(
5297 } 5116 }
5298 del.br_state = XFS_EXT_UNWRITTEN; 5117 del.br_state = XFS_EXT_UNWRITTEN;
5299 error = xfs_bmap_add_extent(ip, lastx, &cur, &del, 5118 error = xfs_bmap_add_extent(ip, lastx, &cur, &del,
5300 firstblock, flist, &logflags, delta, 5119 firstblock, flist, &logflags,
5301 XFS_DATA_FORK, 0); 5120 XFS_DATA_FORK, 0);
5302 if (error) 5121 if (error)
5303 goto error0; 5122 goto error0;
@@ -5352,7 +5171,7 @@ xfs_bunmapi(
5352 prev.br_state = XFS_EXT_UNWRITTEN; 5171 prev.br_state = XFS_EXT_UNWRITTEN;
5353 error = xfs_bmap_add_extent(ip, lastx - 1, &cur, 5172 error = xfs_bmap_add_extent(ip, lastx - 1, &cur,
5354 &prev, firstblock, flist, &logflags, 5173 &prev, firstblock, flist, &logflags,
5355 delta, XFS_DATA_FORK, 0); 5174 XFS_DATA_FORK, 0);
5356 if (error) 5175 if (error)
5357 goto error0; 5176 goto error0;
5358 goto nodelete; 5177 goto nodelete;
@@ -5361,7 +5180,7 @@ xfs_bunmapi(
5361 del.br_state = XFS_EXT_UNWRITTEN; 5180 del.br_state = XFS_EXT_UNWRITTEN;
5362 error = xfs_bmap_add_extent(ip, lastx, &cur, 5181 error = xfs_bmap_add_extent(ip, lastx, &cur,
5363 &del, firstblock, flist, &logflags, 5182 &del, firstblock, flist, &logflags,
5364 delta, XFS_DATA_FORK, 0); 5183 XFS_DATA_FORK, 0);
5365 if (error) 5184 if (error)
5366 goto error0; 5185 goto error0;
5367 goto nodelete; 5186 goto nodelete;
@@ -5414,7 +5233,7 @@ xfs_bunmapi(
5414 goto error0; 5233 goto error0;
5415 } 5234 }
5416 error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del, 5235 error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del,
5417 &tmp_logflags, delta, whichfork, rsvd); 5236 &tmp_logflags, whichfork, rsvd);
5418 logflags |= tmp_logflags; 5237 logflags |= tmp_logflags;
5419 if (error) 5238 if (error)
5420 goto error0; 5239 goto error0;
@@ -5471,14 +5290,6 @@ nodelete:
5471 ASSERT(ifp->if_ext_max == 5290 ASSERT(ifp->if_ext_max ==
5472 XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); 5291 XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5473 error = 0; 5292 error = 0;
5474 if (delta && delta->xed_startoff != NULLFILEOFF) {
5475 /* A change was actually made.
5476 * Note that delta->xed_blockcount is an offset at this
5477 * point and needs to be converted to a block count.
5478 */
5479 ASSERT(delta->xed_blockcount > delta->xed_startoff);
5480 delta->xed_blockcount -= delta->xed_startoff;
5481 }
5482error0: 5293error0:
5483 /* 5294 /*
5484 * Log everything. Do this after conversion, there's no point in 5295 * Log everything. Do this after conversion, there's no point in
@@ -5605,28 +5416,6 @@ xfs_getbmap(
5605 prealloced = 0; 5416 prealloced = 0;
5606 fixlen = 1LL << 32; 5417 fixlen = 1LL << 32;
5607 } else { 5418 } else {
5608 /*
5609 * If the BMV_IF_NO_DMAPI_READ interface bit specified, do
5610 * not generate a DMAPI read event. Otherwise, if the
5611 * DM_EVENT_READ bit is set for the file, generate a read
5612 * event in order that the DMAPI application may do its thing
5613 * before we return the extents. Usually this means restoring
5614 * user file data to regions of the file that look like holes.
5615 *
5616 * The "old behavior" (from XFS_IOC_GETBMAP) is to not specify
5617 * BMV_IF_NO_DMAPI_READ so that read events are generated.
5618 * If this were not true, callers of ioctl(XFS_IOC_GETBMAP)
5619 * could misinterpret holes in a DMAPI file as true holes,
5620 * when in fact they may represent offline user data.
5621 */
5622 if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) &&
5623 !(iflags & BMV_IF_NO_DMAPI_READ)) {
5624 error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip,
5625 0, 0, 0, NULL);
5626 if (error)
5627 return XFS_ERROR(error);
5628 }
5629
5630 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS && 5419 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
5631 ip->i_d.di_format != XFS_DINODE_FMT_BTREE && 5420 ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
5632 ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) 5421 ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
@@ -5713,7 +5502,7 @@ xfs_getbmap(
5713 error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset), 5502 error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
5714 XFS_BB_TO_FSB(mp, bmv->bmv_length), 5503 XFS_BB_TO_FSB(mp, bmv->bmv_length),
5715 bmapi_flags, NULL, 0, map, &nmap, 5504 bmapi_flags, NULL, 0, map, &nmap,
5716 NULL, NULL); 5505 NULL);
5717 if (error) 5506 if (error)
5718 goto out_free_map; 5507 goto out_free_map;
5719 ASSERT(nmap <= subnex); 5508 ASSERT(nmap <= subnex);
@@ -5859,66 +5648,34 @@ xfs_bmap_eof(
5859} 5648}
5860 5649
5861#ifdef DEBUG 5650#ifdef DEBUG
5862STATIC 5651STATIC struct xfs_buf *
5863xfs_buf_t *
5864xfs_bmap_get_bp( 5652xfs_bmap_get_bp(
5865 xfs_btree_cur_t *cur, 5653 struct xfs_btree_cur *cur,
5866 xfs_fsblock_t bno) 5654 xfs_fsblock_t bno)
5867{ 5655{
5868 int i; 5656 struct xfs_log_item_desc *lidp;
5869 xfs_buf_t *bp; 5657 int i;
5870 5658
5871 if (!cur) 5659 if (!cur)
5872 return(NULL); 5660 return NULL;
5873
5874 bp = NULL;
5875 for(i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
5876 bp = cur->bc_bufs[i];
5877 if (!bp) break;
5878 if (XFS_BUF_ADDR(bp) == bno)
5879 break; /* Found it */
5880 }
5881 if (i == XFS_BTREE_MAXLEVELS)
5882 bp = NULL;
5883
5884 if (!bp) { /* Chase down all the log items to see if the bp is there */
5885 xfs_log_item_chunk_t *licp;
5886 xfs_trans_t *tp;
5887
5888 tp = cur->bc_tp;
5889 licp = &tp->t_items;
5890 while (!bp && licp != NULL) {
5891 if (xfs_lic_are_all_free(licp)) {
5892 licp = licp->lic_next;
5893 continue;
5894 }
5895 for (i = 0; i < licp->lic_unused; i++) {
5896 xfs_log_item_desc_t *lidp;
5897 xfs_log_item_t *lip;
5898 xfs_buf_log_item_t *bip;
5899 xfs_buf_t *lbp;
5900
5901 if (xfs_lic_isfree(licp, i)) {
5902 continue;
5903 }
5904
5905 lidp = xfs_lic_slot(licp, i);
5906 lip = lidp->lid_item;
5907 if (lip->li_type != XFS_LI_BUF)
5908 continue;
5909 5661
5910 bip = (xfs_buf_log_item_t *)lip; 5662 for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
5911 lbp = bip->bli_buf; 5663 if (!cur->bc_bufs[i])
5664 break;
5665 if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
5666 return cur->bc_bufs[i];
5667 }
5912 5668
5913 if (XFS_BUF_ADDR(lbp) == bno) { 5669 /* Chase down all the log items to see if the bp is there */
5914 bp = lbp; 5670 list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
5915 break; /* Found it */ 5671 struct xfs_buf_log_item *bip;
5916 } 5672 bip = (struct xfs_buf_log_item *)lidp->lid_item;
5917 } 5673 if (bip->bli_item.li_type == XFS_LI_BUF &&
5918 licp = licp->lic_next; 5674 XFS_BUF_ADDR(bip->bli_buf) == bno)
5919 } 5675 return bip->bli_buf;
5920 } 5676 }
5921 return(bp); 5677
5678 return NULL;
5922} 5679}
5923 5680
5924STATIC void 5681STATIC void
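
The rewritten xfs_bmap_get_bp() above first scans the cursor's buffer array and then walks the transaction's t_items list with list_for_each_entry(), downcasting each descriptor to the buf log item it describes. A self-contained sketch of the container_of()/embedded-node idiom that walk relies on, using generic names rather than the XFS types:

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-in for the kernel's container_of(): recover the outer
 * structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct list_node {
        struct list_node *next;
};

struct buf_item {
        long blockno;           /* key we search by */
        struct list_node node;  /* embedded list linkage */
};

/* Walk the list, downcast each node, return the first match (or NULL). */
static struct buf_item *find_item(struct list_node *head, long blockno)
{
        struct list_node *pos;

        for (pos = head; pos != NULL; pos = pos->next) {
                struct buf_item *bi = container_of(pos, struct buf_item, node);

                if (bi->blockno == blockno)
                        return bi;
        }
        return NULL;
}

int main(void)
{
        struct buf_item a = { .blockno = 10 };
        struct buf_item b = { .blockno = 20 };
        struct buf_item *hit;

        a.node.next = &b.node;
        b.node.next = NULL;

        hit = find_item(&a.node, 20);
        printf("found block %ld\n", hit ? hit->blockno : -1L);
        return 0;
}
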
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 419dafb9d87d..b13569a6179b 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -28,20 +28,6 @@ struct xfs_trans;
28extern kmem_zone_t *xfs_bmap_free_item_zone; 28extern kmem_zone_t *xfs_bmap_free_item_zone;
29 29
30/* 30/*
31 * DELTA: describe a change to the in-core extent list.
32 *
33 * Internally the use of xed_blockcount is somewhat funky.
34 * xed_blockcount contains an offset much of the time because this
35 * makes merging changes easier. (xfs_fileoff_t and xfs_filblks_t are
36 * the same underlying type).
37 */
38typedef struct xfs_extdelta
39{
40 xfs_fileoff_t xed_startoff; /* offset of range */
41 xfs_filblks_t xed_blockcount; /* blocks in range */
42} xfs_extdelta_t;
43
44/*
45 * List of extents to be free "later". 31 * List of extents to be free "later".
46 * The list is kept sorted on xbf_startblock. 32 * The list is kept sorted on xbf_startblock.
47 */ 33 */
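
The deleted xfs_extdelta_t tracked the union of the ranges a bmap call touched: xed_startoff held the lowest start offset seen, while xed_blockcount temporarily held the highest end offset and was only converted to a length at the very end (the "funky" usage the removed comment describes). A hedged sketch of that merge-then-convert bookkeeping, detached from any XFS types:

#include <stdio.h>

/* Track the union of several modified [start, start+len) ranges; "count"
 * holds an end offset while merging and becomes a length at the end. */
struct delta {
        long startoff;          /* lowest start offset seen, -1 = none */
        long count;             /* end offset while merging, length after */
};

static void delta_init(struct delta *d)
{
        d->startoff = -1;       /* stand-in for NULLFILEOFF */
        d->count = 0;
}

static void delta_merge(struct delta *d, long start, long len)
{
        if (d->startoff == -1 || start < d->startoff)
                d->startoff = start;
        if (d->count < start + len)
                d->count = start + len;         /* still an end offset here */
}

static void delta_finish(struct delta *d)
{
        if (d->startoff != -1)
                d->count -= d->startoff;        /* end offset -> block count */
}

int main(void)
{
        struct delta d;

        delta_init(&d);
        delta_merge(&d, 100, 4);        /* blocks 100..103 */
        delta_merge(&d, 90, 2);         /* blocks 90..91 */
        delta_finish(&d);

        printf("start=%ld count=%ld\n", d.startoff, d.count);  /* 90, 14 */
        return 0;
}
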
@@ -82,16 +68,13 @@ typedef struct xfs_bmap_free
82#define XFS_BMAPI_DELAY 0x002 /* delayed write operation */ 68#define XFS_BMAPI_DELAY 0x002 /* delayed write operation */
83#define XFS_BMAPI_ENTIRE 0x004 /* return entire extent, not trimmed */ 69#define XFS_BMAPI_ENTIRE 0x004 /* return entire extent, not trimmed */
84#define XFS_BMAPI_METADATA 0x008 /* mapping metadata not user data */ 70#define XFS_BMAPI_METADATA 0x008 /* mapping metadata not user data */
85#define XFS_BMAPI_EXACT 0x010 /* allocate only to spec'd bounds */ 71#define XFS_BMAPI_ATTRFORK 0x010 /* use attribute fork not data */
86#define XFS_BMAPI_ATTRFORK 0x020 /* use attribute fork not data */ 72#define XFS_BMAPI_RSVBLOCKS 0x020 /* OK to alloc. reserved data blocks */
87#define XFS_BMAPI_ASYNC 0x040 /* bunmapi xactions can be async */ 73#define XFS_BMAPI_PREALLOC 0x040 /* preallocation op: unwritten space */
88#define XFS_BMAPI_RSVBLOCKS 0x080 /* OK to alloc. reserved data blocks */ 74#define XFS_BMAPI_IGSTATE 0x080 /* Ignore state - */
89#define XFS_BMAPI_PREALLOC 0x100 /* preallocation op: unwritten space */
90#define XFS_BMAPI_IGSTATE 0x200 /* Ignore state - */
91 /* combine contig. space */ 75 /* combine contig. space */
92#define XFS_BMAPI_CONTIG 0x400 /* must allocate only one extent */ 76#define XFS_BMAPI_CONTIG 0x100 /* must allocate only one extent */
93/* XFS_BMAPI_DIRECT_IO 0x800 */ 77#define XFS_BMAPI_CONVERT 0x200 /* unwritten extent conversion - */
94#define XFS_BMAPI_CONVERT 0x1000 /* unwritten extent conversion - */
95 /* need write cache flushing and no */ 78 /* need write cache flushing and no */
96 /* additional allocation alignments */ 79 /* additional allocation alignments */
97 80
@@ -100,9 +83,7 @@ typedef struct xfs_bmap_free
100 { XFS_BMAPI_DELAY, "DELAY" }, \ 83 { XFS_BMAPI_DELAY, "DELAY" }, \
101 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ 84 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \
102 { XFS_BMAPI_METADATA, "METADATA" }, \ 85 { XFS_BMAPI_METADATA, "METADATA" }, \
103 { XFS_BMAPI_EXACT, "EXACT" }, \
104 { XFS_BMAPI_ATTRFORK, "ATTRFORK" }, \ 86 { XFS_BMAPI_ATTRFORK, "ATTRFORK" }, \
105 { XFS_BMAPI_ASYNC, "ASYNC" }, \
106 { XFS_BMAPI_RSVBLOCKS, "RSVBLOCKS" }, \ 87 { XFS_BMAPI_RSVBLOCKS, "RSVBLOCKS" }, \
107 { XFS_BMAPI_PREALLOC, "PREALLOC" }, \ 88 { XFS_BMAPI_PREALLOC, "PREALLOC" }, \
108 { XFS_BMAPI_IGSTATE, "IGSTATE" }, \ 89 { XFS_BMAPI_IGSTATE, "IGSTATE" }, \
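
The renumbered XFS_BMAPI_* values above remain single-bit flags, and the companion table pairs each bit with a string so trace output can print names instead of a raw mask. A short sketch of the same flag/name-table pattern with made-up flags:

#include <stddef.h>
#include <stdio.h>

#define FLAG_ENTIRE     0x01
#define FLAG_METADATA   0x02
#define FLAG_PREALLOC   0x04

static const struct {
        unsigned int    bit;
        const char      *name;
} flag_names[] = {
        { FLAG_ENTIRE,   "ENTIRE" },
        { FLAG_METADATA, "METADATA" },
        { FLAG_PREALLOC, "PREALLOC" },
};

/* Print the names of all bits set in "flags", tracepoint-style. */
static void print_flags(unsigned int flags)
{
        size_t i;

        for (i = 0; i < sizeof(flag_names) / sizeof(flag_names[0]); i++)
                if (flags & flag_names[i].bit)
                        printf("%s ", flag_names[i].name);
        printf("\n");
}

int main(void)
{
        print_flags(FLAG_ENTIRE | FLAG_PREALLOC);       /* ENTIRE PREALLOC */
        return 0;
}
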
@@ -310,9 +291,7 @@ xfs_bmapi(
310 xfs_extlen_t total, /* total blocks needed */ 291 xfs_extlen_t total, /* total blocks needed */
311 struct xfs_bmbt_irec *mval, /* output: map values */ 292 struct xfs_bmbt_irec *mval, /* output: map values */
312 int *nmap, /* i/o: mval size/count */ 293 int *nmap, /* i/o: mval size/count */
313 xfs_bmap_free_t *flist, /* i/o: list extents to free */ 294 xfs_bmap_free_t *flist); /* i/o: list extents to free */
314 xfs_extdelta_t *delta); /* o: change made to incore
315 extents */
316 295
317/* 296/*
318 * Map file blocks to filesystem blocks, simple version. 297 * Map file blocks to filesystem blocks, simple version.
@@ -346,8 +325,6 @@ xfs_bunmapi(
346 xfs_fsblock_t *firstblock, /* first allocated block 325 xfs_fsblock_t *firstblock, /* first allocated block
347 controls a.g. for allocs */ 326 controls a.g. for allocs */
348 xfs_bmap_free_t *flist, /* i/o: list extents to free */ 327 xfs_bmap_free_t *flist, /* i/o: list extents to free */
349 xfs_extdelta_t *delta, /* o: change made to incore
350 extents */
351 int *done); /* set if not done yet */ 328 int *done); /* set if not done yet */
352 329
353/* 330/*
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 416e47e54b83..87d3c10b6954 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -24,21 +24,16 @@
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 27#include "xfs_mount.h"
30#include "xfs_bmap_btree.h" 28#include "xfs_bmap_btree.h"
31#include "xfs_alloc_btree.h" 29#include "xfs_alloc_btree.h"
32#include "xfs_ialloc_btree.h" 30#include "xfs_ialloc_btree.h"
33#include "xfs_dir2_sf.h"
34#include "xfs_attr_sf.h"
35#include "xfs_dinode.h" 31#include "xfs_dinode.h"
36#include "xfs_inode.h" 32#include "xfs_inode.h"
37#include "xfs_inode_item.h" 33#include "xfs_inode_item.h"
38#include "xfs_alloc.h" 34#include "xfs_alloc.h"
39#include "xfs_btree.h" 35#include "xfs_btree.h"
40#include "xfs_btree_trace.h" 36#include "xfs_btree_trace.h"
41#include "xfs_ialloc.h"
42#include "xfs_itable.h" 37#include "xfs_itable.h"
43#include "xfs_bmap.h" 38#include "xfs_bmap.h"
44#include "xfs_error.h" 39#include "xfs_error.h"
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 96be4b0f2496..829af92f0fba 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -24,20 +24,15 @@
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 27#include "xfs_mount.h"
30#include "xfs_bmap_btree.h" 28#include "xfs_bmap_btree.h"
31#include "xfs_alloc_btree.h" 29#include "xfs_alloc_btree.h"
32#include "xfs_ialloc_btree.h" 30#include "xfs_ialloc_btree.h"
33#include "xfs_dir2_sf.h"
34#include "xfs_attr_sf.h"
35#include "xfs_dinode.h" 31#include "xfs_dinode.h"
36#include "xfs_inode.h" 32#include "xfs_inode.h"
37#include "xfs_inode_item.h" 33#include "xfs_inode_item.h"
38#include "xfs_btree.h" 34#include "xfs_btree.h"
39#include "xfs_btree_trace.h" 35#include "xfs_btree_trace.h"
40#include "xfs_ialloc.h"
41#include "xfs_error.h" 36#include "xfs_error.h"
42#include "xfs_trace.h" 37#include "xfs_trace.h"
43 38
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 02a80984aa05..1b09d7a280df 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -24,7 +24,6 @@
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dmapi.h"
28#include "xfs_mount.h" 27#include "xfs_mount.h"
29#include "xfs_buf_item.h" 28#include "xfs_buf_item.h"
30#include "xfs_trans_priv.h" 29#include "xfs_trans_priv.h"
@@ -34,6 +33,12 @@
34 33
35kmem_zone_t *xfs_buf_item_zone; 34kmem_zone_t *xfs_buf_item_zone;
36 35
36static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
37{
38 return container_of(lip, struct xfs_buf_log_item, bli_item);
39}
40
41
37#ifdef XFS_TRANS_DEBUG 42#ifdef XFS_TRANS_DEBUG
38/* 43/*
39 * This function uses an alternate strategy for tracking the bytes 44 * This function uses an alternate strategy for tracking the bytes
@@ -151,12 +156,13 @@ STATIC void xfs_buf_do_callbacks(xfs_buf_t *bp, xfs_log_item_t *lip);
151 */ 156 */
152STATIC uint 157STATIC uint
153xfs_buf_item_size( 158xfs_buf_item_size(
154 xfs_buf_log_item_t *bip) 159 struct xfs_log_item *lip)
155{ 160{
156 uint nvecs; 161 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
157 int next_bit; 162 struct xfs_buf *bp = bip->bli_buf;
158 int last_bit; 163 uint nvecs;
159 xfs_buf_t *bp; 164 int next_bit;
165 int last_bit;
160 166
161 ASSERT(atomic_read(&bip->bli_refcount) > 0); 167 ASSERT(atomic_read(&bip->bli_refcount) > 0);
162 if (bip->bli_flags & XFS_BLI_STALE) { 168 if (bip->bli_flags & XFS_BLI_STALE) {
@@ -170,7 +176,6 @@ xfs_buf_item_size(
170 return 1; 176 return 1;
171 } 177 }
172 178
173 bp = bip->bli_buf;
174 ASSERT(bip->bli_flags & XFS_BLI_LOGGED); 179 ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
175 nvecs = 1; 180 nvecs = 1;
176 last_bit = xfs_next_bit(bip->bli_format.blf_data_map, 181 last_bit = xfs_next_bit(bip->bli_format.blf_data_map,
@@ -219,13 +224,13 @@ xfs_buf_item_size(
219 */ 224 */
220STATIC void 225STATIC void
221xfs_buf_item_format( 226xfs_buf_item_format(
222 xfs_buf_log_item_t *bip, 227 struct xfs_log_item *lip,
223 xfs_log_iovec_t *log_vector) 228 struct xfs_log_iovec *vecp)
224{ 229{
230 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
231 struct xfs_buf *bp = bip->bli_buf;
225 uint base_size; 232 uint base_size;
226 uint nvecs; 233 uint nvecs;
227 xfs_log_iovec_t *vecp;
228 xfs_buf_t *bp;
229 int first_bit; 234 int first_bit;
230 int last_bit; 235 int last_bit;
231 int next_bit; 236 int next_bit;
@@ -235,8 +240,6 @@ xfs_buf_item_format(
235 ASSERT(atomic_read(&bip->bli_refcount) > 0); 240 ASSERT(atomic_read(&bip->bli_refcount) > 0);
236 ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || 241 ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
237 (bip->bli_flags & XFS_BLI_STALE)); 242 (bip->bli_flags & XFS_BLI_STALE));
238 bp = bip->bli_buf;
239 vecp = log_vector;
240 243
241 /* 244 /*
242 * The size of the base structure is the size of the 245 * The size of the base structure is the size of the
@@ -248,7 +251,7 @@ xfs_buf_item_format(
248 base_size = 251 base_size =
249 (uint)(sizeof(xfs_buf_log_format_t) + 252 (uint)(sizeof(xfs_buf_log_format_t) +
250 ((bip->bli_format.blf_map_size - 1) * sizeof(uint))); 253 ((bip->bli_format.blf_map_size - 1) * sizeof(uint)));
251 vecp->i_addr = (xfs_caddr_t)&bip->bli_format; 254 vecp->i_addr = &bip->bli_format;
252 vecp->i_len = base_size; 255 vecp->i_len = base_size;
253 vecp->i_type = XLOG_REG_TYPE_BFORMAT; 256 vecp->i_type = XLOG_REG_TYPE_BFORMAT;
254 vecp++; 257 vecp++;
@@ -263,7 +266,7 @@ xfs_buf_item_format(
263 */ 266 */
264 if (bip->bli_flags & XFS_BLI_INODE_BUF) { 267 if (bip->bli_flags & XFS_BLI_INODE_BUF) {
265 if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && 268 if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
266 xfs_log_item_in_current_chkpt(&bip->bli_item))) 269 xfs_log_item_in_current_chkpt(lip)))
267 bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF; 270 bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
268 bip->bli_flags &= ~XFS_BLI_INODE_BUF; 271 bip->bli_flags &= ~XFS_BLI_INODE_BUF;
269 } 272 }
@@ -356,66 +359,90 @@ xfs_buf_item_format(
356 359
357/* 360/*
358 * This is called to pin the buffer associated with the buf log item in memory 361 * This is called to pin the buffer associated with the buf log item in memory
359 * so it cannot be written out. Simply call bpin() on the buffer to do this. 362 * so it cannot be written out.
360 * 363 *
361 * We also always take a reference to the buffer log item here so that the bli 364 * We also always take a reference to the buffer log item here so that the bli
362 * is held while the item is pinned in memory. This means that we can 365 * is held while the item is pinned in memory. This means that we can
363 * unconditionally drop the reference count a transaction holds when the 366 * unconditionally drop the reference count a transaction holds when the
364 * transaction is completed. 367 * transaction is completed.
365 */ 368 */
366
367STATIC void 369STATIC void
368xfs_buf_item_pin( 370xfs_buf_item_pin(
369 xfs_buf_log_item_t *bip) 371 struct xfs_log_item *lip)
370{ 372{
371 xfs_buf_t *bp; 373 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
372 374
373 bp = bip->bli_buf; 375 ASSERT(XFS_BUF_ISBUSY(bip->bli_buf));
374 ASSERT(XFS_BUF_ISBUSY(bp));
375 ASSERT(atomic_read(&bip->bli_refcount) > 0); 376 ASSERT(atomic_read(&bip->bli_refcount) > 0);
376 ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || 377 ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
377 (bip->bli_flags & XFS_BLI_STALE)); 378 (bip->bli_flags & XFS_BLI_STALE));
378 atomic_inc(&bip->bli_refcount); 379
379 trace_xfs_buf_item_pin(bip); 380 trace_xfs_buf_item_pin(bip);
380 xfs_bpin(bp);
381}
382 381
382 atomic_inc(&bip->bli_refcount);
383 atomic_inc(&bip->bli_buf->b_pin_count);
384}
383 385
384/* 386/*
385 * This is called to unpin the buffer associated with the buf log 387 * This is called to unpin the buffer associated with the buf log
386 * item which was previously pinned with a call to xfs_buf_item_pin(). 388 * item which was previously pinned with a call to xfs_buf_item_pin().
387 * Just call bunpin() on the buffer to do this.
388 * 389 *
389 * Also drop the reference to the buf item for the current transaction. 390 * Also drop the reference to the buf item for the current transaction.
390 * If the XFS_BLI_STALE flag is set and we are the last reference, 391 * If the XFS_BLI_STALE flag is set and we are the last reference,
391 * then free up the buf log item and unlock the buffer. 392 * then free up the buf log item and unlock the buffer.
393 *
394 * If the remove flag is set we are called from uncommit in the
395 * forced-shutdown path. If that is true and the reference count on
396 * the log item is going to drop to zero we need to free the item's
397 * descriptor in the transaction.
392 */ 398 */
393STATIC void 399STATIC void
394xfs_buf_item_unpin( 400xfs_buf_item_unpin(
395 xfs_buf_log_item_t *bip) 401 struct xfs_log_item *lip,
402 int remove)
396{ 403{
397 struct xfs_ail *ailp; 404 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
398 xfs_buf_t *bp; 405 xfs_buf_t *bp = bip->bli_buf;
399 int freed; 406 struct xfs_ail *ailp = lip->li_ailp;
400 int stale = bip->bli_flags & XFS_BLI_STALE; 407 int stale = bip->bli_flags & XFS_BLI_STALE;
408 int freed;
401 409
402 bp = bip->bli_buf;
403 ASSERT(bp != NULL);
404 ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip); 410 ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
405 ASSERT(atomic_read(&bip->bli_refcount) > 0); 411 ASSERT(atomic_read(&bip->bli_refcount) > 0);
412
406 trace_xfs_buf_item_unpin(bip); 413 trace_xfs_buf_item_unpin(bip);
407 414
408 freed = atomic_dec_and_test(&bip->bli_refcount); 415 freed = atomic_dec_and_test(&bip->bli_refcount);
409 ailp = bip->bli_item.li_ailp; 416
410 xfs_bunpin(bp); 417 if (atomic_dec_and_test(&bp->b_pin_count))
418 wake_up_all(&bp->b_waiters);
419
411 if (freed && stale) { 420 if (freed && stale) {
412 ASSERT(bip->bli_flags & XFS_BLI_STALE); 421 ASSERT(bip->bli_flags & XFS_BLI_STALE);
413 ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); 422 ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
414 ASSERT(!(XFS_BUF_ISDELAYWRITE(bp))); 423 ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
415 ASSERT(XFS_BUF_ISSTALE(bp)); 424 ASSERT(XFS_BUF_ISSTALE(bp));
416 ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL); 425 ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
426
417 trace_xfs_buf_item_unpin_stale(bip); 427 trace_xfs_buf_item_unpin_stale(bip);
418 428
429 if (remove) {
430 /*
431 * We have to remove the log item from the transaction
432 * as we are about to release our reference to the
433 * buffer. If we don't, the unlock that occurs later
434 * in xfs_trans_uncommit() will try to reference the
435 * buffer which we no longer have a hold on.
436 */
437 xfs_trans_del_item(lip);
438
439 /*
440 * Since the transaction no longer refers to the buffer,
441 * the buffer should no longer refer to the transaction.
442 */
443 XFS_BUF_SET_FSPRIVATE2(bp, NULL);
444 }
445
419 /* 446 /*
420 * If we get called here because of an IO error, we may 447 * If we get called here because of an IO error, we may
421 * or may not have the item on the AIL. xfs_trans_ail_delete() 448 * or may not have the item on the AIL. xfs_trans_ail_delete()
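
The pin/unpin hunks above drop the xfs_bpin()/xfs_bunpin() wrappers and count pins directly in the buffer's b_pin_count, waking b_waiters when the count falls to zero. A user-space sketch of that counter-plus-wakeup shape using C11 atomics, with the wake-up stubbed out (the kernel code uses the buffer's wait queue):

#include <stdatomic.h>
#include <stdio.h>

struct buf {
        atomic_int pin_count;   /* how many holders forbid writeback */
};

static void wake_waiters(struct buf *bp)
{
        /* stand-in for wake_up_all(&bp->b_waiters) */
        (void)bp;
        printf("buffer unpinned, waking waiters\n");
}

static void buf_pin(struct buf *bp)
{
        atomic_fetch_add(&bp->pin_count, 1);
}

static void buf_unpin(struct buf *bp)
{
        /* the last unpin is the one that wakes anyone waiting on the buffer */
        if (atomic_fetch_sub(&bp->pin_count, 1) == 1)
                wake_waiters(bp);
}

int main(void)
{
        struct buf bp = { .pin_count = 0 };

        buf_pin(&bp);
        buf_pin(&bp);
        buf_unpin(&bp);         /* still pinned */
        buf_unpin(&bp);         /* prints the wake-up message */
        return 0;
}
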
@@ -437,48 +464,6 @@ xfs_buf_item_unpin(
437} 464}
438 465
439/* 466/*
440 * this is called from uncommit in the forced-shutdown path.
441 * we need to check to see if the reference count on the log item
442 * is going to drop to zero. If so, unpin will free the log item
443 * so we need to free the item's descriptor (that points to the item)
444 * in the transaction.
445 */
446STATIC void
447xfs_buf_item_unpin_remove(
448 xfs_buf_log_item_t *bip,
449 xfs_trans_t *tp)
450{
451 /* will xfs_buf_item_unpin() call xfs_buf_item_relse()? */
452 if ((atomic_read(&bip->bli_refcount) == 1) &&
453 (bip->bli_flags & XFS_BLI_STALE)) {
454 /*
455 * yes -- We can safely do some work here and then call
456 * buf_item_unpin to do the rest because we are
457 * holding the buffer locked so no one else will be
458 * able to bump up the refcount. We have to remove the
459 * log item from the transaction as we are about to release
460 * our reference to the buffer. If we don't, the unlock that
461 * occurs later in the xfs_trans_uncommit() will try to
462 * reference the buffer which we no longer have a hold on.
463 */
464 struct xfs_log_item_desc *lidp;
465
466 ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0);
467 trace_xfs_buf_item_unpin_stale(bip);
468
469 lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)bip);
470 xfs_trans_free_item(tp, lidp);
471
472 /*
473 * Since the transaction no longer refers to the buffer, the
474 * buffer should no longer refer to the transaction.
475 */
476 XFS_BUF_SET_FSPRIVATE2(bip->bli_buf, NULL);
477 }
478 xfs_buf_item_unpin(bip);
479}
480
481/*
482 * This is called to attempt to lock the buffer associated with this 467 * This is called to attempt to lock the buffer associated with this
483 * buf log item. Don't sleep on the buffer lock. If we can't get 468 * buf log item. Don't sleep on the buffer lock. If we can't get
484 * the lock right away, return 0. If we can get the lock, take a 469 * the lock right away, return 0. If we can get the lock, take a
@@ -488,11 +473,11 @@ xfs_buf_item_unpin_remove(
488 */ 473 */
489STATIC uint 474STATIC uint
490xfs_buf_item_trylock( 475xfs_buf_item_trylock(
491 xfs_buf_log_item_t *bip) 476 struct xfs_log_item *lip)
492{ 477{
493 xfs_buf_t *bp; 478 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
479 struct xfs_buf *bp = bip->bli_buf;
494 480
495 bp = bip->bli_buf;
496 if (XFS_BUF_ISPINNED(bp)) 481 if (XFS_BUF_ISPINNED(bp))
497 return XFS_ITEM_PINNED; 482 return XFS_ITEM_PINNED;
498 if (!XFS_BUF_CPSEMA(bp)) 483 if (!XFS_BUF_CPSEMA(bp))
@@ -529,13 +514,12 @@ xfs_buf_item_trylock(
529 */ 514 */
530STATIC void 515STATIC void
531xfs_buf_item_unlock( 516xfs_buf_item_unlock(
532 xfs_buf_log_item_t *bip) 517 struct xfs_log_item *lip)
533{ 518{
534 int aborted; 519 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
535 xfs_buf_t *bp; 520 struct xfs_buf *bp = bip->bli_buf;
536 uint hold; 521 int aborted;
537 522 uint hold;
538 bp = bip->bli_buf;
539 523
540 /* Clear the buffer's association with this transaction. */ 524 /* Clear the buffer's association with this transaction. */
541 XFS_BUF_SET_FSPRIVATE2(bp, NULL); 525 XFS_BUF_SET_FSPRIVATE2(bp, NULL);
@@ -546,7 +530,7 @@ xfs_buf_item_unlock(
546 * (cancelled) buffers at unpin time, but we'll never go through the 530 * (cancelled) buffers at unpin time, but we'll never go through the
547 * pin/unpin cycle if we abort inside commit. 531 * pin/unpin cycle if we abort inside commit.
548 */ 532 */
549 aborted = (bip->bli_item.li_flags & XFS_LI_ABORTED) != 0; 533 aborted = (lip->li_flags & XFS_LI_ABORTED) != 0;
550 534
551 /* 535 /*
552 * Before possibly freeing the buf item, determine if we should 536 * Before possibly freeing the buf item, determine if we should
@@ -607,16 +591,16 @@ xfs_buf_item_unlock(
607 */ 591 */
608STATIC xfs_lsn_t 592STATIC xfs_lsn_t
609xfs_buf_item_committed( 593xfs_buf_item_committed(
610 xfs_buf_log_item_t *bip, 594 struct xfs_log_item *lip,
611 xfs_lsn_t lsn) 595 xfs_lsn_t lsn)
612{ 596{
597 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
598
613 trace_xfs_buf_item_committed(bip); 599 trace_xfs_buf_item_committed(bip);
614 600
615 if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && 601 if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
616 (bip->bli_item.li_lsn != 0)) { 602 return lip->li_lsn;
617 return bip->bli_item.li_lsn; 603 return lsn;
618 }
619 return (lsn);
620} 604}
621 605
622/* 606/*
@@ -626,15 +610,16 @@ xfs_buf_item_committed(
626 */ 610 */
627STATIC void 611STATIC void
628xfs_buf_item_push( 612xfs_buf_item_push(
629 xfs_buf_log_item_t *bip) 613 struct xfs_log_item *lip)
630{ 614{
631 xfs_buf_t *bp; 615 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
616 struct xfs_buf *bp = bip->bli_buf;
632 617
633 ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); 618 ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
619 ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
620
634 trace_xfs_buf_item_push(bip); 621 trace_xfs_buf_item_push(bip);
635 622
636 bp = bip->bli_buf;
637 ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
638 xfs_buf_relse(bp); 623 xfs_buf_relse(bp);
639} 624}
640 625
@@ -646,22 +631,24 @@ xfs_buf_item_push(
646 */ 631 */
647STATIC void 632STATIC void
648xfs_buf_item_pushbuf( 633xfs_buf_item_pushbuf(
649 xfs_buf_log_item_t *bip) 634 struct xfs_log_item *lip)
650{ 635{
651 xfs_buf_t *bp; 636 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
637 struct xfs_buf *bp = bip->bli_buf;
652 638
653 ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); 639 ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
640 ASSERT(XFS_BUF_ISDELAYWRITE(bp));
641
654 trace_xfs_buf_item_pushbuf(bip); 642 trace_xfs_buf_item_pushbuf(bip);
655 643
656 bp = bip->bli_buf;
657 ASSERT(XFS_BUF_ISDELAYWRITE(bp));
658 xfs_buf_delwri_promote(bp); 644 xfs_buf_delwri_promote(bp);
659 xfs_buf_relse(bp); 645 xfs_buf_relse(bp);
660} 646}
661 647
662/* ARGSUSED */
663STATIC void 648STATIC void
664xfs_buf_item_committing(xfs_buf_log_item_t *bip, xfs_lsn_t commit_lsn) 649xfs_buf_item_committing(
650 struct xfs_log_item *lip,
651 xfs_lsn_t commit_lsn)
665{ 652{
666} 653}
667 654
@@ -669,21 +656,16 @@ xfs_buf_item_committing(xfs_buf_log_item_t *bip, xfs_lsn_t commit_lsn)
669 * This is the ops vector shared by all buf log items. 656 * This is the ops vector shared by all buf log items.
670 */ 657 */
671static struct xfs_item_ops xfs_buf_item_ops = { 658static struct xfs_item_ops xfs_buf_item_ops = {
672 .iop_size = (uint(*)(xfs_log_item_t*))xfs_buf_item_size, 659 .iop_size = xfs_buf_item_size,
673 .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) 660 .iop_format = xfs_buf_item_format,
674 xfs_buf_item_format, 661 .iop_pin = xfs_buf_item_pin,
675 .iop_pin = (void(*)(xfs_log_item_t*))xfs_buf_item_pin, 662 .iop_unpin = xfs_buf_item_unpin,
676 .iop_unpin = (void(*)(xfs_log_item_t*))xfs_buf_item_unpin, 663 .iop_trylock = xfs_buf_item_trylock,
677 .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *)) 664 .iop_unlock = xfs_buf_item_unlock,
678 xfs_buf_item_unpin_remove, 665 .iop_committed = xfs_buf_item_committed,
679 .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_buf_item_trylock, 666 .iop_push = xfs_buf_item_push,
680 .iop_unlock = (void(*)(xfs_log_item_t*))xfs_buf_item_unlock, 667 .iop_pushbuf = xfs_buf_item_pushbuf,
681 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) 668 .iop_committing = xfs_buf_item_committing
682 xfs_buf_item_committed,
683 .iop_push = (void(*)(xfs_log_item_t*))xfs_buf_item_push,
684 .iop_pushbuf = (void(*)(xfs_log_item_t*))xfs_buf_item_pushbuf,
685 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
686 xfs_buf_item_committing
687}; 669};
688 670
689 671
@@ -712,7 +694,6 @@ xfs_buf_item_init(
712 */ 694 */
713 if (bp->b_mount != mp) 695 if (bp->b_mount != mp)
714 bp->b_mount = mp; 696 bp->b_mount = mp;
715 XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb);
716 if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { 697 if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
717 lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); 698 lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
718 if (lip->li_type == XFS_LI_BUF) { 699 if (lip->li_type == XFS_LI_BUF) {
@@ -1098,15 +1079,14 @@ xfs_buf_error_relse(
1098 * It is called by xfs_buf_iodone_callbacks() above which will take 1079 * It is called by xfs_buf_iodone_callbacks() above which will take
1099 * care of cleaning up the buffer itself. 1080 * care of cleaning up the buffer itself.
1100 */ 1081 */
1101/* ARGSUSED */
1102void 1082void
1103xfs_buf_iodone( 1083xfs_buf_iodone(
1104 xfs_buf_t *bp, 1084 struct xfs_buf *bp,
1105 xfs_buf_log_item_t *bip) 1085 struct xfs_log_item *lip)
1106{ 1086{
1107 struct xfs_ail *ailp = bip->bli_item.li_ailp; 1087 struct xfs_ail *ailp = lip->li_ailp;
1108 1088
1109 ASSERT(bip->bli_buf == bp); 1089 ASSERT(BUF_ITEM(lip)->bli_buf == bp);
1110 1090
1111 xfs_buf_rele(bp); 1091 xfs_buf_rele(bp);
1112 1092
@@ -1120,6 +1100,6 @@ xfs_buf_iodone(
1120 * Either way, AIL is useless if we're forcing a shutdown. 1100 * Either way, AIL is useless if we're forcing a shutdown.
1121 */ 1101 */
1122 spin_lock(&ailp->xa_lock); 1102 spin_lock(&ailp->xa_lock);
1123 xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip); 1103 xfs_trans_ail_delete(ailp, lip);
1124 xfs_buf_item_free(bip); 1104 xfs_buf_item_free(BUF_ITEM(lip));
1125} 1105}
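
Every iop_* callback in this file now takes struct xfs_log_item * directly and downcasts with BUF_ITEM(), so the function-pointer casts disappear from xfs_buf_item_ops. A sketch of the before/after shape with a hypothetical two-method ops table; the old cast style is shown only in a comment:

#include <stdio.h>

struct item;                    /* the base type every callback receives */

struct item_ops {
        unsigned int (*op_size)(struct item *);
        void (*op_push)(struct item *);
};

struct item {
        const struct item_ops *ops;
        unsigned int size;
};

/* Callbacks take the base type directly, so no function-pointer casts
 * are needed when filling in the ops table. */
static unsigned int item_size(struct item *it)
{
        return it->size;
}

static void item_push(struct item *it)
{
        printf("pushing item of size %u\n", item_size(it));
}

static const struct item_ops demo_item_ops = {
        .op_size = item_size,   /* was: (unsigned int (*)(void *))item_size */
        .op_push = item_push,
};

int main(void)
{
        struct item it = { .ops = &demo_item_ops, .size = 42 };

        it.ops->op_push(&it);
        return 0;
}
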
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index f20bb472d582..0e2ed43f16c7 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -124,7 +124,7 @@ void xfs_buf_attach_iodone(struct xfs_buf *,
124 void(*)(struct xfs_buf *, xfs_log_item_t *), 124 void(*)(struct xfs_buf *, xfs_log_item_t *),
125 xfs_log_item_t *); 125 xfs_log_item_t *);
126void xfs_buf_iodone_callbacks(struct xfs_buf *); 126void xfs_buf_iodone_callbacks(struct xfs_buf *);
127void xfs_buf_iodone(struct xfs_buf *, xfs_buf_log_item_t *); 127void xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *);
128 128
129#ifdef XFS_TRANS_DEBUG 129#ifdef XFS_TRANS_DEBUG
130void 130void
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 0ca556b4bf31..30fa0e206fba 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -25,19 +25,14 @@
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h" 27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 28#include "xfs_mount.h"
30#include "xfs_da_btree.h" 29#include "xfs_da_btree.h"
31#include "xfs_bmap_btree.h" 30#include "xfs_bmap_btree.h"
32#include "xfs_alloc_btree.h"
33#include "xfs_ialloc_btree.h"
34#include "xfs_dir2_sf.h" 31#include "xfs_dir2_sf.h"
35#include "xfs_attr_sf.h"
36#include "xfs_dinode.h" 32#include "xfs_dinode.h"
37#include "xfs_inode.h" 33#include "xfs_inode.h"
38#include "xfs_inode_item.h" 34#include "xfs_inode_item.h"
39#include "xfs_alloc.h" 35#include "xfs_alloc.h"
40#include "xfs_btree.h"
41#include "xfs_bmap.h" 36#include "xfs_bmap.h"
42#include "xfs_attr.h" 37#include "xfs_attr.h"
43#include "xfs_attr_leaf.h" 38#include "xfs_attr_leaf.h"
@@ -581,16 +576,14 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
581 xfs_da_intnode_t *node; 576 xfs_da_intnode_t *node;
582 xfs_da_node_entry_t *btree; 577 xfs_da_node_entry_t *btree;
583 int tmp; 578 int tmp;
584 xfs_mount_t *mp;
585 579
586 node = oldblk->bp->data; 580 node = oldblk->bp->data;
587 mp = state->mp;
588 ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); 581 ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
589 ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count))); 582 ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
590 ASSERT(newblk->blkno != 0); 583 ASSERT(newblk->blkno != 0);
591 if (state->args->whichfork == XFS_DATA_FORK) 584 if (state->args->whichfork == XFS_DATA_FORK)
592 ASSERT(newblk->blkno >= mp->m_dirleafblk && 585 ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
593 newblk->blkno < mp->m_dirfreeblk); 586 newblk->blkno < state->mp->m_dirfreeblk);
594 587
595 /* 588 /*
596 * We may need to make some room before we insert the new node. 589 * We may need to make some room before we insert the new node.
@@ -1601,7 +1594,7 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
1601 xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA| 1594 xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|
1602 XFS_BMAPI_CONTIG, 1595 XFS_BMAPI_CONTIG,
1603 args->firstblock, args->total, &map, &nmap, 1596 args->firstblock, args->total, &map, &nmap,
1604 args->flist, NULL))) { 1597 args->flist))) {
1605 return error; 1598 return error;
1606 } 1599 }
1607 ASSERT(nmap <= 1); 1600 ASSERT(nmap <= 1);
@@ -1622,8 +1615,7 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
1622 xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE| 1615 xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|
1623 XFS_BMAPI_METADATA, 1616 XFS_BMAPI_METADATA,
1624 args->firstblock, args->total, 1617 args->firstblock, args->total,
1625 &mapp[mapi], &nmap, args->flist, 1618 &mapp[mapi], &nmap, args->flist))) {
1626 NULL))) {
1627 kmem_free(mapp); 1619 kmem_free(mapp);
1628 return error; 1620 return error;
1629 } 1621 }
@@ -1884,7 +1876,7 @@ xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
1884 */ 1876 */
1885 if ((error = xfs_bunmapi(tp, dp, dead_blkno, count, 1877 if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
1886 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA, 1878 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
1887 0, args->firstblock, args->flist, NULL, 1879 0, args->firstblock, args->flist,
1888 &done)) == ENOSPC) { 1880 &done)) == ENOSPC) {
1889 if (w != XFS_DATA_FORK) 1881 if (w != XFS_DATA_FORK)
1890 break; 1882 break;
@@ -1989,7 +1981,7 @@ xfs_da_do_buf(
1989 nfsb, 1981 nfsb,
1990 XFS_BMAPI_METADATA | 1982 XFS_BMAPI_METADATA |
1991 xfs_bmapi_aflag(whichfork), 1983 xfs_bmapi_aflag(whichfork),
1992 NULL, 0, mapp, &nmap, NULL, NULL))) 1984 NULL, 0, mapp, &nmap, NULL)))
1993 goto exit0; 1985 goto exit0;
1994 } 1986 }
1995 } else { 1987 } else {
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index 5bba29a07812..3b9582c60a22 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -24,24 +24,15 @@
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 27#include "xfs_mount.h"
30#include "xfs_bmap_btree.h" 28#include "xfs_bmap_btree.h"
31#include "xfs_alloc_btree.h"
32#include "xfs_ialloc_btree.h"
33#include "xfs_dir2_sf.h"
34#include "xfs_attr_sf.h"
35#include "xfs_dinode.h" 29#include "xfs_dinode.h"
36#include "xfs_inode.h" 30#include "xfs_inode.h"
37#include "xfs_inode_item.h" 31#include "xfs_inode_item.h"
38#include "xfs_bmap.h" 32#include "xfs_bmap.h"
39#include "xfs_btree.h"
40#include "xfs_ialloc.h"
41#include "xfs_itable.h" 33#include "xfs_itable.h"
42#include "xfs_dfrag.h" 34#include "xfs_dfrag.h"
43#include "xfs_error.h" 35#include "xfs_error.h"
44#include "xfs_rw.h"
45#include "xfs_vnodeops.h" 36#include "xfs_vnodeops.h"
46#include "xfs_trace.h" 37#include "xfs_trace.h"
47 38
@@ -69,7 +60,9 @@ xfs_swapext(
69 goto out; 60 goto out;
70 } 61 }
71 62
72 if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND)) { 63 if (!(file->f_mode & FMODE_WRITE) ||
64 !(file->f_mode & FMODE_READ) ||
65 (file->f_flags & O_APPEND)) {
73 error = XFS_ERROR(EBADF); 66 error = XFS_ERROR(EBADF);
74 goto out_put_file; 67 goto out_put_file;
75 } 68 }
@@ -81,6 +74,7 @@ xfs_swapext(
81 } 74 }
82 75
83 if (!(tmp_file->f_mode & FMODE_WRITE) || 76 if (!(tmp_file->f_mode & FMODE_WRITE) ||
77 !(tmp_file->f_mode & FMODE_READ) ||
84 (tmp_file->f_flags & O_APPEND)) { 78 (tmp_file->f_flags & O_APPEND)) {
85 error = XFS_ERROR(EBADF); 79 error = XFS_ERROR(EBADF);
86 goto out_put_tmp_file; 80 goto out_put_tmp_file;
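
The two xfs_swapext() hunks above now require both file descriptors to be open for reading as well as writing, and still reject append-only opens. In user space the same policy can be checked on an fd with fcntl(); a small sketch (the helper name is made up, only the flag checks mirror the patch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Return 0 if fd is open read-write and not append-only, -1 otherwise. */
static int check_swap_candidate(int fd)
{
        int flags = fcntl(fd, F_GETFL);

        if (flags < 0)
                return -1;
        if ((flags & O_ACCMODE) != O_RDWR)
                return -1;              /* must be readable and writable */
        if (flags & O_APPEND)
                return -1;              /* append-only files are rejected */
        return 0;
}

int main(void)
{
        int fd = open("/tmp/swap_demo", O_RDWR | O_CREAT, 0600);

        if (fd < 0)
                return 1;
        printf("fd ok for swap: %s\n",
               check_swap_candidate(fd) == 0 ? "yes" : "no");
        close(fd);
        unlink("/tmp/swap_demo");
        return 0;
}
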
@@ -422,11 +416,8 @@ xfs_swap_extents(
422 } 416 }
423 417
424 418
425 IHOLD(ip); 419 xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
426 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 420 xfs_trans_ijoin_ref(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
427
428 IHOLD(tip);
429 xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
430 421
431 xfs_trans_log_inode(tp, ip, ilf_fields); 422 xfs_trans_log_inode(tp, ip, ilf_fields);
432 xfs_trans_log_inode(tp, tip, tilf_fields); 423 xfs_trans_log_inode(tp, tip, tilf_fields);
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index 42520f041265..a1321bc7f192 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -25,13 +25,11 @@
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h" 27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 28#include "xfs_mount.h"
30#include "xfs_da_btree.h" 29#include "xfs_da_btree.h"
31#include "xfs_bmap_btree.h" 30#include "xfs_bmap_btree.h"
32#include "xfs_alloc_btree.h" 31#include "xfs_alloc_btree.h"
33#include "xfs_dir2_sf.h" 32#include "xfs_dir2_sf.h"
34#include "xfs_attr_sf.h"
35#include "xfs_dinode.h" 33#include "xfs_dinode.h"
36#include "xfs_inode.h" 34#include "xfs_inode.h"
37#include "xfs_inode_item.h" 35#include "xfs_inode_item.h"
@@ -382,7 +380,7 @@ xfs_readdir(
382 int rval; /* return value */ 380 int rval; /* return value */
383 int v; /* type-checking value */ 381 int v; /* type-checking value */
384 382
385 xfs_itrace_entry(dp); 383 trace_xfs_readdir(dp);
386 384
387 if (XFS_FORCED_SHUTDOWN(dp->i_mount)) 385 if (XFS_FORCED_SHUTDOWN(dp->i_mount))
388 return XFS_ERROR(EIO); 386 return XFS_ERROR(EIO);
@@ -549,7 +547,7 @@ xfs_dir2_grow_inode(
549 if ((error = xfs_bmapi(tp, dp, bno, count, 547 if ((error = xfs_bmapi(tp, dp, bno, count,
550 XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG, 548 XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
551 args->firstblock, args->total, &map, &nmap, 549 args->firstblock, args->total, &map, &nmap,
552 args->flist, NULL))) 550 args->flist)))
553 return error; 551 return error;
554 ASSERT(nmap <= 1); 552 ASSERT(nmap <= 1);
555 if (nmap == 1) { 553 if (nmap == 1) {
@@ -581,8 +579,7 @@ xfs_dir2_grow_inode(
581 if ((error = xfs_bmapi(tp, dp, b, c, 579 if ((error = xfs_bmapi(tp, dp, b, c,
582 XFS_BMAPI_WRITE|XFS_BMAPI_METADATA, 580 XFS_BMAPI_WRITE|XFS_BMAPI_METADATA,
583 args->firstblock, args->total, 581 args->firstblock, args->total,
584 &mapp[mapi], &nmap, args->flist, 582 &mapp[mapi], &nmap, args->flist))) {
585 NULL))) {
586 kmem_free(mapp); 583 kmem_free(mapp);
587 return error; 584 return error;
588 } 585 }
@@ -715,7 +712,7 @@ xfs_dir2_shrink_inode(
715 */ 712 */
716 if ((error = xfs_bunmapi(tp, dp, da, mp->m_dirblkfsbs, 713 if ((error = xfs_bunmapi(tp, dp, da, mp->m_dirblkfsbs,
717 XFS_BMAPI_METADATA, 0, args->firstblock, args->flist, 714 XFS_BMAPI_METADATA, 0, args->firstblock, args->flist,
718 NULL, &done))) { 715 &done))) {
719 /* 716 /*
720 * ENOSPC actually can happen if we're in a removename with 717 * ENOSPC actually can happen if we're in a removename with
721 * no space reservation, and the resulting block removal 718 * no space reservation, and the resulting block removal
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index 779a267b0a84..580d99cef9e7 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -24,12 +24,10 @@
24#include "xfs_sb.h" 24#include "xfs_sb.h"
25#include "xfs_ag.h" 25#include "xfs_ag.h"
26#include "xfs_dir2.h" 26#include "xfs_dir2.h"
27#include "xfs_dmapi.h"
28#include "xfs_mount.h" 27#include "xfs_mount.h"
29#include "xfs_da_btree.h" 28#include "xfs_da_btree.h"
30#include "xfs_bmap_btree.h" 29#include "xfs_bmap_btree.h"
31#include "xfs_dir2_sf.h" 30#include "xfs_dir2_sf.h"
32#include "xfs_attr_sf.h"
33#include "xfs_dinode.h" 31#include "xfs_dinode.h"
34#include "xfs_inode.h" 32#include "xfs_inode.h"
35#include "xfs_inode_item.h" 33#include "xfs_inode_item.h"
@@ -1073,10 +1071,10 @@ xfs_dir2_sf_to_block(
1073 */ 1071 */
1074 1072
1075 buf_len = dp->i_df.if_bytes; 1073 buf_len = dp->i_df.if_bytes;
1076 buf = kmem_alloc(dp->i_df.if_bytes, KM_SLEEP); 1074 buf = kmem_alloc(buf_len, KM_SLEEP);
1077 1075
1078 memcpy(buf, sfp, dp->i_df.if_bytes); 1076 memcpy(buf, sfp, buf_len);
1079 xfs_idata_realloc(dp, -dp->i_df.if_bytes, XFS_DATA_FORK); 1077 xfs_idata_realloc(dp, -buf_len, XFS_DATA_FORK);
1080 dp->i_d.di_size = 0; 1078 dp->i_d.di_size = 0;
1081 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); 1079 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1082 /* 1080 /*
diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c
index 498f8d694330..921595b84f5b 100644
--- a/fs/xfs/xfs_dir2_data.c
+++ b/fs/xfs/xfs_dir2_data.c
@@ -24,12 +24,10 @@
24#include "xfs_sb.h" 24#include "xfs_sb.h"
25#include "xfs_ag.h" 25#include "xfs_ag.h"
26#include "xfs_dir2.h" 26#include "xfs_dir2.h"
27#include "xfs_dmapi.h"
28#include "xfs_mount.h" 27#include "xfs_mount.h"
29#include "xfs_da_btree.h" 28#include "xfs_da_btree.h"
30#include "xfs_bmap_btree.h" 29#include "xfs_bmap_btree.h"
31#include "xfs_dir2_sf.h" 30#include "xfs_dir2_sf.h"
32#include "xfs_attr_sf.h"
33#include "xfs_dinode.h" 31#include "xfs_dinode.h"
34#include "xfs_inode.h" 32#include "xfs_inode.h"
35#include "xfs_dir2_data.h" 33#include "xfs_dir2_data.h"
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index e2d89854ec9e..504be8640e91 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -25,11 +25,9 @@
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h" 27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 28#include "xfs_mount.h"
30#include "xfs_da_btree.h" 29#include "xfs_da_btree.h"
31#include "xfs_bmap_btree.h" 30#include "xfs_bmap_btree.h"
32#include "xfs_attr_sf.h"
33#include "xfs_dir2_sf.h" 31#include "xfs_dir2_sf.h"
34#include "xfs_dinode.h" 32#include "xfs_dinode.h"
35#include "xfs_inode.h" 33#include "xfs_inode.h"
@@ -875,7 +873,7 @@ xfs_dir2_leaf_getdents(
875 xfs_dir2_byte_to_da(mp, 873 xfs_dir2_byte_to_da(mp,
876 XFS_DIR2_LEAF_OFFSET) - map_off, 874 XFS_DIR2_LEAF_OFFSET) - map_off,
877 XFS_BMAPI_METADATA, NULL, 0, 875 XFS_BMAPI_METADATA, NULL, 0,
878 &map[map_valid], &nmap, NULL, NULL); 876 &map[map_valid], &nmap, NULL);
879 /* 877 /*
880 * Don't know if we should ignore this or 878 * Don't know if we should ignore this or
881 * try to return an error. 879 * try to return an error.
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index 78fc4d9ae756..f9a0864b696a 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -24,12 +24,10 @@
24#include "xfs_sb.h" 24#include "xfs_sb.h"
25#include "xfs_ag.h" 25#include "xfs_ag.h"
26#include "xfs_dir2.h" 26#include "xfs_dir2.h"
27#include "xfs_dmapi.h"
28#include "xfs_mount.h" 27#include "xfs_mount.h"
29#include "xfs_da_btree.h" 28#include "xfs_da_btree.h"
30#include "xfs_bmap_btree.h" 29#include "xfs_bmap_btree.h"
31#include "xfs_dir2_sf.h" 30#include "xfs_dir2_sf.h"
32#include "xfs_attr_sf.h"
33#include "xfs_dinode.h" 31#include "xfs_dinode.h"
34#include "xfs_inode.h" 32#include "xfs_inode.h"
35#include "xfs_bmap.h" 33#include "xfs_bmap.h"
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index c1a5945d463a..b1bae6b1eed9 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -24,12 +24,10 @@
24#include "xfs_sb.h" 24#include "xfs_sb.h"
25#include "xfs_ag.h" 25#include "xfs_ag.h"
26#include "xfs_dir2.h" 26#include "xfs_dir2.h"
27#include "xfs_dmapi.h"
28#include "xfs_mount.h" 27#include "xfs_mount.h"
29#include "xfs_da_btree.h" 28#include "xfs_da_btree.h"
30#include "xfs_bmap_btree.h" 29#include "xfs_bmap_btree.h"
31#include "xfs_dir2_sf.h" 30#include "xfs_dir2_sf.h"
32#include "xfs_attr_sf.h"
33#include "xfs_dinode.h" 31#include "xfs_dinode.h"
34#include "xfs_inode.h" 32#include "xfs_inode.h"
35#include "xfs_inode_item.h" 33#include "xfs_inode_item.h"
diff --git a/fs/xfs/xfs_dmapi.h b/fs/xfs/xfs_dmapi.h
deleted file mode 100644
index 2813cdd72375..000000000000
--- a/fs/xfs/xfs_dmapi.h
+++ /dev/null
@@ -1,170 +0,0 @@
1/*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef __XFS_DMAPI_H__
19#define __XFS_DMAPI_H__
20
21/* Values used to define the on-disk version of dm_attrname_t. All
22 * on-disk attribute names start with the 8-byte string "SGI_DMI_".
23 *
24 * In the on-disk inode, DMAPI attribute names consist of the user-provided
25 * name with the DMATTR_PREFIXSTRING pre-pended. This string must NEVER be
26 * changed.
27 */
28
29#define DMATTR_PREFIXLEN 8
30#define DMATTR_PREFIXSTRING "SGI_DMI_"
31
32typedef enum {
33 DM_EVENT_INVALID = -1,
34 DM_EVENT_CANCEL = 0, /* not supported */
35 DM_EVENT_MOUNT = 1,
36 DM_EVENT_PREUNMOUNT = 2,
37 DM_EVENT_UNMOUNT = 3,
38 DM_EVENT_DEBUT = 4, /* not supported */
39 DM_EVENT_CREATE = 5,
40 DM_EVENT_CLOSE = 6, /* not supported */
41 DM_EVENT_POSTCREATE = 7,
42 DM_EVENT_REMOVE = 8,
43 DM_EVENT_POSTREMOVE = 9,
44 DM_EVENT_RENAME = 10,
45 DM_EVENT_POSTRENAME = 11,
46 DM_EVENT_LINK = 12,
47 DM_EVENT_POSTLINK = 13,
48 DM_EVENT_SYMLINK = 14,
49 DM_EVENT_POSTSYMLINK = 15,
50 DM_EVENT_READ = 16,
51 DM_EVENT_WRITE = 17,
52 DM_EVENT_TRUNCATE = 18,
53 DM_EVENT_ATTRIBUTE = 19,
54 DM_EVENT_DESTROY = 20,
55 DM_EVENT_NOSPACE = 21,
56 DM_EVENT_USER = 22,
57 DM_EVENT_MAX = 23
58} dm_eventtype_t;
59#define HAVE_DM_EVENTTYPE_T
60
61typedef enum {
62 DM_RIGHT_NULL,
63 DM_RIGHT_SHARED,
64 DM_RIGHT_EXCL
65} dm_right_t;
66#define HAVE_DM_RIGHT_T
67
68/* Defines for determining if an event message should be sent. */
69#ifdef HAVE_DMAPI
70#define DM_EVENT_ENABLED(ip, event) ( \
71 unlikely ((ip)->i_mount->m_flags & XFS_MOUNT_DMAPI) && \
72 ( ((ip)->i_d.di_dmevmask & (1 << event)) || \
73 ((ip)->i_mount->m_dmevmask & (1 << event)) ) \
74 )
75#else
76#define DM_EVENT_ENABLED(ip, event) (0)
77#endif
78
79#define DM_XFS_VALID_FS_EVENTS ( \
80 (1 << DM_EVENT_PREUNMOUNT) | \
81 (1 << DM_EVENT_UNMOUNT) | \
82 (1 << DM_EVENT_NOSPACE) | \
83 (1 << DM_EVENT_DEBUT) | \
84 (1 << DM_EVENT_CREATE) | \
85 (1 << DM_EVENT_POSTCREATE) | \
86 (1 << DM_EVENT_REMOVE) | \
87 (1 << DM_EVENT_POSTREMOVE) | \
88 (1 << DM_EVENT_RENAME) | \
89 (1 << DM_EVENT_POSTRENAME) | \
90 (1 << DM_EVENT_LINK) | \
91 (1 << DM_EVENT_POSTLINK) | \
92 (1 << DM_EVENT_SYMLINK) | \
93 (1 << DM_EVENT_POSTSYMLINK) | \
94 (1 << DM_EVENT_ATTRIBUTE) | \
95 (1 << DM_EVENT_DESTROY) )
96
97/* Events valid in dm_set_eventlist() when called with a file handle for
98 a regular file or a symlink. These events are persistent.
99*/
100
101#define DM_XFS_VALID_FILE_EVENTS ( \
102 (1 << DM_EVENT_ATTRIBUTE) | \
103 (1 << DM_EVENT_DESTROY) )
104
105/* Events valid in dm_set_eventlist() when called with a file handle for
106 a directory. These events are persistent.
107*/
108
109#define DM_XFS_VALID_DIRECTORY_EVENTS ( \
110 (1 << DM_EVENT_CREATE) | \
111 (1 << DM_EVENT_POSTCREATE) | \
112 (1 << DM_EVENT_REMOVE) | \
113 (1 << DM_EVENT_POSTREMOVE) | \
114 (1 << DM_EVENT_RENAME) | \
115 (1 << DM_EVENT_POSTRENAME) | \
116 (1 << DM_EVENT_LINK) | \
117 (1 << DM_EVENT_POSTLINK) | \
118 (1 << DM_EVENT_SYMLINK) | \
119 (1 << DM_EVENT_POSTSYMLINK) | \
120 (1 << DM_EVENT_ATTRIBUTE) | \
121 (1 << DM_EVENT_DESTROY) )
122
123/* Events supported by the XFS filesystem. */
124#define DM_XFS_SUPPORTED_EVENTS ( \
125 (1 << DM_EVENT_MOUNT) | \
126 (1 << DM_EVENT_PREUNMOUNT) | \
127 (1 << DM_EVENT_UNMOUNT) | \
128 (1 << DM_EVENT_NOSPACE) | \
129 (1 << DM_EVENT_CREATE) | \
130 (1 << DM_EVENT_POSTCREATE) | \
131 (1 << DM_EVENT_REMOVE) | \
132 (1 << DM_EVENT_POSTREMOVE) | \
133 (1 << DM_EVENT_RENAME) | \
134 (1 << DM_EVENT_POSTRENAME) | \
135 (1 << DM_EVENT_LINK) | \
136 (1 << DM_EVENT_POSTLINK) | \
137 (1 << DM_EVENT_SYMLINK) | \
138 (1 << DM_EVENT_POSTSYMLINK) | \
139 (1 << DM_EVENT_READ) | \
140 (1 << DM_EVENT_WRITE) | \
141 (1 << DM_EVENT_TRUNCATE) | \
142 (1 << DM_EVENT_ATTRIBUTE) | \
143 (1 << DM_EVENT_DESTROY) )
144
145
146/*
147 * Definitions used for the flags field on dm_send_*_event().
148 */
149
150#define DM_FLAGS_NDELAY 0x001 /* return EAGAIN after dm_pending() */
151#define DM_FLAGS_UNWANTED 0x002 /* event not in fsys dm_eventset_t */
152#define DM_FLAGS_IMUX 0x004 /* thread holds i_mutex */
153#define DM_FLAGS_IALLOCSEM_RD 0x010 /* thread holds i_alloc_sem rd */
154#define DM_FLAGS_IALLOCSEM_WR 0x020 /* thread holds i_alloc_sem wr */
155
156/*
157 * Pull in platform specific event flags defines
158 */
159#include "xfs_dmapi_priv.h"
160
161/*
162 * Macros to turn caller specified delay/block flags into
163 * dm_send_xxxx_event flag DM_FLAGS_NDELAY.
164 */
165
166#define FILP_DELAY_FLAG(filp) ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) ? \
167 DM_FLAGS_NDELAY : 0)
168#define AT_DELAY_FLAG(f) ((f & XFS_ATTR_NONBLOCK) ? DM_FLAGS_NDELAY : 0)
169
170#endif /* __XFS_DMAPI_H__ */
diff --git a/fs/xfs/xfs_dmops.c b/fs/xfs/xfs_dmops.c
deleted file mode 100644
index e71e2581c0c3..000000000000
--- a/fs/xfs/xfs_dmops.c
+++ /dev/null
@@ -1,55 +0,0 @@
1/*
2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_types.h"
21#include "xfs_log.h"
22#include "xfs_trans.h"
23#include "xfs_sb.h"
24#include "xfs_dmapi.h"
25#include "xfs_inum.h"
26#include "xfs_ag.h"
27#include "xfs_mount.h"
28
29
30static struct xfs_dmops xfs_dmcore_stub = {
31 .xfs_send_data = (xfs_send_data_t)fs_nosys,
32 .xfs_send_mmap = (xfs_send_mmap_t)fs_noerr,
33 .xfs_send_destroy = (xfs_send_destroy_t)fs_nosys,
34 .xfs_send_namesp = (xfs_send_namesp_t)fs_nosys,
35 .xfs_send_mount = (xfs_send_mount_t)fs_nosys,
36 .xfs_send_unmount = (xfs_send_unmount_t)fs_noerr,
37};
38
39int
40xfs_dmops_get(struct xfs_mount *mp)
41{
42 if (mp->m_flags & XFS_MOUNT_DMAPI) {
43 cmn_err(CE_WARN,
44 "XFS: dmapi support not available in this kernel.");
45 return EINVAL;
46 }
47
48 mp->m_dm_ops = &xfs_dmcore_stub;
49 return 0;
50}
51
52void
53xfs_dmops_put(struct xfs_mount *mp)
54{
55}
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 047b8a8e5c29..ed9990267661 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -23,12 +23,8 @@
23#include "xfs_trans.h" 23#include "xfs_trans.h"
24#include "xfs_sb.h" 24#include "xfs_sb.h"
25#include "xfs_ag.h" 25#include "xfs_ag.h"
26#include "xfs_dir2.h"
27#include "xfs_dmapi.h"
28#include "xfs_mount.h" 26#include "xfs_mount.h"
29#include "xfs_bmap_btree.h" 27#include "xfs_bmap_btree.h"
30#include "xfs_dir2_sf.h"
31#include "xfs_attr_sf.h"
32#include "xfs_dinode.h" 28#include "xfs_dinode.h"
33#include "xfs_inode.h" 29#include "xfs_inode.h"
34#include "xfs_utils.h" 30#include "xfs_utils.h"
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 409fe81585fd..a55e687bf562 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -24,7 +24,6 @@
24#include "xfs_buf_item.h" 24#include "xfs_buf_item.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dmapi.h"
28#include "xfs_mount.h" 27#include "xfs_mount.h"
29#include "xfs_trans_priv.h" 28#include "xfs_trans_priv.h"
30#include "xfs_extfree_item.h" 29#include "xfs_extfree_item.h"
@@ -33,18 +32,19 @@
33kmem_zone_t *xfs_efi_zone; 32kmem_zone_t *xfs_efi_zone;
34kmem_zone_t *xfs_efd_zone; 33kmem_zone_t *xfs_efd_zone;
35 34
36STATIC void xfs_efi_item_unlock(xfs_efi_log_item_t *); 35static inline struct xfs_efi_log_item *EFI_ITEM(struct xfs_log_item *lip)
36{
37 return container_of(lip, struct xfs_efi_log_item, efi_item);
38}
37 39
38void 40void
39xfs_efi_item_free(xfs_efi_log_item_t *efip) 41xfs_efi_item_free(
42 struct xfs_efi_log_item *efip)
40{ 43{
41 int nexts = efip->efi_format.efi_nextents; 44 if (efip->efi_format.efi_nextents > XFS_EFI_MAX_FAST_EXTENTS)
42
43 if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
44 kmem_free(efip); 45 kmem_free(efip);
45 } else { 46 else
46 kmem_zone_free(xfs_efi_zone, efip); 47 kmem_zone_free(xfs_efi_zone, efip);
47 }
48} 48}
49 49
50/* 50/*
@@ -52,9 +52,9 @@ xfs_efi_item_free(xfs_efi_log_item_t *efip)
52 * We only need 1 iovec for an efi item. It just logs the efi_log_format 52 * We only need 1 iovec for an efi item. It just logs the efi_log_format
53 * structure. 53 * structure.
54 */ 54 */
55/*ARGSUSED*/
56STATIC uint 55STATIC uint
57xfs_efi_item_size(xfs_efi_log_item_t *efip) 56xfs_efi_item_size(
57 struct xfs_log_item *lip)
58{ 58{
59 return 1; 59 return 1;
60} 60}
@@ -67,10 +67,12 @@ xfs_efi_item_size(xfs_efi_log_item_t *efip)
67 * slots in the efi item have been filled. 67 * slots in the efi item have been filled.
68 */ 68 */
69STATIC void 69STATIC void
70xfs_efi_item_format(xfs_efi_log_item_t *efip, 70xfs_efi_item_format(
71 xfs_log_iovec_t *log_vector) 71 struct xfs_log_item *lip,
72 struct xfs_log_iovec *log_vector)
72{ 73{
73 uint size; 74 struct xfs_efi_log_item *efip = EFI_ITEM(lip);
75 uint size;
74 76
75 ASSERT(efip->efi_next_extent == efip->efi_format.efi_nextents); 77 ASSERT(efip->efi_next_extent == efip->efi_format.efi_nextents);
76 78
@@ -80,7 +82,7 @@ xfs_efi_item_format(xfs_efi_log_item_t *efip,
80 size += (efip->efi_format.efi_nextents - 1) * sizeof(xfs_extent_t); 82 size += (efip->efi_format.efi_nextents - 1) * sizeof(xfs_extent_t);
81 efip->efi_format.efi_size = 1; 83 efip->efi_format.efi_size = 1;
82 84
83 log_vector->i_addr = (xfs_caddr_t)&(efip->efi_format); 85 log_vector->i_addr = &efip->efi_format;
84 log_vector->i_len = size; 86 log_vector->i_len = size;
85 log_vector->i_type = XLOG_REG_TYPE_EFI_FORMAT; 87 log_vector->i_type = XLOG_REG_TYPE_EFI_FORMAT;
86 ASSERT(size >= sizeof(xfs_efi_log_format_t)); 88 ASSERT(size >= sizeof(xfs_efi_log_format_t));
@@ -90,60 +92,33 @@ xfs_efi_item_format(xfs_efi_log_item_t *efip,
90/* 92/*
91 * Pinning has no meaning for an efi item, so just return. 93 * Pinning has no meaning for an efi item, so just return.
92 */ 94 */
93/*ARGSUSED*/
94STATIC void 95STATIC void
95xfs_efi_item_pin(xfs_efi_log_item_t *efip) 96xfs_efi_item_pin(
97 struct xfs_log_item *lip)
96{ 98{
97 return;
98} 99}
99 100
100
101/* 101/*
102 * While EFIs cannot really be pinned, the unpin operation is the 102 * While EFIs cannot really be pinned, the unpin operation is the
103 * last place at which the EFI is manipulated during a transaction. 103 * last place at which the EFI is manipulated during a transaction.
104 * Here we coordinate with xfs_efi_cancel() to determine who gets to 104 * Here we coordinate with xfs_efi_cancel() to determine who gets to
105 * free the EFI. 105 * free the EFI.
106 */ 106 */
107/*ARGSUSED*/
108STATIC void
109xfs_efi_item_unpin(xfs_efi_log_item_t *efip)
110{
111 struct xfs_ail *ailp = efip->efi_item.li_ailp;
112
113 spin_lock(&ailp->xa_lock);
114 if (efip->efi_flags & XFS_EFI_CANCELED) {
115 /* xfs_trans_ail_delete() drops the AIL lock. */
116 xfs_trans_ail_delete(ailp, (xfs_log_item_t *)efip);
117 xfs_efi_item_free(efip);
118 } else {
119 efip->efi_flags |= XFS_EFI_COMMITTED;
120 spin_unlock(&ailp->xa_lock);
121 }
122}
123
124/*
125 * like unpin only we have to also clear the xaction descriptor
126 * pointing the log item if we free the item. This routine duplicates
127 * unpin because efi_flags is protected by the AIL lock. Freeing
128 * the descriptor and then calling unpin would force us to drop the AIL
129 * lock which would open up a race condition.
130 */
131STATIC void 107STATIC void
132xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp) 108xfs_efi_item_unpin(
109 struct xfs_log_item *lip,
110 int remove)
133{ 111{
134 struct xfs_ail *ailp = efip->efi_item.li_ailp; 112 struct xfs_efi_log_item *efip = EFI_ITEM(lip);
135 xfs_log_item_desc_t *lidp; 113 struct xfs_ail *ailp = lip->li_ailp;
136 114
137 spin_lock(&ailp->xa_lock); 115 spin_lock(&ailp->xa_lock);
138 if (efip->efi_flags & XFS_EFI_CANCELED) { 116 if (efip->efi_flags & XFS_EFI_CANCELED) {
139 /* 117 if (remove)
140 * free the xaction descriptor pointing to this item 118 xfs_trans_del_item(lip);
141 */
142 lidp = xfs_trans_find_item(tp, (xfs_log_item_t *) efip);
143 xfs_trans_free_item(tp, lidp);
144 119
145 /* xfs_trans_ail_delete() drops the AIL lock. */ 120 /* xfs_trans_ail_delete() drops the AIL lock. */
146 xfs_trans_ail_delete(ailp, (xfs_log_item_t *)efip); 121 xfs_trans_ail_delete(ailp, lip);
147 xfs_efi_item_free(efip); 122 xfs_efi_item_free(efip);
148 } else { 123 } else {
149 efip->efi_flags |= XFS_EFI_COMMITTED; 124 efip->efi_flags |= XFS_EFI_COMMITTED;
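
Editorial aside, not part of the patch: the merged unpin path above keeps the original handshake. Under the AIL lock, whichever of unpin or cancel runs second sees the other side's flag and frees the EFI (unpin additionally detaching it from the transaction when the remove flag is set), while whichever runs first merely records its state. A simplified, non-XFS sketch of that "second party frees" pattern, using a plain mutex in place of the AIL lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct two_party_obj {
	pthread_mutex_t	lock;
	bool		cancelled;	/* set by the cancel side */
	bool		committed;	/* set by the unpin side  */
};

/* Unpin side: free the object only if the cancel side got here first. */
static void obj_unpin(struct two_party_obj *obj)
{
	pthread_mutex_lock(&obj->lock);
	if (obj->cancelled) {
		pthread_mutex_unlock(&obj->lock);
		pthread_mutex_destroy(&obj->lock);
		free(obj);
		return;
	}
	obj->committed = true;
	pthread_mutex_unlock(&obj->lock);
}

/* Cancel side: mirror image of obj_unpin(). */
static void obj_cancel(struct two_party_obj *obj)
{
	pthread_mutex_lock(&obj->lock);
	if (obj->committed) {
		pthread_mutex_unlock(&obj->lock);
		pthread_mutex_destroy(&obj->lock);
		free(obj);
		return;
	}
	obj->cancelled = true;
	pthread_mutex_unlock(&obj->lock);
}
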
@@ -158,9 +133,9 @@ xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp)
158 * XFS_ITEM_PINNED so that the caller will eventually flush the log. 133 * XFS_ITEM_PINNED so that the caller will eventually flush the log.
159 * This should help in getting the EFI out of the AIL. 134 * This should help in getting the EFI out of the AIL.
160 */ 135 */
161/*ARGSUSED*/
162STATIC uint 136STATIC uint
163xfs_efi_item_trylock(xfs_efi_log_item_t *efip) 137xfs_efi_item_trylock(
138 struct xfs_log_item *lip)
164{ 139{
165 return XFS_ITEM_PINNED; 140 return XFS_ITEM_PINNED;
166} 141}
@@ -168,13 +143,12 @@ xfs_efi_item_trylock(xfs_efi_log_item_t *efip)
168/* 143/*
169 * Efi items have no locking, so just return. 144 * Efi items have no locking, so just return.
170 */ 145 */
171/*ARGSUSED*/
172STATIC void 146STATIC void
173xfs_efi_item_unlock(xfs_efi_log_item_t *efip) 147xfs_efi_item_unlock(
148 struct xfs_log_item *lip)
174{ 149{
175 if (efip->efi_item.li_flags & XFS_LI_ABORTED) 150 if (lip->li_flags & XFS_LI_ABORTED)
176 xfs_efi_item_free(efip); 151 xfs_efi_item_free(EFI_ITEM(lip));
177 return;
178} 152}
179 153
180/* 154/*
@@ -183,9 +157,10 @@ xfs_efi_item_unlock(xfs_efi_log_item_t *efip)
183 * flag is not paid any attention here. Checking for that is delayed 157 * flag is not paid any attention here. Checking for that is delayed
184 * until the EFI is unpinned. 158 * until the EFI is unpinned.
185 */ 159 */
186/*ARGSUSED*/
187STATIC xfs_lsn_t 160STATIC xfs_lsn_t
188xfs_efi_item_committed(xfs_efi_log_item_t *efip, xfs_lsn_t lsn) 161xfs_efi_item_committed(
162 struct xfs_log_item *lip,
163 xfs_lsn_t lsn)
189{ 164{
190 return lsn; 165 return lsn;
191} 166}
@@ -195,11 +170,10 @@ xfs_efi_item_committed(xfs_efi_log_item_t *efip, xfs_lsn_t lsn)
195 * stuck waiting for all of its corresponding efd items to be 170 * stuck waiting for all of its corresponding efd items to be
196 * committed to disk. 171 * committed to disk.
197 */ 172 */
198/*ARGSUSED*/
199STATIC void 173STATIC void
200xfs_efi_item_push(xfs_efi_log_item_t *efip) 174xfs_efi_item_push(
175 struct xfs_log_item *lip)
201{ 176{
202 return;
203} 177}
204 178
205/* 179/*
@@ -209,61 +183,55 @@ xfs_efi_item_push(xfs_efi_log_item_t *efip)
209 * example, for inodes, the inode is locked throughout the extent freeing 183 * example, for inodes, the inode is locked throughout the extent freeing
210 * so the dependency should be recorded there. 184 * so the dependency should be recorded there.
211 */ 185 */
212/*ARGSUSED*/
213STATIC void 186STATIC void
214xfs_efi_item_committing(xfs_efi_log_item_t *efip, xfs_lsn_t lsn) 187xfs_efi_item_committing(
188 struct xfs_log_item *lip,
189 xfs_lsn_t lsn)
215{ 190{
216 return;
217} 191}
218 192
219/* 193/*
220 * This is the ops vector shared by all efi log items. 194 * This is the ops vector shared by all efi log items.
221 */ 195 */
222static struct xfs_item_ops xfs_efi_item_ops = { 196static struct xfs_item_ops xfs_efi_item_ops = {
223 .iop_size = (uint(*)(xfs_log_item_t*))xfs_efi_item_size, 197 .iop_size = xfs_efi_item_size,
224 .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) 198 .iop_format = xfs_efi_item_format,
225 xfs_efi_item_format, 199 .iop_pin = xfs_efi_item_pin,
226 .iop_pin = (void(*)(xfs_log_item_t*))xfs_efi_item_pin, 200 .iop_unpin = xfs_efi_item_unpin,
227 .iop_unpin = (void(*)(xfs_log_item_t*))xfs_efi_item_unpin, 201 .iop_trylock = xfs_efi_item_trylock,
228 .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *)) 202 .iop_unlock = xfs_efi_item_unlock,
229 xfs_efi_item_unpin_remove, 203 .iop_committed = xfs_efi_item_committed,
230 .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_efi_item_trylock, 204 .iop_push = xfs_efi_item_push,
231 .iop_unlock = (void(*)(xfs_log_item_t*))xfs_efi_item_unlock, 205 .iop_committing = xfs_efi_item_committing
232 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
233 xfs_efi_item_committed,
234 .iop_push = (void(*)(xfs_log_item_t*))xfs_efi_item_push,
235 .iop_pushbuf = NULL,
236 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
237 xfs_efi_item_committing
238}; 206};
239 207
240 208
241/* 209/*
242 * Allocate and initialize an efi item with the given number of extents. 210 * Allocate and initialize an efi item with the given number of extents.
243 */ 211 */
244xfs_efi_log_item_t * 212struct xfs_efi_log_item *
245xfs_efi_init(xfs_mount_t *mp, 213xfs_efi_init(
246 uint nextents) 214 struct xfs_mount *mp,
215 uint nextents)
247 216
248{ 217{
249 xfs_efi_log_item_t *efip; 218 struct xfs_efi_log_item *efip;
250 uint size; 219 uint size;
251 220
252 ASSERT(nextents > 0); 221 ASSERT(nextents > 0);
253 if (nextents > XFS_EFI_MAX_FAST_EXTENTS) { 222 if (nextents > XFS_EFI_MAX_FAST_EXTENTS) {
254 size = (uint)(sizeof(xfs_efi_log_item_t) + 223 size = (uint)(sizeof(xfs_efi_log_item_t) +
255 ((nextents - 1) * sizeof(xfs_extent_t))); 224 ((nextents - 1) * sizeof(xfs_extent_t)));
256 efip = (xfs_efi_log_item_t*)kmem_zalloc(size, KM_SLEEP); 225 efip = kmem_zalloc(size, KM_SLEEP);
257 } else { 226 } else {
258 efip = (xfs_efi_log_item_t*)kmem_zone_zalloc(xfs_efi_zone, 227 efip = kmem_zone_zalloc(xfs_efi_zone, KM_SLEEP);
259 KM_SLEEP);
260 } 228 }
261 229
262 xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops); 230 xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops);
263 efip->efi_format.efi_nextents = nextents; 231 efip->efi_format.efi_nextents = nextents;
264 efip->efi_format.efi_id = (__psint_t)(void*)efip; 232 efip->efi_format.efi_id = (__psint_t)(void*)efip;
265 233
266 return (efip); 234 return efip;
267} 235}
268 236
269/* 237/*
@@ -276,7 +244,7 @@ xfs_efi_init(xfs_mount_t *mp,
276int 244int
277xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt) 245xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt)
278{ 246{
279 xfs_efi_log_format_t *src_efi_fmt = (xfs_efi_log_format_t *)buf->i_addr; 247 xfs_efi_log_format_t *src_efi_fmt = buf->i_addr;
280 uint i; 248 uint i;
281 uint len = sizeof(xfs_efi_log_format_t) + 249 uint len = sizeof(xfs_efi_log_format_t) +
282 (src_efi_fmt->efi_nextents - 1) * sizeof(xfs_extent_t); 250 (src_efi_fmt->efi_nextents - 1) * sizeof(xfs_extent_t);
@@ -289,8 +257,7 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt)
289 memcpy((char *)dst_efi_fmt, (char*)src_efi_fmt, len); 257 memcpy((char *)dst_efi_fmt, (char*)src_efi_fmt, len);
290 return 0; 258 return 0;
291 } else if (buf->i_len == len32) { 259 } else if (buf->i_len == len32) {
292 xfs_efi_log_format_32_t *src_efi_fmt_32 = 260 xfs_efi_log_format_32_t *src_efi_fmt_32 = buf->i_addr;
293 (xfs_efi_log_format_32_t *)buf->i_addr;
294 261
295 dst_efi_fmt->efi_type = src_efi_fmt_32->efi_type; 262 dst_efi_fmt->efi_type = src_efi_fmt_32->efi_type;
296 dst_efi_fmt->efi_size = src_efi_fmt_32->efi_size; 263 dst_efi_fmt->efi_size = src_efi_fmt_32->efi_size;
@@ -304,8 +271,7 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt)
304 } 271 }
305 return 0; 272 return 0;
306 } else if (buf->i_len == len64) { 273 } else if (buf->i_len == len64) {
307 xfs_efi_log_format_64_t *src_efi_fmt_64 = 274 xfs_efi_log_format_64_t *src_efi_fmt_64 = buf->i_addr;
308 (xfs_efi_log_format_64_t *)buf->i_addr;
309 275
310 dst_efi_fmt->efi_type = src_efi_fmt_64->efi_type; 276 dst_efi_fmt->efi_type = src_efi_fmt_64->efi_type;
311 dst_efi_fmt->efi_size = src_efi_fmt_64->efi_size; 277 dst_efi_fmt->efi_size = src_efi_fmt_64->efi_size;
@@ -356,16 +322,18 @@ xfs_efi_release(xfs_efi_log_item_t *efip,
356 } 322 }
357} 323}
358 324
359STATIC void 325static inline struct xfs_efd_log_item *EFD_ITEM(struct xfs_log_item *lip)
360xfs_efd_item_free(xfs_efd_log_item_t *efdp)
361{ 326{
362 int nexts = efdp->efd_format.efd_nextents; 327 return container_of(lip, struct xfs_efd_log_item, efd_item);
328}
363 329
364 if (nexts > XFS_EFD_MAX_FAST_EXTENTS) { 330STATIC void
331xfs_efd_item_free(struct xfs_efd_log_item *efdp)
332{
333 if (efdp->efd_format.efd_nextents > XFS_EFD_MAX_FAST_EXTENTS)
365 kmem_free(efdp); 334 kmem_free(efdp);
366 } else { 335 else
367 kmem_zone_free(xfs_efd_zone, efdp); 336 kmem_zone_free(xfs_efd_zone, efdp);
368 }
369} 337}
370 338
371/* 339/*
@@ -373,9 +341,9 @@ xfs_efd_item_free(xfs_efd_log_item_t *efdp)
373 * We only need 1 iovec for an efd item. It just logs the efd_log_format 341 * We only need 1 iovec for an efd item. It just logs the efd_log_format
374 * structure. 342 * structure.
375 */ 343 */
376/*ARGSUSED*/
377STATIC uint 344STATIC uint
378xfs_efd_item_size(xfs_efd_log_item_t *efdp) 345xfs_efd_item_size(
346 struct xfs_log_item *lip)
379{ 347{
380 return 1; 348 return 1;
381} 349}
@@ -388,10 +356,12 @@ xfs_efd_item_size(xfs_efd_log_item_t *efdp)
388 * slots in the efd item have been filled. 356 * slots in the efd item have been filled.
389 */ 357 */
390STATIC void 358STATIC void
391xfs_efd_item_format(xfs_efd_log_item_t *efdp, 359xfs_efd_item_format(
392 xfs_log_iovec_t *log_vector) 360 struct xfs_log_item *lip,
361 struct xfs_log_iovec *log_vector)
393{ 362{
394 uint size; 363 struct xfs_efd_log_item *efdp = EFD_ITEM(lip);
364 uint size;
395 365
396 ASSERT(efdp->efd_next_extent == efdp->efd_format.efd_nextents); 366 ASSERT(efdp->efd_next_extent == efdp->efd_format.efd_nextents);
397 367
@@ -401,48 +371,38 @@ xfs_efd_item_format(xfs_efd_log_item_t *efdp,
401 size += (efdp->efd_format.efd_nextents - 1) * sizeof(xfs_extent_t); 371 size += (efdp->efd_format.efd_nextents - 1) * sizeof(xfs_extent_t);
402 efdp->efd_format.efd_size = 1; 372 efdp->efd_format.efd_size = 1;
403 373
404 log_vector->i_addr = (xfs_caddr_t)&(efdp->efd_format); 374 log_vector->i_addr = &efdp->efd_format;
405 log_vector->i_len = size; 375 log_vector->i_len = size;
406 log_vector->i_type = XLOG_REG_TYPE_EFD_FORMAT; 376 log_vector->i_type = XLOG_REG_TYPE_EFD_FORMAT;
407 ASSERT(size >= sizeof(xfs_efd_log_format_t)); 377 ASSERT(size >= sizeof(xfs_efd_log_format_t));
408} 378}
409 379
410
411/* 380/*
412 * Pinning has no meaning for an efd item, so just return. 381 * Pinning has no meaning for an efd item, so just return.
413 */ 382 */
414/*ARGSUSED*/
415STATIC void 383STATIC void
416xfs_efd_item_pin(xfs_efd_log_item_t *efdp) 384xfs_efd_item_pin(
385 struct xfs_log_item *lip)
417{ 386{
418 return;
419} 387}
420 388
421
422/* 389/*
423 * Since pinning has no meaning for an efd item, unpinning does 390 * Since pinning has no meaning for an efd item, unpinning does
424 * not either. 391 * not either.
425 */ 392 */
426/*ARGSUSED*/
427STATIC void
428xfs_efd_item_unpin(xfs_efd_log_item_t *efdp)
429{
430 return;
431}
432
433/*ARGSUSED*/
434STATIC void 393STATIC void
435xfs_efd_item_unpin_remove(xfs_efd_log_item_t *efdp, xfs_trans_t *tp) 394xfs_efd_item_unpin(
395 struct xfs_log_item *lip,
396 int remove)
436{ 397{
437 return;
438} 398}
439 399
440/* 400/*
441 * Efd items have no locking, so just return success. 401 * Efd items have no locking, so just return success.
442 */ 402 */
443/*ARGSUSED*/
444STATIC uint 403STATIC uint
445xfs_efd_item_trylock(xfs_efd_log_item_t *efdp) 404xfs_efd_item_trylock(
405 struct xfs_log_item *lip)
446{ 406{
447 return XFS_ITEM_LOCKED; 407 return XFS_ITEM_LOCKED;
448} 408}
@@ -451,13 +411,12 @@ xfs_efd_item_trylock(xfs_efd_log_item_t *efdp)
451 * Efd items have no locking or pushing, so return failure 411 * Efd items have no locking or pushing, so return failure
452 * so that the caller doesn't bother with us. 412 * so that the caller doesn't bother with us.
453 */ 413 */
454/*ARGSUSED*/
455STATIC void 414STATIC void
456xfs_efd_item_unlock(xfs_efd_log_item_t *efdp) 415xfs_efd_item_unlock(
416 struct xfs_log_item *lip)
457{ 417{
458 if (efdp->efd_item.li_flags & XFS_LI_ABORTED) 418 if (lip->li_flags & XFS_LI_ABORTED)
459 xfs_efd_item_free(efdp); 419 xfs_efd_item_free(EFD_ITEM(lip));
460 return;
461} 420}
462 421
463/* 422/*
@@ -467,15 +426,18 @@ xfs_efd_item_unlock(xfs_efd_log_item_t *efdp)
467 * return -1 to keep the transaction code from further referencing 426 * return -1 to keep the transaction code from further referencing
468 * this item. 427 * this item.
469 */ 428 */
470/*ARGSUSED*/
471STATIC xfs_lsn_t 429STATIC xfs_lsn_t
472xfs_efd_item_committed(xfs_efd_log_item_t *efdp, xfs_lsn_t lsn) 430xfs_efd_item_committed(
431 struct xfs_log_item *lip,
432 xfs_lsn_t lsn)
473{ 433{
434 struct xfs_efd_log_item *efdp = EFD_ITEM(lip);
435
474 /* 436 /*
475 * If we got a log I/O error, it's always the case that the LR with the 437 * If we got a log I/O error, it's always the case that the LR with the
476 * EFI got unpinned and freed before the EFD got aborted. 438 * EFI got unpinned and freed before the EFD got aborted.
477 */ 439 */
478 if ((efdp->efd_item.li_flags & XFS_LI_ABORTED) == 0) 440 if (!(lip->li_flags & XFS_LI_ABORTED))
479 xfs_efi_release(efdp->efd_efip, efdp->efd_format.efd_nextents); 441 xfs_efi_release(efdp->efd_efip, efdp->efd_format.efd_nextents);
480 442
481 xfs_efd_item_free(efdp); 443 xfs_efd_item_free(efdp);
@@ -486,11 +448,10 @@ xfs_efd_item_committed(xfs_efd_log_item_t *efdp, xfs_lsn_t lsn)
486 * There isn't much you can do to push on an efd item. It is simply 448 * There isn't much you can do to push on an efd item. It is simply
487 * stuck waiting for the log to be flushed to disk. 449 * stuck waiting for the log to be flushed to disk.
488 */ 450 */
489/*ARGSUSED*/
490STATIC void 451STATIC void
491xfs_efd_item_push(xfs_efd_log_item_t *efdp) 452xfs_efd_item_push(
453 struct xfs_log_item *lip)
492{ 454{
493 return;
494} 455}
495 456
496/* 457/*
@@ -500,55 +461,48 @@ xfs_efd_item_push(xfs_efd_log_item_t *efdp)
500 * example, for inodes, the inode is locked throughout the extent freeing 461 * example, for inodes, the inode is locked throughout the extent freeing
501 * so the dependency should be recorded there. 462 * so the dependency should be recorded there.
502 */ 463 */
503/*ARGSUSED*/
504STATIC void 464STATIC void
505xfs_efd_item_committing(xfs_efd_log_item_t *efip, xfs_lsn_t lsn) 465xfs_efd_item_committing(
466 struct xfs_log_item *lip,
467 xfs_lsn_t lsn)
506{ 468{
507 return;
508} 469}
509 470
510/* 471/*
511 * This is the ops vector shared by all efd log items. 472 * This is the ops vector shared by all efd log items.
512 */ 473 */
513static struct xfs_item_ops xfs_efd_item_ops = { 474static struct xfs_item_ops xfs_efd_item_ops = {
514 .iop_size = (uint(*)(xfs_log_item_t*))xfs_efd_item_size, 475 .iop_size = xfs_efd_item_size,
515 .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) 476 .iop_format = xfs_efd_item_format,
516 xfs_efd_item_format, 477 .iop_pin = xfs_efd_item_pin,
517 .iop_pin = (void(*)(xfs_log_item_t*))xfs_efd_item_pin, 478 .iop_unpin = xfs_efd_item_unpin,
518 .iop_unpin = (void(*)(xfs_log_item_t*))xfs_efd_item_unpin, 479 .iop_trylock = xfs_efd_item_trylock,
519 .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*)) 480 .iop_unlock = xfs_efd_item_unlock,
520 xfs_efd_item_unpin_remove, 481 .iop_committed = xfs_efd_item_committed,
521 .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_efd_item_trylock, 482 .iop_push = xfs_efd_item_push,
522 .iop_unlock = (void(*)(xfs_log_item_t*))xfs_efd_item_unlock, 483 .iop_committing = xfs_efd_item_committing
523 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
524 xfs_efd_item_committed,
525 .iop_push = (void(*)(xfs_log_item_t*))xfs_efd_item_push,
526 .iop_pushbuf = NULL,
527 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
528 xfs_efd_item_committing
529}; 484};
530 485
531
532/* 486/*
533 * Allocate and initialize an efd item with the given number of extents. 487 * Allocate and initialize an efd item with the given number of extents.
534 */ 488 */
535xfs_efd_log_item_t * 489struct xfs_efd_log_item *
536xfs_efd_init(xfs_mount_t *mp, 490xfs_efd_init(
537 xfs_efi_log_item_t *efip, 491 struct xfs_mount *mp,
538 uint nextents) 492 struct xfs_efi_log_item *efip,
493 uint nextents)
539 494
540{ 495{
541 xfs_efd_log_item_t *efdp; 496 struct xfs_efd_log_item *efdp;
542 uint size; 497 uint size;
543 498
544 ASSERT(nextents > 0); 499 ASSERT(nextents > 0);
545 if (nextents > XFS_EFD_MAX_FAST_EXTENTS) { 500 if (nextents > XFS_EFD_MAX_FAST_EXTENTS) {
546 size = (uint)(sizeof(xfs_efd_log_item_t) + 501 size = (uint)(sizeof(xfs_efd_log_item_t) +
547 ((nextents - 1) * sizeof(xfs_extent_t))); 502 ((nextents - 1) * sizeof(xfs_extent_t)));
548 efdp = (xfs_efd_log_item_t*)kmem_zalloc(size, KM_SLEEP); 503 efdp = kmem_zalloc(size, KM_SLEEP);
549 } else { 504 } else {
550 efdp = (xfs_efd_log_item_t*)kmem_zone_zalloc(xfs_efd_zone, 505 efdp = kmem_zone_zalloc(xfs_efd_zone, KM_SLEEP);
551 KM_SLEEP);
552 } 506 }
553 507
554 xfs_log_item_init(mp, &efdp->efd_item, XFS_LI_EFD, &xfs_efd_item_ops); 508 xfs_log_item_init(mp, &efdp->efd_item, XFS_LI_EFD, &xfs_efd_item_ops);
@@ -556,5 +510,5 @@ xfs_efd_init(xfs_mount_t *mp,
556 efdp->efd_format.efd_nextents = nextents; 510 efdp->efd_format.efd_nextents = nextents;
557 efdp->efd_format.efd_efi_id = efip->efi_format.efi_id; 511 efdp->efd_format.efd_efi_id = efip->efi_format.efi_id;
558 512
559 return (efdp); 513 return efdp;
560} 514}
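
Editorial aside, not part of the patch: both the EFI and EFD ops vectors above shrink because every iop_ callback now takes struct xfs_log_item * directly, so the initializers no longer need function-pointer casts, which defeat compiler type checking. A generic sketch of the typed-vector style, with invented names:

struct log_item;	/* callbacks only need the pointer type */

struct item_ops {
	unsigned int	(*iop_size)(struct log_item *);
	void		(*iop_unlock)(struct log_item *);
};

static unsigned int efi_size(struct log_item *lip)   { (void)lip; return 1; }
static void         efi_unlock(struct log_item *lip) { (void)lip; }

/*
 * Because the callback signatures already match the ops table,
 * the initializers are plain function names: no casts, and a
 * signature mismatch becomes a compile error instead of a crash.
 */
static const struct item_ops efi_ops = {
	.iop_size   = efi_size,
	.iop_unlock = efi_unlock,
};
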
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index 390850ee6603..9b715dce5699 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -18,13 +18,9 @@
18#include "xfs.h" 18#include "xfs.h"
19#include "xfs_bmap_btree.h" 19#include "xfs_bmap_btree.h"
20#include "xfs_inum.h" 20#include "xfs_inum.h"
21#include "xfs_dir2.h"
22#include "xfs_dir2_sf.h"
23#include "xfs_attr_sf.h"
24#include "xfs_dinode.h" 21#include "xfs_dinode.h"
25#include "xfs_inode.h" 22#include "xfs_inode.h"
26#include "xfs_ag.h" 23#include "xfs_ag.h"
27#include "xfs_dmapi.h"
28#include "xfs_log.h" 24#include "xfs_log.h"
29#include "xfs_trans.h" 25#include "xfs_trans.h"
30#include "xfs_sb.h" 26#include "xfs_sb.h"
@@ -127,6 +123,82 @@ typedef struct fstrm_item
127 xfs_inode_t *pip; /* Parent directory inode pointer. */ 123 xfs_inode_t *pip; /* Parent directory inode pointer. */
128} fstrm_item_t; 124} fstrm_item_t;
129 125
126/*
127 * Allocation group filestream associations are tracked with per-ag atomic
128 * counters. These counters allow _xfs_filestream_pick_ag() to tell whether a
129 * particular AG already has active filestreams associated with it. The mount
130 * point's m_peraglock is used to protect these counters from per-ag array
131 * re-allocation during a growfs operation. When xfs_growfs_data_private() is
132 * about to reallocate the array, it calls xfs_filestream_flush() with the
133 * m_peraglock held in write mode.
134 *
135 * Since xfs_mru_cache_flush() guarantees that all the free functions for all
136 * the cache elements have finished executing before it returns, it's safe for
137 * the free functions to use the atomic counters without m_peraglock protection.
138 * This allows the implementation of xfs_fstrm_free_func() to be agnostic about
139 * whether it was called with the m_peraglock held in read mode, write mode or
140 * not held at all. The race condition this addresses is the following:
141 *
142 * - The work queue scheduler fires and pulls a filestream directory cache
143 * element off the LRU end of the cache for deletion, then gets pre-empted.
144 * - A growfs operation grabs the m_peraglock in write mode, flushes all the
145 * remaining items from the cache and reallocates the mount point's per-ag
146 * array, resetting all the counters to zero.
147 * - The work queue thread resumes and calls the free function for the element
148 * it started cleaning up earlier. In the process it decrements the
149 * filestreams counter for an AG that now has no references.
150 *
151 * With a shrinkfs feature, the above scenario could panic the system.
152 *
153 * All other uses of the following macros should be protected by either the
154 * m_peraglock held in read mode, or the cache's internal locking exposed by the
155 * interval between a call to xfs_mru_cache_lookup() and a call to
156 * xfs_mru_cache_done(). In addition, the m_peraglock must be held in read mode
157 * when new elements are added to the cache.
158 *
159 * Combined, these locking rules ensure that no associations will ever exist in
160 * the cache that reference per-ag array elements that have since been
161 * reallocated.
162 */
163static int
164xfs_filestream_peek_ag(
165 xfs_mount_t *mp,
166 xfs_agnumber_t agno)
167{
168 struct xfs_perag *pag;
169 int ret;
170
171 pag = xfs_perag_get(mp, agno);
172 ret = atomic_read(&pag->pagf_fstrms);
173 xfs_perag_put(pag);
174 return ret;
175}
176
177static int
178xfs_filestream_get_ag(
179 xfs_mount_t *mp,
180 xfs_agnumber_t agno)
181{
182 struct xfs_perag *pag;
183 int ret;
184
185 pag = xfs_perag_get(mp, agno);
186 ret = atomic_inc_return(&pag->pagf_fstrms);
187 xfs_perag_put(pag);
188 return ret;
189}
190
191static void
192xfs_filestream_put_ag(
193 xfs_mount_t *mp,
194 xfs_agnumber_t agno)
195{
196 struct xfs_perag *pag;
197
198 pag = xfs_perag_get(mp, agno);
199 atomic_dec(&pag->pagf_fstrms);
200 xfs_perag_put(pag);
201}
130 202
131/* 203/*
132 * Scan the AGs starting at startag looking for an AG that isn't in use and has 204 * Scan the AGs starting at startag looking for an AG that isn't in use and has
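
Editorial aside, not part of the patch: the comment block added above now sits next to the helpers it describes, which bracket an atomic per-AG counter between xfs_perag_get() and xfs_perag_put(). As a rough userspace model of the counting discipline only (C11 atomics standing in for the kernel's atomic_t, all names invented), an AG picker could skip groups that already host filestreams:

#include <stdatomic.h>

struct perag_stub {
	atomic_int fstrms;	/* active filestreams in this AG */
};

static int ag_get(struct perag_stub *pag)	/* take an association */
{
	return atomic_fetch_add(&pag->fstrms, 1) + 1;
}

static void ag_put(struct perag_stub *pag)	/* drop an association */
{
	atomic_fetch_sub(&pag->fstrms, 1);
}

static int ag_peek(struct perag_stub *pag)	/* read-only, for selection/tracing */
{
	return atomic_load(&pag->fstrms);
}

/* Prefer an AG with no active filestreams; return -1 if all are busy. */
static int pick_idle_ag(struct perag_stub *ags, int nags)
{
	for (int i = 0; i < nags; i++)
		if (ag_peek(&ags[i]) == 0)
			return i;
	return -1;
}
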
@@ -355,16 +427,14 @@ xfs_fstrm_free_func(
355{ 427{
356 fstrm_item_t *item = (fstrm_item_t *)data; 428 fstrm_item_t *item = (fstrm_item_t *)data;
357 xfs_inode_t *ip = item->ip; 429 xfs_inode_t *ip = item->ip;
358 int ref;
359 430
360 ASSERT(ip->i_ino == ino); 431 ASSERT(ip->i_ino == ino);
361 432
362 xfs_iflags_clear(ip, XFS_IFILESTREAM); 433 xfs_iflags_clear(ip, XFS_IFILESTREAM);
363 434
364 /* Drop the reference taken on the AG when the item was added. */ 435 /* Drop the reference taken on the AG when the item was added. */
365 ref = xfs_filestream_put_ag(ip->i_mount, item->ag); 436 xfs_filestream_put_ag(ip->i_mount, item->ag);
366 437
367 ASSERT(ref >= 0);
368 TRACE_FREE(ip->i_mount, ip, item->pip, item->ag, 438 TRACE_FREE(ip->i_mount, ip, item->pip, item->ag,
369 xfs_filestream_peek_ag(ip->i_mount, item->ag)); 439 xfs_filestream_peek_ag(ip->i_mount, item->ag));
370 440
diff --git a/fs/xfs/xfs_filestream.h b/fs/xfs/xfs_filestream.h
index 260f757bbc5d..09dd9af45434 100644
--- a/fs/xfs/xfs_filestream.h
+++ b/fs/xfs/xfs_filestream.h
@@ -42,88 +42,6 @@ extern ktrace_t *xfs_filestreams_trace_buf;
42 42
43#endif 43#endif
44 44
45/*
46 * Allocation group filestream associations are tracked with per-ag atomic
47 * counters. These counters allow _xfs_filestream_pick_ag() to tell whether a
48 * particular AG already has active filestreams associated with it. The mount
49 * point's m_peraglock is used to protect these counters from per-ag array
50 * re-allocation during a growfs operation. When xfs_growfs_data_private() is
51 * about to reallocate the array, it calls xfs_filestream_flush() with the
52 * m_peraglock held in write mode.
53 *
54 * Since xfs_mru_cache_flush() guarantees that all the free functions for all
55 * the cache elements have finished executing before it returns, it's safe for
56 * the free functions to use the atomic counters without m_peraglock protection.
57 * This allows the implementation of xfs_fstrm_free_func() to be agnostic about
58 * whether it was called with the m_peraglock held in read mode, write mode or
59 * not held at all. The race condition this addresses is the following:
60 *
61 * - The work queue scheduler fires and pulls a filestream directory cache
62 * element off the LRU end of the cache for deletion, then gets pre-empted.
63 * - A growfs operation grabs the m_peraglock in write mode, flushes all the
64 * remaining items from the cache and reallocates the mount point's per-ag
65 * array, resetting all the counters to zero.
66 * - The work queue thread resumes and calls the free function for the element
67 * it started cleaning up earlier. In the process it decrements the
68 * filestreams counter for an AG that now has no references.
69 *
70 * With a shrinkfs feature, the above scenario could panic the system.
71 *
72 * All other uses of the following macros should be protected by either the
73 * m_peraglock held in read mode, or the cache's internal locking exposed by the
74 * interval between a call to xfs_mru_cache_lookup() and a call to
75 * xfs_mru_cache_done(). In addition, the m_peraglock must be held in read mode
76 * when new elements are added to the cache.
77 *
78 * Combined, these locking rules ensure that no associations will ever exist in
79 * the cache that reference per-ag array elements that have since been
80 * reallocated.
81 */
82/*
83 * xfs_filestream_peek_ag is only used in tracing code
84 */
85static inline int
86xfs_filestream_peek_ag(
87 xfs_mount_t *mp,
88 xfs_agnumber_t agno)
89{
90 struct xfs_perag *pag;
91 int ret;
92
93 pag = xfs_perag_get(mp, agno);
94 ret = atomic_read(&pag->pagf_fstrms);
95 xfs_perag_put(pag);
96 return ret;
97}
98
99static inline int
100xfs_filestream_get_ag(
101 xfs_mount_t *mp,
102 xfs_agnumber_t agno)
103{
104 struct xfs_perag *pag;
105 int ret;
106
107 pag = xfs_perag_get(mp, agno);
108 ret = atomic_inc_return(&pag->pagf_fstrms);
109 xfs_perag_put(pag);
110 return ret;
111}
112
113static inline int
114xfs_filestream_put_ag(
115 xfs_mount_t *mp,
116 xfs_agnumber_t agno)
117{
118 struct xfs_perag *pag;
119 int ret;
120
121 pag = xfs_perag_get(mp, agno);
122 ret = atomic_dec_return(&pag->pagf_fstrms);
123 xfs_perag_put(pag);
124 return ret;
125}
126
127/* allocation selection flags */ 45/* allocation selection flags */
128typedef enum xfs_fstrm_alloc { 46typedef enum xfs_fstrm_alloc {
129 XFS_PICK_USERDATA = 1, 47 XFS_PICK_USERDATA = 1,
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 37a6f62c57b6..dbca5f5c37ba 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -24,14 +24,10 @@
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 27#include "xfs_mount.h"
30#include "xfs_bmap_btree.h" 28#include "xfs_bmap_btree.h"
31#include "xfs_alloc_btree.h" 29#include "xfs_alloc_btree.h"
32#include "xfs_ialloc_btree.h" 30#include "xfs_ialloc_btree.h"
33#include "xfs_dir2_sf.h"
34#include "xfs_attr_sf.h"
35#include "xfs_dinode.h" 31#include "xfs_dinode.h"
36#include "xfs_inode.h" 32#include "xfs_inode.h"
37#include "xfs_inode_item.h" 33#include "xfs_inode_item.h"
@@ -626,8 +622,7 @@ xfs_fs_log_dummy(
626 ip = mp->m_rootip; 622 ip = mp->m_rootip;
627 xfs_ilock(ip, XFS_ILOCK_EXCL); 623 xfs_ilock(ip, XFS_ILOCK_EXCL);
628 624
629 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 625 xfs_trans_ijoin(tp, ip);
630 xfs_trans_ihold(tp, ip);
631 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 626 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
632 xfs_trans_set_sync(tp); 627 xfs_trans_set_sync(tp);
633 error = xfs_trans_commit(tp, 0); 628 error = xfs_trans_commit(tp, 0);
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 9d884c127bb9..abf80ae1e95b 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -24,14 +24,10 @@
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 27#include "xfs_mount.h"
30#include "xfs_bmap_btree.h" 28#include "xfs_bmap_btree.h"
31#include "xfs_alloc_btree.h" 29#include "xfs_alloc_btree.h"
32#include "xfs_ialloc_btree.h" 30#include "xfs_ialloc_btree.h"
33#include "xfs_dir2_sf.h"
34#include "xfs_attr_sf.h"
35#include "xfs_dinode.h" 31#include "xfs_dinode.h"
36#include "xfs_inode.h" 32#include "xfs_inode.h"
37#include "xfs_btree.h" 33#include "xfs_btree.h"
@@ -1203,6 +1199,63 @@ error0:
1203 return error; 1199 return error;
1204} 1200}
1205 1201
1202STATIC int
1203xfs_imap_lookup(
1204 struct xfs_mount *mp,
1205 struct xfs_trans *tp,
1206 xfs_agnumber_t agno,
1207 xfs_agino_t agino,
1208 xfs_agblock_t agbno,
1209 xfs_agblock_t *chunk_agbno,
1210 xfs_agblock_t *offset_agbno,
1211 int flags)
1212{
1213 struct xfs_inobt_rec_incore rec;
1214 struct xfs_btree_cur *cur;
1215 struct xfs_buf *agbp;
1216 xfs_agino_t startino;
1217 int error;
1218 int i;
1219
1220 error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
1221 if (error) {
1222 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
1223 "xfs_ialloc_read_agi() returned "
1224 "error %d, agno %d",
1225 error, agno);
1226 return error;
1227 }
1228
1229 /*
1230 * derive and lookup the exact inode record for the given agino. If the
1231 * record cannot be found, then it's an invalid inode number and we
1232 * should abort.
1233 */
1234 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
1235 startino = agino & ~(XFS_IALLOC_INODES(mp) - 1);
1236 error = xfs_inobt_lookup(cur, startino, XFS_LOOKUP_EQ, &i);
1237 if (!error) {
1238 if (i)
1239 error = xfs_inobt_get_rec(cur, &rec, &i);
1240 if (!error && i == 0)
1241 error = EINVAL;
1242 }
1243
1244 xfs_trans_brelse(tp, agbp);
1245 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1246 if (error)
1247 return error;
1248
1249 /* for untrusted inodes check it is allocated first */
1250 if ((flags & XFS_IGET_UNTRUSTED) &&
1251 (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
1252 return EINVAL;
1253
1254 *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino);
1255 *offset_agbno = agbno - *chunk_agbno;
1256 return 0;
1257}
1258
1206/* 1259/*
1207 * Return the location of the inode in imap, for mapping it into a buffer. 1260 * Return the location of the inode in imap, for mapping it into a buffer.
1208 */ 1261 */
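
Editorial aside, not part of the patch: the new xfs_imap_lookup() above rounds the AG inode number down to the first inode of its chunk (agino & ~(XFS_IALLOC_INODES(mp) - 1)), looks that record up in the inode btree, and for XFS_IGET_UNTRUSTED callers also tests the chunk's free mask before trusting the mapping. A tiny standalone sketch of the mask arithmetic, with a made-up chunk size and free mask:

#include <stdio.h>

#define INODES_PER_CHUNK 64u	/* hypothetical chunk size for illustration */

int main(void)
{
	unsigned int agino = 517;					/* inode number within the AG */
	unsigned int startino = agino & ~(INODES_PER_CHUNK - 1);	/* first inode of its chunk   */
	unsigned int offset = agino - startino;				/* index inside the chunk     */
	unsigned long long free_mask = 1ULL << 5;			/* pretend inode 5 is free    */

	printf("chunk starts at %u, offset %u\n", startino, offset);	/* 512, 5 */
	if (free_mask & (1ULL << offset))
		printf("inode %u is unallocated: reject the untrusted lookup\n", agino);
	return 0;
}
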
@@ -1235,8 +1288,11 @@ xfs_imap(
1235 if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks || 1288 if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
1236 ino != XFS_AGINO_TO_INO(mp, agno, agino)) { 1289 ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
1237#ifdef DEBUG 1290#ifdef DEBUG
1238 /* no diagnostics for bulkstat, ino comes from userspace */ 1291 /*
1239 if (flags & XFS_IGET_BULKSTAT) 1292 * Don't output diagnostic information for untrusted inodes
1293 * as they can be invalid without implying corruption.
1294 */
1295 if (flags & XFS_IGET_UNTRUSTED)
1240 return XFS_ERROR(EINVAL); 1296 return XFS_ERROR(EINVAL);
1241 if (agno >= mp->m_sb.sb_agcount) { 1297 if (agno >= mp->m_sb.sb_agcount) {
1242 xfs_fs_cmn_err(CE_ALERT, mp, 1298 xfs_fs_cmn_err(CE_ALERT, mp,
@@ -1263,6 +1319,23 @@ xfs_imap(
1263 return XFS_ERROR(EINVAL); 1319 return XFS_ERROR(EINVAL);
1264 } 1320 }
1265 1321
1322 blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;
1323
1324 /*
1325 * For bulkstat and handle lookups, we have an untrusted inode number
1326 * that we have to verify is valid. We cannot do this just by reading
1327 * the inode buffer as it may have been unlinked and removed leaving
1328 * inodes in stale state on disk. Hence we have to do a btree lookup
1329 * in all cases where an untrusted inode number is passed.
1330 */
1331 if (flags & XFS_IGET_UNTRUSTED) {
1332 error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
1333 &chunk_agbno, &offset_agbno, flags);
1334 if (error)
1335 return error;
1336 goto out_map;
1337 }
1338
1266 /* 1339 /*
1267 * If the inode cluster size is the same as the blocksize or 1340 * If the inode cluster size is the same as the blocksize or
1268 * smaller we get to the buffer by simple arithmetics. 1341 * smaller we get to the buffer by simple arithmetics.
@@ -1277,24 +1350,6 @@ xfs_imap(
1277 return 0; 1350 return 0;
1278 } 1351 }
1279 1352
1280 blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;
1281
1282 /*
1283 * If we get a block number passed from bulkstat we can use it to
1284 * find the buffer easily.
1285 */
1286 if (imap->im_blkno) {
1287 offset = XFS_INO_TO_OFFSET(mp, ino);
1288 ASSERT(offset < mp->m_sb.sb_inopblock);
1289
1290 cluster_agbno = xfs_daddr_to_agbno(mp, imap->im_blkno);
1291 offset += (agbno - cluster_agbno) * mp->m_sb.sb_inopblock;
1292
1293 imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
1294 imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog);
1295 return 0;
1296 }
1297
1298 /* 1353 /*
1299 * If the inode chunks are aligned then use simple maths to 1354 * If the inode chunks are aligned then use simple maths to
1300 * find the location. Otherwise we have to do a btree 1355 * find the location. Otherwise we have to do a btree
@@ -1304,50 +1359,13 @@ xfs_imap(
1304 offset_agbno = agbno & mp->m_inoalign_mask; 1359 offset_agbno = agbno & mp->m_inoalign_mask;
1305 chunk_agbno = agbno - offset_agbno; 1360 chunk_agbno = agbno - offset_agbno;
1306 } else { 1361 } else {
1307 xfs_btree_cur_t *cur; /* inode btree cursor */ 1362 error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
1308 xfs_inobt_rec_incore_t chunk_rec; 1363 &chunk_agbno, &offset_agbno, flags);
1309 xfs_buf_t *agbp; /* agi buffer */
1310 int i; /* temp state */
1311
1312 error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
1313 if (error) {
1314 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
1315 "xfs_ialloc_read_agi() returned "
1316 "error %d, agno %d",
1317 error, agno);
1318 return error;
1319 }
1320
1321 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
1322 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
1323 if (error) {
1324 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
1325 "xfs_inobt_lookup() failed");
1326 goto error0;
1327 }
1328
1329 error = xfs_inobt_get_rec(cur, &chunk_rec, &i);
1330 if (error) {
1331 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
1332 "xfs_inobt_get_rec() failed");
1333 goto error0;
1334 }
1335 if (i == 0) {
1336#ifdef DEBUG
1337 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
1338 "xfs_inobt_get_rec() failed");
1339#endif /* DEBUG */
1340 error = XFS_ERROR(EINVAL);
1341 }
1342 error0:
1343 xfs_trans_brelse(tp, agbp);
1344 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1345 if (error) 1364 if (error)
1346 return error; 1365 return error;
1347 chunk_agbno = XFS_AGINO_TO_AGBNO(mp, chunk_rec.ir_startino);
1348 offset_agbno = agbno - chunk_agbno;
1349 } 1366 }
1350 1367
1368out_map:
1351 ASSERT(agbno >= chunk_agbno); 1369 ASSERT(agbno >= chunk_agbno);
1352 cluster_agbno = chunk_agbno + 1370 cluster_agbno = chunk_agbno +
1353 ((offset_agbno / blks_per_cluster) * blks_per_cluster); 1371 ((offset_agbno / blks_per_cluster) * blks_per_cluster);
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index c282a9af5393..d352862cefa0 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -24,14 +24,10 @@
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 27#include "xfs_mount.h"
30#include "xfs_bmap_btree.h" 28#include "xfs_bmap_btree.h"
31#include "xfs_alloc_btree.h" 29#include "xfs_alloc_btree.h"
32#include "xfs_ialloc_btree.h" 30#include "xfs_ialloc_btree.h"
33#include "xfs_dir2_sf.h"
34#include "xfs_attr_sf.h"
35#include "xfs_dinode.h" 31#include "xfs_dinode.h"
36#include "xfs_inode.h" 32#include "xfs_inode.h"
37#include "xfs_btree.h" 33#include "xfs_btree.h"
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 75df75f43d48..b1ecc6f97ade 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -25,14 +25,10 @@
25#include "xfs_trans.h" 25#include "xfs_trans.h"
26#include "xfs_sb.h" 26#include "xfs_sb.h"
27#include "xfs_ag.h" 27#include "xfs_ag.h"
28#include "xfs_dir2.h"
29#include "xfs_dmapi.h"
30#include "xfs_mount.h" 28#include "xfs_mount.h"
31#include "xfs_bmap_btree.h" 29#include "xfs_bmap_btree.h"
32#include "xfs_alloc_btree.h" 30#include "xfs_alloc_btree.h"
33#include "xfs_ialloc_btree.h" 31#include "xfs_ialloc_btree.h"
34#include "xfs_dir2_sf.h"
35#include "xfs_attr_sf.h"
36#include "xfs_dinode.h" 32#include "xfs_dinode.h"
37#include "xfs_inode.h" 33#include "xfs_inode.h"
38#include "xfs_btree.h" 34#include "xfs_btree.h"
@@ -95,7 +91,7 @@ xfs_inode_alloc(
95 return ip; 91 return ip;
96} 92}
97 93
98STATIC void 94void
99xfs_inode_free( 95xfs_inode_free(
100 struct xfs_inode *ip) 96 struct xfs_inode *ip)
101{ 97{
@@ -212,7 +208,7 @@ xfs_iget_cache_hit(
212 ip->i_flags &= ~XFS_INEW; 208 ip->i_flags &= ~XFS_INEW;
213 ip->i_flags |= XFS_IRECLAIMABLE; 209 ip->i_flags |= XFS_IRECLAIMABLE;
214 __xfs_inode_set_reclaim_tag(pag, ip); 210 __xfs_inode_set_reclaim_tag(pag, ip);
215 trace_xfs_iget_reclaim(ip); 211 trace_xfs_iget_reclaim_fail(ip);
216 goto out_error; 212 goto out_error;
217 } 213 }
218 214
@@ -227,6 +223,7 @@ xfs_iget_cache_hit(
227 } else { 223 } else {
228 /* If the VFS inode is being torn down, pause and try again. */ 224 /* If the VFS inode is being torn down, pause and try again. */
229 if (!igrab(inode)) { 225 if (!igrab(inode)) {
226 trace_xfs_iget_skip(ip);
230 error = EAGAIN; 227 error = EAGAIN;
231 goto out_error; 228 goto out_error;
232 } 229 }
@@ -234,6 +231,7 @@ xfs_iget_cache_hit(
234 /* We've got a live one. */ 231 /* We've got a live one. */
235 spin_unlock(&ip->i_flags_lock); 232 spin_unlock(&ip->i_flags_lock);
236 read_unlock(&pag->pag_ici_lock); 233 read_unlock(&pag->pag_ici_lock);
234 trace_xfs_iget_hit(ip);
237 } 235 }
238 236
239 if (lock_flags != 0) 237 if (lock_flags != 0)
@@ -242,7 +240,6 @@ xfs_iget_cache_hit(
242 xfs_iflags_clear(ip, XFS_ISTALE); 240 xfs_iflags_clear(ip, XFS_ISTALE);
243 XFS_STATS_INC(xs_ig_found); 241 XFS_STATS_INC(xs_ig_found);
244 242
245 trace_xfs_iget_found(ip);
246 return 0; 243 return 0;
247 244
248out_error: 245out_error:
@@ -259,24 +256,22 @@ xfs_iget_cache_miss(
259 xfs_trans_t *tp, 256 xfs_trans_t *tp,
260 xfs_ino_t ino, 257 xfs_ino_t ino,
261 struct xfs_inode **ipp, 258 struct xfs_inode **ipp,
262 xfs_daddr_t bno,
263 int flags, 259 int flags,
264 int lock_flags) 260 int lock_flags)
265{ 261{
266 struct xfs_inode *ip; 262 struct xfs_inode *ip;
267 int error; 263 int error;
268 unsigned long first_index, mask;
269 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino); 264 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
270 265
271 ip = xfs_inode_alloc(mp, ino); 266 ip = xfs_inode_alloc(mp, ino);
272 if (!ip) 267 if (!ip)
273 return ENOMEM; 268 return ENOMEM;
274 269
275 error = xfs_iread(mp, tp, ip, bno, flags); 270 error = xfs_iread(mp, tp, ip, flags);
276 if (error) 271 if (error)
277 goto out_destroy; 272 goto out_destroy;
278 273
279 xfs_itrace_entry(ip); 274 trace_xfs_iget_miss(ip);
280 275
281 if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) { 276 if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
282 error = ENOENT; 277 error = ENOENT;
@@ -302,8 +297,6 @@ xfs_iget_cache_miss(
302 BUG(); 297 BUG();
303 } 298 }
304 299
305 mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
306 first_index = agino & mask;
307 write_lock(&pag->pag_ici_lock); 300 write_lock(&pag->pag_ici_lock);
308 301
309 /* insert the new inode */ 302 /* insert the new inode */
@@ -322,7 +315,6 @@ xfs_iget_cache_miss(
322 write_unlock(&pag->pag_ici_lock); 315 write_unlock(&pag->pag_ici_lock);
323 radix_tree_preload_end(); 316 radix_tree_preload_end();
324 317
325 trace_xfs_iget_alloc(ip);
326 *ipp = ip; 318 *ipp = ip;
327 return 0; 319 return 0;
328 320
@@ -358,8 +350,6 @@ out_destroy:
358 * within the file system for the inode being requested. 350 * within the file system for the inode being requested.
359 * lock_flags -- flags indicating how to lock the inode. See the comment 351 * lock_flags -- flags indicating how to lock the inode. See the comment
360 * for xfs_ilock() for a list of valid values. 352 * for xfs_ilock() for a list of valid values.
361 * bno -- the block number starting the buffer containing the inode,
362 * if known (as by bulkstat), else 0.
363 */ 353 */
364int 354int
365xfs_iget( 355xfs_iget(
@@ -368,8 +358,7 @@ xfs_iget(
368 xfs_ino_t ino, 358 xfs_ino_t ino,
369 uint flags, 359 uint flags,
370 uint lock_flags, 360 uint lock_flags,
371 xfs_inode_t **ipp, 361 xfs_inode_t **ipp)
372 xfs_daddr_t bno)
373{ 362{
374 xfs_inode_t *ip; 363 xfs_inode_t *ip;
375 int error; 364 int error;
@@ -397,7 +386,7 @@ again:
397 read_unlock(&pag->pag_ici_lock); 386 read_unlock(&pag->pag_ici_lock);
398 XFS_STATS_INC(xs_ig_missed); 387 XFS_STATS_INC(xs_ig_missed);
399 388
400 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno, 389 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
401 flags, lock_flags); 390 flags, lock_flags);
402 if (error) 391 if (error)
403 goto out_error_or_again; 392 goto out_error_or_again;
@@ -426,97 +415,6 @@ out_error_or_again:
426} 415}
427 416
428/* 417/*
429 * Decrement reference count of an inode structure and unlock it.
430 *
431 * ip -- the inode being released
432 * lock_flags -- this parameter indicates the inode's locks to be
433 * to be released. See the comment on xfs_iunlock() for a list
434 * of valid values.
435 */
436void
437xfs_iput(xfs_inode_t *ip,
438 uint lock_flags)
439{
440 xfs_itrace_entry(ip);
441 xfs_iunlock(ip, lock_flags);
442 IRELE(ip);
443}
444
445/*
446 * Special iput for brand-new inodes that are still locked
447 */
448void
449xfs_iput_new(
450 xfs_inode_t *ip,
451 uint lock_flags)
452{
453 struct inode *inode = VFS_I(ip);
454
455 xfs_itrace_entry(ip);
456
457 if ((ip->i_d.di_mode == 0)) {
458 ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
459 make_bad_inode(inode);
460 }
461 if (inode->i_state & I_NEW)
462 unlock_new_inode(inode);
463 if (lock_flags)
464 xfs_iunlock(ip, lock_flags);
465 IRELE(ip);
466}
467
468/*
469 * This is called to free all the memory associated with an inode.
470 * It must free the inode itself and any buffers allocated for
471 * if_extents/if_data and if_broot. It must also free the lock
472 * associated with the inode.
473 *
474 * Note: because we don't initialise everything on reallocation out
475 * of the zone, we must ensure we nullify everything correctly before
476 * freeing the structure.
477 */
478void
479xfs_ireclaim(
480 struct xfs_inode *ip)
481{
482 struct xfs_mount *mp = ip->i_mount;
483 struct xfs_perag *pag;
484 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
485
486 XFS_STATS_INC(xs_ig_reclaims);
487
488 /*
489 * Remove the inode from the per-AG radix tree.
490 *
491 * Because radix_tree_delete won't complain even if the item was never
492 * added to the tree assert that it's been there before to catch
493 * problems with the inode life time early on.
494 */
495 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
496 write_lock(&pag->pag_ici_lock);
497 if (!radix_tree_delete(&pag->pag_ici_root, agino))
498 ASSERT(0);
499 write_unlock(&pag->pag_ici_lock);
500 xfs_perag_put(pag);
501
502 /*
503 * Here we do an (almost) spurious inode lock in order to coordinate
504 * with inode cache radix tree lookups. This is because the lookup
505 * can reference the inodes in the cache without taking references.
506 *
507 * We make that OK here by ensuring that we wait until the inode is
508 * unlocked after the lookup before we go ahead and free it. We get
509 * both the ilock and the iolock because the code may need to drop the
510 * ilock one but will still hold the iolock.
511 */
512 xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
513 xfs_qm_dqdetach(ip);
514 xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
515
516 xfs_inode_free(ip);
517}
518
519/*
520 * This is a wrapper routine around the xfs_ilock() routine 418 * This is a wrapper routine around the xfs_ilock() routine
521 * used to centralize some grungy code. It is used in places 419 * used to centralize some grungy code. It is used in places
522 * that wish to lock the inode solely for reading the extents. 420 * that wish to lock the inode solely for reading the extents.
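
Editorial aside, not part of the patch: the removed xfs_ireclaim() documented a trick worth keeping in mind, and presumably its logic now lives with the reclaim code that calls the newly exported xfs_inode_free(). After deleting the object from the lookup structure, take and immediately drop the lock that lock-free lookups hold, so any lookup that found the object before the deletion has finished with it before the memory is freed. A generic sketch of that barrier:

#include <pthread.h>
#include <stdlib.h>

struct cached_obj {
	pthread_mutex_t lock;
	/* ... payload that lookups may touch without a reference count ... */
};

/*
 * Free an object that lookups access under obj->lock without taking a
 * reference.  The caller must already have removed the object from the
 * lookup structure (radix tree, hash, ...), so no new lookup can find
 * it; the lock/unlock pair then waits out any lookup that found it
 * earlier and still holds the lock.
 */
static void reclaim(struct cached_obj *obj)
{
	pthread_mutex_lock(&obj->lock);
	pthread_mutex_unlock(&obj->lock);
	pthread_mutex_destroy(&obj->lock);
	free(obj);
}
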
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index d53c39de7d05..68415cb4f23c 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -27,13 +27,10 @@
27#include "xfs_trans_priv.h" 27#include "xfs_trans_priv.h"
28#include "xfs_sb.h" 28#include "xfs_sb.h"
29#include "xfs_ag.h" 29#include "xfs_ag.h"
30#include "xfs_dir2.h"
31#include "xfs_dmapi.h"
32#include "xfs_mount.h" 30#include "xfs_mount.h"
33#include "xfs_bmap_btree.h" 31#include "xfs_bmap_btree.h"
34#include "xfs_alloc_btree.h" 32#include "xfs_alloc_btree.h"
35#include "xfs_ialloc_btree.h" 33#include "xfs_ialloc_btree.h"
36#include "xfs_dir2_sf.h"
37#include "xfs_attr_sf.h" 34#include "xfs_attr_sf.h"
38#include "xfs_dinode.h" 35#include "xfs_dinode.h"
39#include "xfs_inode.h" 36#include "xfs_inode.h"
@@ -44,7 +41,6 @@
44#include "xfs_alloc.h" 41#include "xfs_alloc.h"
45#include "xfs_ialloc.h" 42#include "xfs_ialloc.h"
46#include "xfs_bmap.h" 43#include "xfs_bmap.h"
47#include "xfs_rw.h"
48#include "xfs_error.h" 44#include "xfs_error.h"
49#include "xfs_utils.h" 45#include "xfs_utils.h"
50#include "xfs_quota.h" 46#include "xfs_quota.h"
@@ -177,7 +173,7 @@ xfs_imap_to_bp(
177 if (unlikely(XFS_TEST_ERROR(!di_ok, mp, 173 if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
178 XFS_ERRTAG_ITOBP_INOTOBP, 174 XFS_ERRTAG_ITOBP_INOTOBP,
179 XFS_RANDOM_ITOBP_INOTOBP))) { 175 XFS_RANDOM_ITOBP_INOTOBP))) {
180 if (iget_flags & XFS_IGET_BULKSTAT) { 176 if (iget_flags & XFS_IGET_UNTRUSTED) {
181 xfs_trans_brelse(tp, bp); 177 xfs_trans_brelse(tp, bp);
182 return XFS_ERROR(EINVAL); 178 return XFS_ERROR(EINVAL);
183 } 179 }
@@ -426,7 +422,7 @@ xfs_iformat(
426 if (!XFS_DFORK_Q(dip)) 422 if (!XFS_DFORK_Q(dip))
427 return 0; 423 return 0;
428 ASSERT(ip->i_afp == NULL); 424 ASSERT(ip->i_afp == NULL);
429 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP); 425 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);
430 ip->i_afp->if_ext_max = 426 ip->i_afp->if_ext_max =
431 XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); 427 XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
432 switch (dip->di_aformat) { 428 switch (dip->di_aformat) {
@@ -509,7 +505,7 @@ xfs_iformat_local(
509 ifp->if_u1.if_data = ifp->if_u2.if_inline_data; 505 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
510 else { 506 else {
511 real_size = roundup(size, 4); 507 real_size = roundup(size, 4);
512 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); 508 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
513 } 509 }
514 ifp->if_bytes = size; 510 ifp->if_bytes = size;
515 ifp->if_real_bytes = real_size; 511 ifp->if_real_bytes = real_size;
@@ -636,7 +632,7 @@ xfs_iformat_btree(
636 } 632 }
637 633
638 ifp->if_broot_bytes = size; 634 ifp->if_broot_bytes = size;
639 ifp->if_broot = kmem_alloc(size, KM_SLEEP); 635 ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
640 ASSERT(ifp->if_broot != NULL); 636 ASSERT(ifp->if_broot != NULL);
641 /* 637 /*
642 * Copy and convert from the on-disk structure 638 * Copy and convert from the on-disk structure
@@ -787,7 +783,6 @@ xfs_iread(
787 xfs_mount_t *mp, 783 xfs_mount_t *mp,
788 xfs_trans_t *tp, 784 xfs_trans_t *tp,
789 xfs_inode_t *ip, 785 xfs_inode_t *ip,
790 xfs_daddr_t bno,
791 uint iget_flags) 786 uint iget_flags)
792{ 787{
793 xfs_buf_t *bp; 788 xfs_buf_t *bp;
@@ -797,11 +792,9 @@ xfs_iread(
797 /* 792 /*
798 * Fill in the location information in the in-core inode. 793 * Fill in the location information in the in-core inode.
799 */ 794 */
800 ip->i_imap.im_blkno = bno;
801 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags); 795 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
802 if (error) 796 if (error)
803 return error; 797 return error;
804 ASSERT(bno == 0 || bno == ip->i_imap.im_blkno);
805 798
806 /* 799 /*
807 * Get pointers to the on-disk inode and the buffer containing it. 800 * Get pointers to the on-disk inode and the buffer containing it.
@@ -925,7 +918,6 @@ xfs_iread_extents(
925 int error; 918 int error;
926 xfs_ifork_t *ifp; 919 xfs_ifork_t *ifp;
927 xfs_extnum_t nextents; 920 xfs_extnum_t nextents;
928 size_t size;
929 921
930 if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { 922 if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
931 XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW, 923 XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
@@ -933,7 +925,6 @@ xfs_iread_extents(
933 return XFS_ERROR(EFSCORRUPTED); 925 return XFS_ERROR(EFSCORRUPTED);
934 } 926 }
935 nextents = XFS_IFORK_NEXTENTS(ip, whichfork); 927 nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
936 size = nextents * sizeof(xfs_bmbt_rec_t);
937 ifp = XFS_IFORK_PTR(ip, whichfork); 928 ifp = XFS_IFORK_PTR(ip, whichfork);
938 929
939 /* 930 /*
@@ -1229,7 +1220,7 @@ xfs_isize_check(
1229 (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) - 1220 (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
1230 map_first), 1221 map_first),
1231 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps, 1222 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
1232 NULL, NULL)) 1223 NULL))
1233 return; 1224 return;
1234 ASSERT(nimaps == 1); 1225 ASSERT(nimaps == 1);
1235 ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK); 1226 ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
@@ -1463,7 +1454,7 @@ xfs_itruncate_finish(
1463 ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES); 1454 ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
1464 ASSERT(ip->i_transp == *tp); 1455 ASSERT(ip->i_transp == *tp);
1465 ASSERT(ip->i_itemp != NULL); 1456 ASSERT(ip->i_itemp != NULL);
1466 ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD); 1457 ASSERT(ip->i_itemp->ili_lock_flags == 0);
1467 1458
1468 1459
1469 ntp = *tp; 1460 ntp = *tp;
@@ -1592,11 +1583,10 @@ xfs_itruncate_finish(
1592 xfs_bmap_init(&free_list, &first_block); 1583 xfs_bmap_init(&free_list, &first_block);
1593 error = xfs_bunmapi(ntp, ip, 1584 error = xfs_bunmapi(ntp, ip,
1594 first_unmap_block, unmap_len, 1585 first_unmap_block, unmap_len,
1595 xfs_bmapi_aflag(fork) | 1586 xfs_bmapi_aflag(fork),
1596 (sync ? 0 : XFS_BMAPI_ASYNC),
1597 XFS_ITRUNC_MAX_EXTENTS, 1587 XFS_ITRUNC_MAX_EXTENTS,
1598 &first_block, &free_list, 1588 &first_block, &free_list,
1599 NULL, &done); 1589 &done);
1600 if (error) { 1590 if (error) {
1601 /* 1591 /*
1602 * If the bunmapi call encounters an error, 1592 * If the bunmapi call encounters an error,
@@ -1615,12 +1605,8 @@ xfs_itruncate_finish(
1615 */ 1605 */
1616 error = xfs_bmap_finish(tp, &free_list, &committed); 1606 error = xfs_bmap_finish(tp, &free_list, &committed);
1617 ntp = *tp; 1607 ntp = *tp;
1618 if (committed) { 1608 if (committed)
1619 /* link the inode into the next xact in the chain */ 1609 xfs_trans_ijoin(ntp, ip);
1620 xfs_trans_ijoin(ntp, ip,
1621 XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1622 xfs_trans_ihold(ntp, ip);
1623 }
1624 1610
1625 if (error) { 1611 if (error) {
1626 /* 1612 /*
@@ -1649,9 +1635,7 @@ xfs_itruncate_finish(
1649 error = xfs_trans_commit(*tp, 0); 1635 error = xfs_trans_commit(*tp, 0);
1650 *tp = ntp; 1636 *tp = ntp;
1651 1637
1652 /* link the inode into the next transaction in the chain */ 1638 xfs_trans_ijoin(ntp, ip);
1653 xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1654 xfs_trans_ihold(ntp, ip);
1655 1639
1656 if (error) 1640 if (error)
1657 return error; 1641 return error;
@@ -1988,7 +1972,7 @@ xfs_ifree_cluster(
1988 if (lip->li_type == XFS_LI_INODE) { 1972 if (lip->li_type == XFS_LI_INODE) {
1989 iip = (xfs_inode_log_item_t *)lip; 1973 iip = (xfs_inode_log_item_t *)lip;
1990 ASSERT(iip->ili_logged == 1); 1974 ASSERT(iip->ili_logged == 1);
1991 lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done; 1975 lip->li_cb = xfs_istale_done;
1992 xfs_trans_ail_copy_lsn(mp->m_ail, 1976 xfs_trans_ail_copy_lsn(mp->m_ail,
1993 &iip->ili_flush_lsn, 1977 &iip->ili_flush_lsn,
1994 &iip->ili_item.li_lsn); 1978 &iip->ili_item.li_lsn);
@@ -2058,9 +2042,8 @@ xfs_ifree_cluster(
2058 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, 2042 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2059 &iip->ili_item.li_lsn); 2043 &iip->ili_item.li_lsn);
2060 2044
2061 xfs_buf_attach_iodone(bp, 2045 xfs_buf_attach_iodone(bp, xfs_istale_done,
2062 (void(*)(xfs_buf_t*,xfs_log_item_t*)) 2046 &iip->ili_item);
2063 xfs_istale_done, (xfs_log_item_t *)iip);
2064 2047
2065 if (ip != free_ip) 2048 if (ip != free_ip)
2066 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2049 xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -2206,7 +2189,7 @@ xfs_iroot_realloc(
2206 */ 2189 */
2207 if (ifp->if_broot_bytes == 0) { 2190 if (ifp->if_broot_bytes == 0) {
2208 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff); 2191 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
2209 ifp->if_broot = kmem_alloc(new_size, KM_SLEEP); 2192 ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
2210 ifp->if_broot_bytes = (int)new_size; 2193 ifp->if_broot_bytes = (int)new_size;
2211 return; 2194 return;
2212 } 2195 }
@@ -2222,7 +2205,7 @@ xfs_iroot_realloc(
2222 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); 2205 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2223 ifp->if_broot = kmem_realloc(ifp->if_broot, new_size, 2206 ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
2224 (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */ 2207 (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
2225 KM_SLEEP); 2208 KM_SLEEP | KM_NOFS);
2226 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, 2209 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
2227 ifp->if_broot_bytes); 2210 ifp->if_broot_bytes);
2228 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, 2211 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
@@ -2248,7 +2231,7 @@ xfs_iroot_realloc(
2248 else 2231 else
2249 new_size = 0; 2232 new_size = 0;
2250 if (new_size > 0) { 2233 if (new_size > 0) {
2251 new_broot = kmem_alloc(new_size, KM_SLEEP); 2234 new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
2252 /* 2235 /*
2253 * First copy over the btree block header. 2236 * First copy over the btree block header.
2254 */ 2237 */
@@ -2352,7 +2335,8 @@ xfs_idata_realloc(
2352 real_size = roundup(new_size, 4); 2335 real_size = roundup(new_size, 4);
2353 if (ifp->if_u1.if_data == NULL) { 2336 if (ifp->if_u1.if_data == NULL) {
2354 ASSERT(ifp->if_real_bytes == 0); 2337 ASSERT(ifp->if_real_bytes == 0);
2355 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); 2338 ifp->if_u1.if_data = kmem_alloc(real_size,
2339 KM_SLEEP | KM_NOFS);
2356 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { 2340 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2357 /* 2341 /*
2358 * Only do the realloc if the underlying size 2342 * Only do the realloc if the underlying size
@@ -2363,11 +2347,12 @@ xfs_idata_realloc(
2363 kmem_realloc(ifp->if_u1.if_data, 2347 kmem_realloc(ifp->if_u1.if_data,
2364 real_size, 2348 real_size,
2365 ifp->if_real_bytes, 2349 ifp->if_real_bytes,
2366 KM_SLEEP); 2350 KM_SLEEP | KM_NOFS);
2367 } 2351 }
2368 } else { 2352 } else {
2369 ASSERT(ifp->if_real_bytes == 0); 2353 ASSERT(ifp->if_real_bytes == 0);
2370 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); 2354 ifp->if_u1.if_data = kmem_alloc(real_size,
2355 KM_SLEEP | KM_NOFS);
2371 memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data, 2356 memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
2372 ifp->if_bytes); 2357 ifp->if_bytes);
2373 } 2358 }
@@ -2734,7 +2719,6 @@ cluster_corrupt_out:
2734 * mark it as stale and brelse. 2719 * mark it as stale and brelse.
2735 */ 2720 */
2736 if (XFS_BUF_IODONE_FUNC(bp)) { 2721 if (XFS_BUF_IODONE_FUNC(bp)) {
2737 XFS_BUF_CLR_BDSTRAT_FUNC(bp);
2738 XFS_BUF_UNDONE(bp); 2722 XFS_BUF_UNDONE(bp);
2739 XFS_BUF_STALE(bp); 2723 XFS_BUF_STALE(bp);
2740 XFS_BUF_ERROR(bp,EIO); 2724 XFS_BUF_ERROR(bp,EIO);
@@ -3072,8 +3056,7 @@ xfs_iflush_int(
3072 * and unlock the inode's flush lock when the inode is 3056 * and unlock the inode's flush lock when the inode is
3073 * completely written to disk. 3057 * completely written to disk.
3074 */ 3058 */
3075 xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*)) 3059 xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
3076 xfs_iflush_done, (xfs_log_item_t *)iip);
3077 3060
3078 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); 3061 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
3079 ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL); 3062 ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL);
@@ -3517,13 +3500,11 @@ xfs_iext_remove_indirect(
3517 xfs_extnum_t ext_diff; /* extents to remove in current list */ 3500 xfs_extnum_t ext_diff; /* extents to remove in current list */
3518 xfs_extnum_t nex1; /* number of extents before idx */ 3501 xfs_extnum_t nex1; /* number of extents before idx */
3519 xfs_extnum_t nex2; /* extents after idx + count */ 3502 xfs_extnum_t nex2; /* extents after idx + count */
3520 int nlists; /* entries in indirection array */
3521 int page_idx = idx; /* index in target extent list */ 3503 int page_idx = idx; /* index in target extent list */
3522 3504
3523 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3505 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3524 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); 3506 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3525 ASSERT(erp != NULL); 3507 ASSERT(erp != NULL);
3526 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3527 nex1 = page_idx; 3508 nex1 = page_idx;
3528 ext_cnt = count; 3509 ext_cnt = count;
3529 while (ext_cnt) { 3510 while (ext_cnt) {
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 9965e40a4615..0898c5417d12 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -442,9 +442,7 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
442 * xfs_iget.c prototypes. 442 * xfs_iget.c prototypes.
443 */ 443 */
444int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, 444int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
445 uint, uint, xfs_inode_t **, xfs_daddr_t); 445 uint, uint, xfs_inode_t **);
446void xfs_iput(xfs_inode_t *, uint);
447void xfs_iput_new(xfs_inode_t *, uint);
448void xfs_ilock(xfs_inode_t *, uint); 446void xfs_ilock(xfs_inode_t *, uint);
449int xfs_ilock_nowait(xfs_inode_t *, uint); 447int xfs_ilock_nowait(xfs_inode_t *, uint);
450void xfs_iunlock(xfs_inode_t *, uint); 448void xfs_iunlock(xfs_inode_t *, uint);
@@ -452,7 +450,7 @@ void xfs_ilock_demote(xfs_inode_t *, uint);
452int xfs_isilocked(xfs_inode_t *, uint); 450int xfs_isilocked(xfs_inode_t *, uint);
453uint xfs_ilock_map_shared(xfs_inode_t *); 451uint xfs_ilock_map_shared(xfs_inode_t *);
454void xfs_iunlock_map_shared(xfs_inode_t *, uint); 452void xfs_iunlock_map_shared(xfs_inode_t *, uint);
455void xfs_ireclaim(xfs_inode_t *); 453void xfs_inode_free(struct xfs_inode *ip);
456 454
457/* 455/*
458 * xfs_inode.c prototypes. 456 * xfs_inode.c prototypes.
@@ -500,7 +498,7 @@ do { \
500 * Flags for xfs_iget() 498 * Flags for xfs_iget()
501 */ 499 */
502#define XFS_IGET_CREATE 0x1 500#define XFS_IGET_CREATE 0x1
503#define XFS_IGET_BULKSTAT 0x2 501#define XFS_IGET_UNTRUSTED 0x2
504 502
505int xfs_inotobp(struct xfs_mount *, struct xfs_trans *, 503int xfs_inotobp(struct xfs_mount *, struct xfs_trans *,
506 xfs_ino_t, struct xfs_dinode **, 504 xfs_ino_t, struct xfs_dinode **,
@@ -509,7 +507,7 @@ int xfs_itobp(struct xfs_mount *, struct xfs_trans *,
509 struct xfs_inode *, struct xfs_dinode **, 507 struct xfs_inode *, struct xfs_dinode **,
510 struct xfs_buf **, uint); 508 struct xfs_buf **, uint);
511int xfs_iread(struct xfs_mount *, struct xfs_trans *, 509int xfs_iread(struct xfs_mount *, struct xfs_trans *,
512 struct xfs_inode *, xfs_daddr_t, uint); 510 struct xfs_inode *, uint);
513void xfs_dinode_to_disk(struct xfs_dinode *, 511void xfs_dinode_to_disk(struct xfs_dinode *,
514 struct xfs_icdinode *); 512 struct xfs_icdinode *);
515void xfs_idestroy_fork(struct xfs_inode *, int); 513void xfs_idestroy_fork(struct xfs_inode *, int);
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index cf8249a60004..fe00777e2796 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -22,30 +22,26 @@
22#include "xfs_log.h" 22#include "xfs_log.h"
23#include "xfs_inum.h" 23#include "xfs_inum.h"
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_buf_item.h"
26#include "xfs_sb.h" 25#include "xfs_sb.h"
27#include "xfs_ag.h" 26#include "xfs_ag.h"
28#include "xfs_dir2.h"
29#include "xfs_dmapi.h"
30#include "xfs_mount.h" 27#include "xfs_mount.h"
31#include "xfs_trans_priv.h" 28#include "xfs_trans_priv.h"
32#include "xfs_bmap_btree.h" 29#include "xfs_bmap_btree.h"
33#include "xfs_alloc_btree.h"
34#include "xfs_ialloc_btree.h"
35#include "xfs_dir2_sf.h"
36#include "xfs_attr_sf.h"
37#include "xfs_dinode.h" 30#include "xfs_dinode.h"
38#include "xfs_inode.h" 31#include "xfs_inode.h"
39#include "xfs_inode_item.h" 32#include "xfs_inode_item.h"
40#include "xfs_btree.h"
41#include "xfs_ialloc.h"
42#include "xfs_rw.h"
43#include "xfs_error.h" 33#include "xfs_error.h"
44#include "xfs_trace.h" 34#include "xfs_trace.h"
45 35
46 36
47kmem_zone_t *xfs_ili_zone; /* inode log item zone */ 37kmem_zone_t *xfs_ili_zone; /* inode log item zone */
48 38
39static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip)
40{
41 return container_of(lip, struct xfs_inode_log_item, ili_item);
42}
43
44
49/* 45/*
50 * This returns the number of iovecs needed to log the given inode item. 46 * This returns the number of iovecs needed to log the given inode item.
51 * 47 *
@@ -55,13 +51,11 @@ kmem_zone_t *xfs_ili_zone; /* inode log item zone */
55 */ 51 */
56STATIC uint 52STATIC uint
57xfs_inode_item_size( 53xfs_inode_item_size(
58 xfs_inode_log_item_t *iip) 54 struct xfs_log_item *lip)
59{ 55{
60 uint nvecs; 56 struct xfs_inode_log_item *iip = INODE_ITEM(lip);
61 xfs_inode_t *ip; 57 struct xfs_inode *ip = iip->ili_inode;
62 58 uint nvecs = 2;
63 ip = iip->ili_inode;
64 nvecs = 2;
65 59
66 /* 60 /*
67 * Only log the data/extents/b-tree root if there is something 61 * Only log the data/extents/b-tree root if there is something
@@ -212,21 +206,17 @@ xfs_inode_item_size(
212 */ 206 */
213STATIC void 207STATIC void
214xfs_inode_item_format( 208xfs_inode_item_format(
215 xfs_inode_log_item_t *iip, 209 struct xfs_log_item *lip,
216 xfs_log_iovec_t *log_vector) 210 struct xfs_log_iovec *vecp)
217{ 211{
212 struct xfs_inode_log_item *iip = INODE_ITEM(lip);
213 struct xfs_inode *ip = iip->ili_inode;
218 uint nvecs; 214 uint nvecs;
219 xfs_log_iovec_t *vecp;
220 xfs_inode_t *ip;
221 size_t data_bytes; 215 size_t data_bytes;
222 xfs_bmbt_rec_t *ext_buffer; 216 xfs_bmbt_rec_t *ext_buffer;
223 int nrecs;
224 xfs_mount_t *mp; 217 xfs_mount_t *mp;
225 218
226 ip = iip->ili_inode; 219 vecp->i_addr = &iip->ili_format;
227 vecp = log_vector;
228
229 vecp->i_addr = (xfs_caddr_t)&iip->ili_format;
230 vecp->i_len = sizeof(xfs_inode_log_format_t); 220 vecp->i_len = sizeof(xfs_inode_log_format_t);
231 vecp->i_type = XLOG_REG_TYPE_IFORMAT; 221 vecp->i_type = XLOG_REG_TYPE_IFORMAT;
232 vecp++; 222 vecp++;
@@ -277,7 +267,7 @@ xfs_inode_item_format(
277 */ 267 */
278 xfs_synchronize_times(ip); 268 xfs_synchronize_times(ip);
279 269
280 vecp->i_addr = (xfs_caddr_t)&ip->i_d; 270 vecp->i_addr = &ip->i_d;
281 vecp->i_len = sizeof(struct xfs_icdinode); 271 vecp->i_len = sizeof(struct xfs_icdinode);
282 vecp->i_type = XLOG_REG_TYPE_ICORE; 272 vecp->i_type = XLOG_REG_TYPE_ICORE;
283 vecp++; 273 vecp++;
@@ -323,18 +313,17 @@ xfs_inode_item_format(
323 ASSERT(ip->i_df.if_u1.if_extents != NULL); 313 ASSERT(ip->i_df.if_u1.if_extents != NULL);
324 ASSERT(ip->i_d.di_nextents > 0); 314 ASSERT(ip->i_d.di_nextents > 0);
325 ASSERT(iip->ili_extents_buf == NULL); 315 ASSERT(iip->ili_extents_buf == NULL);
326 nrecs = ip->i_df.if_bytes / 316 ASSERT((ip->i_df.if_bytes /
327 (uint)sizeof(xfs_bmbt_rec_t); 317 (uint)sizeof(xfs_bmbt_rec_t)) > 0);
328 ASSERT(nrecs > 0);
329#ifdef XFS_NATIVE_HOST 318#ifdef XFS_NATIVE_HOST
330 if (nrecs == ip->i_d.di_nextents) { 319 if (ip->i_d.di_nextents == ip->i_df.if_bytes /
320 (uint)sizeof(xfs_bmbt_rec_t)) {
331 /* 321 /*
332 * There are no delayed allocation 322 * There are no delayed allocation
333 * extents, so just point to the 323 * extents, so just point to the
334 * real extents array. 324 * real extents array.
335 */ 325 */
336 vecp->i_addr = 326 vecp->i_addr = ip->i_df.if_u1.if_extents;
337 (char *)(ip->i_df.if_u1.if_extents);
338 vecp->i_len = ip->i_df.if_bytes; 327 vecp->i_len = ip->i_df.if_bytes;
339 vecp->i_type = XLOG_REG_TYPE_IEXT; 328 vecp->i_type = XLOG_REG_TYPE_IEXT;
340 } else 329 } else
@@ -352,7 +341,7 @@ xfs_inode_item_format(
352 ext_buffer = kmem_alloc(ip->i_df.if_bytes, 341 ext_buffer = kmem_alloc(ip->i_df.if_bytes,
353 KM_SLEEP); 342 KM_SLEEP);
354 iip->ili_extents_buf = ext_buffer; 343 iip->ili_extents_buf = ext_buffer;
355 vecp->i_addr = (xfs_caddr_t)ext_buffer; 344 vecp->i_addr = ext_buffer;
356 vecp->i_len = xfs_iextents_copy(ip, ext_buffer, 345 vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
357 XFS_DATA_FORK); 346 XFS_DATA_FORK);
358 vecp->i_type = XLOG_REG_TYPE_IEXT; 347 vecp->i_type = XLOG_REG_TYPE_IEXT;
@@ -371,7 +360,7 @@ xfs_inode_item_format(
371 if (iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) { 360 if (iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) {
372 ASSERT(ip->i_df.if_broot_bytes > 0); 361 ASSERT(ip->i_df.if_broot_bytes > 0);
373 ASSERT(ip->i_df.if_broot != NULL); 362 ASSERT(ip->i_df.if_broot != NULL);
374 vecp->i_addr = (xfs_caddr_t)ip->i_df.if_broot; 363 vecp->i_addr = ip->i_df.if_broot;
375 vecp->i_len = ip->i_df.if_broot_bytes; 364 vecp->i_len = ip->i_df.if_broot_bytes;
376 vecp->i_type = XLOG_REG_TYPE_IBROOT; 365 vecp->i_type = XLOG_REG_TYPE_IBROOT;
377 vecp++; 366 vecp++;
@@ -389,7 +378,7 @@ xfs_inode_item_format(
389 ASSERT(ip->i_df.if_u1.if_data != NULL); 378 ASSERT(ip->i_df.if_u1.if_data != NULL);
390 ASSERT(ip->i_d.di_size > 0); 379 ASSERT(ip->i_d.di_size > 0);
391 380
392 vecp->i_addr = (xfs_caddr_t)ip->i_df.if_u1.if_data; 381 vecp->i_addr = ip->i_df.if_u1.if_data;
393 /* 382 /*
394 * Round i_bytes up to a word boundary. 383 * Round i_bytes up to a word boundary.
395 * The underlying memory is guaranteed to 384 * The underlying memory is guaranteed to
@@ -437,7 +426,7 @@ xfs_inode_item_format(
437 * Assert that no attribute-related log flags are set. 426 * Assert that no attribute-related log flags are set.
438 */ 427 */
439 if (!XFS_IFORK_Q(ip)) { 428 if (!XFS_IFORK_Q(ip)) {
440 ASSERT(nvecs == iip->ili_item.li_desc->lid_size); 429 ASSERT(nvecs == lip->li_desc->lid_size);
441 iip->ili_format.ilf_size = nvecs; 430 iip->ili_format.ilf_size = nvecs;
442 ASSERT(!(iip->ili_format.ilf_fields & 431 ASSERT(!(iip->ili_format.ilf_fields &
443 (XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT))); 432 (XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT)));
@@ -449,21 +438,21 @@ xfs_inode_item_format(
449 ASSERT(!(iip->ili_format.ilf_fields & 438 ASSERT(!(iip->ili_format.ilf_fields &
450 (XFS_ILOG_ADATA | XFS_ILOG_ABROOT))); 439 (XFS_ILOG_ADATA | XFS_ILOG_ABROOT)));
451 if (iip->ili_format.ilf_fields & XFS_ILOG_AEXT) { 440 if (iip->ili_format.ilf_fields & XFS_ILOG_AEXT) {
452 ASSERT(ip->i_afp->if_bytes > 0);
453 ASSERT(ip->i_afp->if_u1.if_extents != NULL);
454 ASSERT(ip->i_d.di_anextents > 0);
455#ifdef DEBUG 441#ifdef DEBUG
456 nrecs = ip->i_afp->if_bytes / 442 int nrecs = ip->i_afp->if_bytes /
457 (uint)sizeof(xfs_bmbt_rec_t); 443 (uint)sizeof(xfs_bmbt_rec_t);
458#endif
459 ASSERT(nrecs > 0); 444 ASSERT(nrecs > 0);
460 ASSERT(nrecs == ip->i_d.di_anextents); 445 ASSERT(nrecs == ip->i_d.di_anextents);
446 ASSERT(ip->i_afp->if_bytes > 0);
447 ASSERT(ip->i_afp->if_u1.if_extents != NULL);
448 ASSERT(ip->i_d.di_anextents > 0);
449#endif
461#ifdef XFS_NATIVE_HOST 450#ifdef XFS_NATIVE_HOST
462 /* 451 /*
463 * There are not delayed allocation extents 452 * There are not delayed allocation extents
464 * for attributes, so just point at the array. 453 * for attributes, so just point at the array.
465 */ 454 */
466 vecp->i_addr = (char *)(ip->i_afp->if_u1.if_extents); 455 vecp->i_addr = ip->i_afp->if_u1.if_extents;
467 vecp->i_len = ip->i_afp->if_bytes; 456 vecp->i_len = ip->i_afp->if_bytes;
468#else 457#else
469 ASSERT(iip->ili_aextents_buf == NULL); 458 ASSERT(iip->ili_aextents_buf == NULL);
@@ -473,7 +462,7 @@ xfs_inode_item_format(
473 ext_buffer = kmem_alloc(ip->i_afp->if_bytes, 462 ext_buffer = kmem_alloc(ip->i_afp->if_bytes,
474 KM_SLEEP); 463 KM_SLEEP);
475 iip->ili_aextents_buf = ext_buffer; 464 iip->ili_aextents_buf = ext_buffer;
476 vecp->i_addr = (xfs_caddr_t)ext_buffer; 465 vecp->i_addr = ext_buffer;
477 vecp->i_len = xfs_iextents_copy(ip, ext_buffer, 466 vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
478 XFS_ATTR_FORK); 467 XFS_ATTR_FORK);
479#endif 468#endif
@@ -490,7 +479,7 @@ xfs_inode_item_format(
490 if (iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) { 479 if (iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) {
491 ASSERT(ip->i_afp->if_broot_bytes > 0); 480 ASSERT(ip->i_afp->if_broot_bytes > 0);
492 ASSERT(ip->i_afp->if_broot != NULL); 481 ASSERT(ip->i_afp->if_broot != NULL);
493 vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_broot; 482 vecp->i_addr = ip->i_afp->if_broot;
494 vecp->i_len = ip->i_afp->if_broot_bytes; 483 vecp->i_len = ip->i_afp->if_broot_bytes;
495 vecp->i_type = XLOG_REG_TYPE_IATTR_BROOT; 484 vecp->i_type = XLOG_REG_TYPE_IATTR_BROOT;
496 vecp++; 485 vecp++;
@@ -506,7 +495,7 @@ xfs_inode_item_format(
506 ASSERT(ip->i_afp->if_bytes > 0); 495 ASSERT(ip->i_afp->if_bytes > 0);
507 ASSERT(ip->i_afp->if_u1.if_data != NULL); 496 ASSERT(ip->i_afp->if_u1.if_data != NULL);
508 497
509 vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_u1.if_data; 498 vecp->i_addr = ip->i_afp->if_u1.if_data;
510 /* 499 /*
511 * Round i_bytes up to a word boundary. 500 * Round i_bytes up to a word boundary.
512 * The underlying memory is guaranteed to 501 * The underlying memory is guaranteed to
@@ -528,7 +517,7 @@ xfs_inode_item_format(
528 break; 517 break;
529 } 518 }
530 519
531 ASSERT(nvecs == iip->ili_item.li_desc->lid_size); 520 ASSERT(nvecs == lip->li_desc->lid_size);
532 iip->ili_format.ilf_size = nvecs; 521 iip->ili_format.ilf_size = nvecs;
533} 522}
534 523
@@ -539,12 +528,14 @@ xfs_inode_item_format(
539 */ 528 */
540STATIC void 529STATIC void
541xfs_inode_item_pin( 530xfs_inode_item_pin(
542 xfs_inode_log_item_t *iip) 531 struct xfs_log_item *lip)
543{ 532{
544 ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL)); 533 struct xfs_inode *ip = INODE_ITEM(lip)->ili_inode;
534
535 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
545 536
546 trace_xfs_inode_pin(iip->ili_inode, _RET_IP_); 537 trace_xfs_inode_pin(ip, _RET_IP_);
547 atomic_inc(&iip->ili_inode->i_pincount); 538 atomic_inc(&ip->i_pincount);
548} 539}
549 540
550 541
@@ -554,12 +545,12 @@ xfs_inode_item_pin(
554 * 545 *
555 * Also wake up anyone in xfs_iunpin_wait() if the count goes to 0. 546 * Also wake up anyone in xfs_iunpin_wait() if the count goes to 0.
556 */ 547 */
557/* ARGSUSED */
558STATIC void 548STATIC void
559xfs_inode_item_unpin( 549xfs_inode_item_unpin(
560 xfs_inode_log_item_t *iip) 550 struct xfs_log_item *lip,
551 int remove)
561{ 552{
562 struct xfs_inode *ip = iip->ili_inode; 553 struct xfs_inode *ip = INODE_ITEM(lip)->ili_inode;
563 554
564 trace_xfs_inode_unpin(ip, _RET_IP_); 555 trace_xfs_inode_unpin(ip, _RET_IP_);
565 ASSERT(atomic_read(&ip->i_pincount) > 0); 556 ASSERT(atomic_read(&ip->i_pincount) > 0);
@@ -567,15 +558,6 @@ xfs_inode_item_unpin(
567 wake_up(&ip->i_ipin_wait); 558 wake_up(&ip->i_ipin_wait);
568} 559}
569 560
570/* ARGSUSED */
571STATIC void
572xfs_inode_item_unpin_remove(
573 xfs_inode_log_item_t *iip,
574 xfs_trans_t *tp)
575{
576 xfs_inode_item_unpin(iip);
577}
578
579/* 561/*
580 * This is called to attempt to lock the inode associated with this 562 * This is called to attempt to lock the inode associated with this
581 * inode log item, in preparation for the push routine which does the actual 563 * inode log item, in preparation for the push routine which does the actual
@@ -591,19 +573,16 @@ xfs_inode_item_unpin_remove(
591 */ 573 */
592STATIC uint 574STATIC uint
593xfs_inode_item_trylock( 575xfs_inode_item_trylock(
594 xfs_inode_log_item_t *iip) 576 struct xfs_log_item *lip)
595{ 577{
596 register xfs_inode_t *ip; 578 struct xfs_inode_log_item *iip = INODE_ITEM(lip);
597 579 struct xfs_inode *ip = iip->ili_inode;
598 ip = iip->ili_inode;
599 580
600 if (xfs_ipincount(ip) > 0) { 581 if (xfs_ipincount(ip) > 0)
601 return XFS_ITEM_PINNED; 582 return XFS_ITEM_PINNED;
602 }
603 583
604 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { 584 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
605 return XFS_ITEM_LOCKED; 585 return XFS_ITEM_LOCKED;
606 }
607 586
608 if (!xfs_iflock_nowait(ip)) { 587 if (!xfs_iflock_nowait(ip)) {
609 /* 588 /*
@@ -629,7 +608,7 @@ xfs_inode_item_trylock(
629 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { 608 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
630 ASSERT(iip->ili_format.ilf_fields != 0); 609 ASSERT(iip->ili_format.ilf_fields != 0);
631 ASSERT(iip->ili_logged == 0); 610 ASSERT(iip->ili_logged == 0);
632 ASSERT(iip->ili_item.li_flags & XFS_LI_IN_AIL); 611 ASSERT(lip->li_flags & XFS_LI_IN_AIL);
633 } 612 }
634#endif 613#endif
635 return XFS_ITEM_SUCCESS; 614 return XFS_ITEM_SUCCESS;
@@ -643,26 +622,18 @@ xfs_inode_item_trylock(
643 */ 622 */
644STATIC void 623STATIC void
645xfs_inode_item_unlock( 624xfs_inode_item_unlock(
646 xfs_inode_log_item_t *iip) 625 struct xfs_log_item *lip)
647{ 626{
648 uint hold; 627 struct xfs_inode_log_item *iip = INODE_ITEM(lip);
649 uint iolocked; 628 struct xfs_inode *ip = iip->ili_inode;
650 uint lock_flags; 629 unsigned short lock_flags;
651 xfs_inode_t *ip;
652 630
653 ASSERT(iip != NULL);
654 ASSERT(iip->ili_inode->i_itemp != NULL); 631 ASSERT(iip->ili_inode->i_itemp != NULL);
655 ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL)); 632 ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL));
656 ASSERT((!(iip->ili_inode->i_itemp->ili_flags & 633
657 XFS_ILI_IOLOCKED_EXCL)) ||
658 xfs_isilocked(iip->ili_inode, XFS_IOLOCK_EXCL));
659 ASSERT((!(iip->ili_inode->i_itemp->ili_flags &
660 XFS_ILI_IOLOCKED_SHARED)) ||
661 xfs_isilocked(iip->ili_inode, XFS_IOLOCK_SHARED));
662 /* 634 /*
663 * Clear the transaction pointer in the inode. 635 * Clear the transaction pointer in the inode.
664 */ 636 */
665 ip = iip->ili_inode;
666 ip->i_transp = NULL; 637 ip->i_transp = NULL;
667 638
668 /* 639 /*
@@ -686,34 +657,11 @@ xfs_inode_item_unlock(
686 iip->ili_aextents_buf = NULL; 657 iip->ili_aextents_buf = NULL;
687 } 658 }
688 659
689 /* 660 lock_flags = iip->ili_lock_flags;
690 * Figure out if we should unlock the inode or not. 661 iip->ili_lock_flags = 0;
691 */ 662 if (lock_flags) {
692 hold = iip->ili_flags & XFS_ILI_HOLD; 663 xfs_iunlock(iip->ili_inode, lock_flags);
693 664 IRELE(iip->ili_inode);
694 /*
695 * Before clearing out the flags, remember whether we
696 * are holding the inode's IO lock.
697 */
698 iolocked = iip->ili_flags & XFS_ILI_IOLOCKED_ANY;
699
700 /*
701 * Clear out the fields of the inode log item particular
702 * to the current transaction.
703 */
704 iip->ili_flags = 0;
705
706 /*
707 * Unlock the inode if XFS_ILI_HOLD was not set.
708 */
709 if (!hold) {
710 lock_flags = XFS_ILOCK_EXCL;
711 if (iolocked & XFS_ILI_IOLOCKED_EXCL) {
712 lock_flags |= XFS_IOLOCK_EXCL;
713 } else if (iolocked & XFS_ILI_IOLOCKED_SHARED) {
714 lock_flags |= XFS_IOLOCK_SHARED;
715 }
716 xfs_iput(iip->ili_inode, lock_flags);
717 } 665 }
718} 666}
719 667
@@ -725,13 +673,12 @@ xfs_inode_item_unlock(
725 * is the only one that matters. Therefore, simply return the 673 * is the only one that matters. Therefore, simply return the
726 * given lsn. 674 * given lsn.
727 */ 675 */
728/*ARGSUSED*/
729STATIC xfs_lsn_t 676STATIC xfs_lsn_t
730xfs_inode_item_committed( 677xfs_inode_item_committed(
731 xfs_inode_log_item_t *iip, 678 struct xfs_log_item *lip,
732 xfs_lsn_t lsn) 679 xfs_lsn_t lsn)
733{ 680{
734 return (lsn); 681 return lsn;
735} 682}
736 683
737/* 684/*
@@ -743,13 +690,12 @@ xfs_inode_item_committed(
743 */ 690 */
744STATIC void 691STATIC void
745xfs_inode_item_pushbuf( 692xfs_inode_item_pushbuf(
746 xfs_inode_log_item_t *iip) 693 struct xfs_log_item *lip)
747{ 694{
748 xfs_inode_t *ip; 695 struct xfs_inode_log_item *iip = INODE_ITEM(lip);
749 xfs_mount_t *mp; 696 struct xfs_inode *ip = iip->ili_inode;
750 xfs_buf_t *bp; 697 struct xfs_buf *bp;
751 698
752 ip = iip->ili_inode;
753 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED)); 699 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
754 700
755 /* 701 /*
@@ -757,14 +703,13 @@ xfs_inode_item_pushbuf(
757 * inode was taken off the AIL. So, just get out. 703 * inode was taken off the AIL. So, just get out.
758 */ 704 */
759 if (completion_done(&ip->i_flush) || 705 if (completion_done(&ip->i_flush) ||
760 ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) { 706 !(lip->li_flags & XFS_LI_IN_AIL)) {
761 xfs_iunlock(ip, XFS_ILOCK_SHARED); 707 xfs_iunlock(ip, XFS_ILOCK_SHARED);
762 return; 708 return;
763 } 709 }
764 710
765 mp = ip->i_mount; 711 bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
766 bp = xfs_incore(mp->m_ddev_targp, iip->ili_format.ilf_blkno, 712 iip->ili_format.ilf_len, XBF_TRYLOCK);
767 iip->ili_format.ilf_len, XBF_TRYLOCK);
768 713
769 xfs_iunlock(ip, XFS_ILOCK_SHARED); 714 xfs_iunlock(ip, XFS_ILOCK_SHARED);
770 if (!bp) 715 if (!bp)
@@ -772,10 +717,8 @@ xfs_inode_item_pushbuf(
772 if (XFS_BUF_ISDELAYWRITE(bp)) 717 if (XFS_BUF_ISDELAYWRITE(bp))
773 xfs_buf_delwri_promote(bp); 718 xfs_buf_delwri_promote(bp);
774 xfs_buf_relse(bp); 719 xfs_buf_relse(bp);
775 return;
776} 720}
777 721
778
779/* 722/*
780 * This is called to asynchronously write the inode associated with this 723 * This is called to asynchronously write the inode associated with this
781 * inode log item out to disk. The inode will already have been locked by 724 * inode log item out to disk. The inode will already have been locked by
@@ -783,14 +726,14 @@ xfs_inode_item_pushbuf(
783 */ 726 */
784STATIC void 727STATIC void
785xfs_inode_item_push( 728xfs_inode_item_push(
786 xfs_inode_log_item_t *iip) 729 struct xfs_log_item *lip)
787{ 730{
788 xfs_inode_t *ip; 731 struct xfs_inode_log_item *iip = INODE_ITEM(lip);
789 732 struct xfs_inode *ip = iip->ili_inode;
790 ip = iip->ili_inode;
791 733
792 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED)); 734 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
793 ASSERT(!completion_done(&ip->i_flush)); 735 ASSERT(!completion_done(&ip->i_flush));
736
794 /* 737 /*
795 * Since we were able to lock the inode's flush lock and 738 * Since we were able to lock the inode's flush lock and
796 * we found it on the AIL, the inode must be dirty. This 739 * we found it on the AIL, the inode must be dirty. This
@@ -813,43 +756,34 @@ xfs_inode_item_push(
813 */ 756 */
814 (void) xfs_iflush(ip, 0); 757 (void) xfs_iflush(ip, 0);
815 xfs_iunlock(ip, XFS_ILOCK_SHARED); 758 xfs_iunlock(ip, XFS_ILOCK_SHARED);
816
817 return;
818} 759}
819 760
820/* 761/*
821 * XXX rcc - this one really has to do something. Probably needs 762 * XXX rcc - this one really has to do something. Probably needs
822 * to stamp in a new field in the incore inode. 763 * to stamp in a new field in the incore inode.
823 */ 764 */
824/* ARGSUSED */
825STATIC void 765STATIC void
826xfs_inode_item_committing( 766xfs_inode_item_committing(
827 xfs_inode_log_item_t *iip, 767 struct xfs_log_item *lip,
828 xfs_lsn_t lsn) 768 xfs_lsn_t lsn)
829{ 769{
830 iip->ili_last_lsn = lsn; 770 INODE_ITEM(lip)->ili_last_lsn = lsn;
831 return;
832} 771}
833 772
834/* 773/*
835 * This is the ops vector shared by all buf log items. 774 * This is the ops vector shared by all buf log items.
836 */ 775 */
837static struct xfs_item_ops xfs_inode_item_ops = { 776static struct xfs_item_ops xfs_inode_item_ops = {
838 .iop_size = (uint(*)(xfs_log_item_t*))xfs_inode_item_size, 777 .iop_size = xfs_inode_item_size,
839 .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) 778 .iop_format = xfs_inode_item_format,
840 xfs_inode_item_format, 779 .iop_pin = xfs_inode_item_pin,
841 .iop_pin = (void(*)(xfs_log_item_t*))xfs_inode_item_pin, 780 .iop_unpin = xfs_inode_item_unpin,
842 .iop_unpin = (void(*)(xfs_log_item_t*))xfs_inode_item_unpin, 781 .iop_trylock = xfs_inode_item_trylock,
843 .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*)) 782 .iop_unlock = xfs_inode_item_unlock,
844 xfs_inode_item_unpin_remove, 783 .iop_committed = xfs_inode_item_committed,
845 .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_inode_item_trylock, 784 .iop_push = xfs_inode_item_push,
846 .iop_unlock = (void(*)(xfs_log_item_t*))xfs_inode_item_unlock, 785 .iop_pushbuf = xfs_inode_item_pushbuf,
847 .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) 786 .iop_committing = xfs_inode_item_committing
848 xfs_inode_item_committed,
849 .iop_push = (void(*)(xfs_log_item_t*))xfs_inode_item_push,
850 .iop_pushbuf = (void(*)(xfs_log_item_t*))xfs_inode_item_pushbuf,
851 .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
852 xfs_inode_item_committing
853}; 787};
854 788
855 789
@@ -858,10 +792,10 @@ static struct xfs_item_ops xfs_inode_item_ops = {
858 */ 792 */
859void 793void
860xfs_inode_item_init( 794xfs_inode_item_init(
861 xfs_inode_t *ip, 795 struct xfs_inode *ip,
862 xfs_mount_t *mp) 796 struct xfs_mount *mp)
863{ 797{
864 xfs_inode_log_item_t *iip; 798 struct xfs_inode_log_item *iip;
865 799
866 ASSERT(ip->i_itemp == NULL); 800 ASSERT(ip->i_itemp == NULL);
867 iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, KM_SLEEP); 801 iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, KM_SLEEP);
@@ -899,14 +833,14 @@ xfs_inode_item_destroy(
899 * from the AIL if it has not been re-logged, and unlocking the inode's 833 * from the AIL if it has not been re-logged, and unlocking the inode's
900 * flush lock. 834 * flush lock.
901 */ 835 */
902/*ARGSUSED*/
903void 836void
904xfs_iflush_done( 837xfs_iflush_done(
905 xfs_buf_t *bp, 838 struct xfs_buf *bp,
906 xfs_inode_log_item_t *iip) 839 struct xfs_log_item *lip)
907{ 840{
841 struct xfs_inode_log_item *iip = INODE_ITEM(lip);
908 xfs_inode_t *ip = iip->ili_inode; 842 xfs_inode_t *ip = iip->ili_inode;
909 struct xfs_ail *ailp = iip->ili_item.li_ailp; 843 struct xfs_ail *ailp = lip->li_ailp;
910 844
911 /* 845 /*
912 * We only want to pull the item from the AIL if it is 846 * We only want to pull the item from the AIL if it is
@@ -917,12 +851,11 @@ xfs_iflush_done(
917 * the lock since it's cheaper, and then we recheck while 851 * the lock since it's cheaper, and then we recheck while
918 * holding the lock before removing the inode from the AIL. 852 * holding the lock before removing the inode from the AIL.
919 */ 853 */
920 if (iip->ili_logged && 854 if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn) {
921 (iip->ili_item.li_lsn == iip->ili_flush_lsn)) {
922 spin_lock(&ailp->xa_lock); 855 spin_lock(&ailp->xa_lock);
923 if (iip->ili_item.li_lsn == iip->ili_flush_lsn) { 856 if (lip->li_lsn == iip->ili_flush_lsn) {
924 /* xfs_trans_ail_delete() drops the AIL lock. */ 857 /* xfs_trans_ail_delete() drops the AIL lock. */
925 xfs_trans_ail_delete(ailp, (xfs_log_item_t*)iip); 858 xfs_trans_ail_delete(ailp, lip);
926 } else { 859 } else {
927 spin_unlock(&ailp->xa_lock); 860 spin_unlock(&ailp->xa_lock);
928 } 861 }
@@ -940,8 +873,6 @@ xfs_iflush_done(
940 * Release the inode's flush lock since we're done with it. 873 * Release the inode's flush lock since we're done with it.
941 */ 874 */
942 xfs_ifunlock(ip); 875 xfs_ifunlock(ip);
943
944 return;
945} 876}
946 877
947/* 878/*
@@ -957,10 +888,8 @@ xfs_iflush_abort(
957 xfs_inode_t *ip) 888 xfs_inode_t *ip)
958{ 889{
959 xfs_inode_log_item_t *iip = ip->i_itemp; 890 xfs_inode_log_item_t *iip = ip->i_itemp;
960 xfs_mount_t *mp;
961 891
962 iip = ip->i_itemp; 892 iip = ip->i_itemp;
963 mp = ip->i_mount;
964 if (iip) { 893 if (iip) {
965 struct xfs_ail *ailp = iip->ili_item.li_ailp; 894 struct xfs_ail *ailp = iip->ili_item.li_ailp;
966 if (iip->ili_item.li_flags & XFS_LI_IN_AIL) { 895 if (iip->ili_item.li_flags & XFS_LI_IN_AIL) {
@@ -991,10 +920,10 @@ xfs_iflush_abort(
991 920
992void 921void
993xfs_istale_done( 922xfs_istale_done(
994 xfs_buf_t *bp, 923 struct xfs_buf *bp,
995 xfs_inode_log_item_t *iip) 924 struct xfs_log_item *lip)
996{ 925{
997 xfs_iflush_abort(iip->ili_inode); 926 xfs_iflush_abort(INODE_ITEM(lip)->ili_inode);
998} 927}
999 928
1000/* 929/*
@@ -1007,9 +936,8 @@ xfs_inode_item_format_convert(
1007 xfs_inode_log_format_t *in_f) 936 xfs_inode_log_format_t *in_f)
1008{ 937{
1009 if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) { 938 if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) {
1010 xfs_inode_log_format_32_t *in_f32; 939 xfs_inode_log_format_32_t *in_f32 = buf->i_addr;
1011 940
1012 in_f32 = (xfs_inode_log_format_32_t *)buf->i_addr;
1013 in_f->ilf_type = in_f32->ilf_type; 941 in_f->ilf_type = in_f32->ilf_type;
1014 in_f->ilf_size = in_f32->ilf_size; 942 in_f->ilf_size = in_f32->ilf_size;
1015 in_f->ilf_fields = in_f32->ilf_fields; 943 in_f->ilf_fields = in_f32->ilf_fields;
@@ -1025,9 +953,8 @@ xfs_inode_item_format_convert(
1025 in_f->ilf_boffset = in_f32->ilf_boffset; 953 in_f->ilf_boffset = in_f32->ilf_boffset;
1026 return 0; 954 return 0;
1027 } else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)){ 955 } else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)){
1028 xfs_inode_log_format_64_t *in_f64; 956 xfs_inode_log_format_64_t *in_f64 = buf->i_addr;
1029 957
1030 in_f64 = (xfs_inode_log_format_64_t *)buf->i_addr;
1031 in_f->ilf_type = in_f64->ilf_type; 958 in_f->ilf_type = in_f64->ilf_type;
1032 in_f->ilf_size = in_f64->ilf_size; 959 in_f->ilf_size = in_f64->ilf_size;
1033 in_f->ilf_fields = in_f64->ilf_fields; 960 in_f->ilf_fields = in_f64->ilf_fields;
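
The new INODE_ITEM() helper introduced at the top of this file relies on container_of() to recover the inode log item from the generic struct xfs_log_item embedded inside it, which is what lets the ops vector drop its function-pointer casts. A minimal standalone sketch of that pattern, using simplified stand-in structures rather than the real XFS definitions:

/*
 * Stand-alone sketch of the container_of() pattern behind INODE_ITEM().
 * The structures are simplified stand-ins, not the kernel definitions;
 * only the embed-and-recover technique is the same.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct log_item {			/* stand-in for struct xfs_log_item */
	int		li_type;
};

struct inode_log_item {			/* stand-in for struct xfs_inode_log_item */
	struct log_item	ili_item;	/* embedded generic log item */
	int		ili_fields;
};

/* Same shape as the INODE_ITEM() helper added in the hunk above. */
static inline struct inode_log_item *INODE_ITEM(struct log_item *lip)
{
	return container_of(lip, struct inode_log_item, ili_item);
}

int main(void)
{
	struct inode_log_item iip = { .ili_fields = 42 };
	struct log_item *lip = &iip.ili_item;	/* what a callback receives */

	/* A callback taking only lip can still reach the containing item. */
	printf("ili_fields = %d\n", INODE_ITEM(lip)->ili_fields);
	return 0;
}
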
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h
index 9a467958ecdd..d3dee61e6d91 100644
--- a/fs/xfs/xfs_inode_item.h
+++ b/fs/xfs/xfs_inode_item.h
@@ -103,12 +103,6 @@ typedef struct xfs_inode_log_format_64 {
103 XFS_ILOG_ADATA | XFS_ILOG_AEXT | \ 103 XFS_ILOG_ADATA | XFS_ILOG_AEXT | \
104 XFS_ILOG_ABROOT) 104 XFS_ILOG_ABROOT)
105 105
106#define XFS_ILI_HOLD 0x1
107#define XFS_ILI_IOLOCKED_EXCL 0x2
108#define XFS_ILI_IOLOCKED_SHARED 0x4
109
110#define XFS_ILI_IOLOCKED_ANY (XFS_ILI_IOLOCKED_EXCL | XFS_ILI_IOLOCKED_SHARED)
111
112static inline int xfs_ilog_fbroot(int w) 106static inline int xfs_ilog_fbroot(int w)
113{ 107{
114 return (w == XFS_DATA_FORK ? XFS_ILOG_DBROOT : XFS_ILOG_ABROOT); 108 return (w == XFS_DATA_FORK ? XFS_ILOG_DBROOT : XFS_ILOG_ABROOT);
@@ -137,7 +131,7 @@ typedef struct xfs_inode_log_item {
137 struct xfs_inode *ili_inode; /* inode ptr */ 131 struct xfs_inode *ili_inode; /* inode ptr */
138 xfs_lsn_t ili_flush_lsn; /* lsn at last flush */ 132 xfs_lsn_t ili_flush_lsn; /* lsn at last flush */
139 xfs_lsn_t ili_last_lsn; /* lsn at last transaction */ 133 xfs_lsn_t ili_last_lsn; /* lsn at last transaction */
140 unsigned short ili_flags; /* misc flags */ 134 unsigned short ili_lock_flags; /* lock flags */
141 unsigned short ili_logged; /* flushed logged data */ 135 unsigned short ili_logged; /* flushed logged data */
142 unsigned int ili_last_fields; /* fields when flushed */ 136 unsigned int ili_last_fields; /* fields when flushed */
143 struct xfs_bmbt_rec *ili_extents_buf; /* array of logged 137 struct xfs_bmbt_rec *ili_extents_buf; /* array of logged
@@ -161,8 +155,8 @@ static inline int xfs_inode_clean(xfs_inode_t *ip)
161 155
162extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *); 156extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *);
163extern void xfs_inode_item_destroy(struct xfs_inode *); 157extern void xfs_inode_item_destroy(struct xfs_inode *);
164extern void xfs_iflush_done(struct xfs_buf *, xfs_inode_log_item_t *); 158extern void xfs_iflush_done(struct xfs_buf *, struct xfs_log_item *);
165extern void xfs_istale_done(struct xfs_buf *, xfs_inode_log_item_t *); 159extern void xfs_istale_done(struct xfs_buf *, struct xfs_log_item *);
166extern void xfs_iflush_abort(struct xfs_inode *); 160extern void xfs_iflush_abort(struct xfs_inode *);
167extern int xfs_inode_item_format_convert(xfs_log_iovec_t *, 161extern int xfs_inode_item_format_convert(xfs_log_iovec_t *,
168 xfs_inode_log_format_t *); 162 xfs_inode_log_format_t *);
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index ef14943829da..20576146369f 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -23,19 +23,14 @@
23#include "xfs_trans.h" 23#include "xfs_trans.h"
24#include "xfs_sb.h" 24#include "xfs_sb.h"
25#include "xfs_ag.h" 25#include "xfs_ag.h"
26#include "xfs_dir2.h"
27#include "xfs_alloc.h" 26#include "xfs_alloc.h"
28#include "xfs_dmapi.h"
29#include "xfs_quota.h" 27#include "xfs_quota.h"
30#include "xfs_mount.h" 28#include "xfs_mount.h"
31#include "xfs_bmap_btree.h" 29#include "xfs_bmap_btree.h"
32#include "xfs_alloc_btree.h" 30#include "xfs_alloc_btree.h"
33#include "xfs_ialloc_btree.h" 31#include "xfs_ialloc_btree.h"
34#include "xfs_dir2_sf.h"
35#include "xfs_attr_sf.h"
36#include "xfs_dinode.h" 32#include "xfs_dinode.h"
37#include "xfs_inode.h" 33#include "xfs_inode.h"
38#include "xfs_ialloc.h"
39#include "xfs_btree.h" 34#include "xfs_btree.h"
40#include "xfs_bmap.h" 35#include "xfs_bmap.h"
41#include "xfs_rtalloc.h" 36#include "xfs_rtalloc.h"
@@ -123,7 +118,7 @@ xfs_iomap(
123 error = xfs_bmapi(NULL, ip, offset_fsb, 118 error = xfs_bmapi(NULL, ip, offset_fsb,
124 (xfs_filblks_t)(end_fsb - offset_fsb), 119 (xfs_filblks_t)(end_fsb - offset_fsb),
125 bmapi_flags, NULL, 0, imap, 120 bmapi_flags, NULL, 0, imap,
126 nimaps, NULL, NULL); 121 nimaps, NULL);
127 122
128 if (error) 123 if (error)
129 goto out; 124 goto out;
@@ -138,7 +133,7 @@ xfs_iomap(
138 break; 133 break;
139 } 134 }
140 135
141 if (flags & (BMAPI_DIRECT|BMAPI_MMAP)) { 136 if (flags & BMAPI_DIRECT) {
142 error = xfs_iomap_write_direct(ip, offset, count, flags, 137 error = xfs_iomap_write_direct(ip, offset, count, flags,
143 imap, nimaps); 138 imap, nimaps);
144 } else { 139 } else {
@@ -247,7 +242,7 @@ xfs_iomap_write_direct(
247 xfs_off_t offset, 242 xfs_off_t offset,
248 size_t count, 243 size_t count,
249 int flags, 244 int flags,
250 xfs_bmbt_irec_t *ret_imap, 245 xfs_bmbt_irec_t *imap,
251 int *nmaps) 246 int *nmaps)
252{ 247{
253 xfs_mount_t *mp = ip->i_mount; 248 xfs_mount_t *mp = ip->i_mount;
@@ -261,7 +256,6 @@ xfs_iomap_write_direct(
261 int quota_flag; 256 int quota_flag;
262 int rt; 257 int rt;
263 xfs_trans_t *tp; 258 xfs_trans_t *tp;
264 xfs_bmbt_irec_t imap;
265 xfs_bmap_free_t free_list; 259 xfs_bmap_free_t free_list;
266 uint qblocks, resblks, resrtextents; 260 uint qblocks, resblks, resrtextents;
267 int committed; 261 int committed;
@@ -285,10 +279,10 @@ xfs_iomap_write_direct(
285 if (error) 279 if (error)
286 goto error_out; 280 goto error_out;
287 } else { 281 } else {
288 if (*nmaps && (ret_imap->br_startblock == HOLESTARTBLOCK)) 282 if (*nmaps && (imap->br_startblock == HOLESTARTBLOCK))
289 last_fsb = MIN(last_fsb, (xfs_fileoff_t) 283 last_fsb = MIN(last_fsb, (xfs_fileoff_t)
290 ret_imap->br_blockcount + 284 imap->br_blockcount +
291 ret_imap->br_startoff); 285 imap->br_startoff);
292 } 286 }
293 count_fsb = last_fsb - offset_fsb; 287 count_fsb = last_fsb - offset_fsb;
294 ASSERT(count_fsb > 0); 288 ASSERT(count_fsb > 0);
@@ -334,20 +328,22 @@ xfs_iomap_write_direct(
334 if (error) 328 if (error)
335 goto error1; 329 goto error1;
336 330
337 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 331 xfs_trans_ijoin(tp, ip);
338 xfs_trans_ihold(tp, ip);
339 332
340 bmapi_flag = XFS_BMAPI_WRITE; 333 bmapi_flag = XFS_BMAPI_WRITE;
341 if ((flags & BMAPI_DIRECT) && (offset < ip->i_size || extsz)) 334 if ((flags & BMAPI_DIRECT) && (offset < ip->i_size || extsz))
342 bmapi_flag |= XFS_BMAPI_PREALLOC; 335 bmapi_flag |= XFS_BMAPI_PREALLOC;
343 336
344 /* 337 /*
345 * Issue the xfs_bmapi() call to allocate the blocks 338 * Issue the xfs_bmapi() call to allocate the blocks.
339 *
340 * From this point onwards we overwrite the imap pointer that the
341 * caller gave to us.
346 */ 342 */
347 xfs_bmap_init(&free_list, &firstfsb); 343 xfs_bmap_init(&free_list, &firstfsb);
348 nimaps = 1; 344 nimaps = 1;
349 error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, bmapi_flag, 345 error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, bmapi_flag,
350 &firstfsb, 0, &imap, &nimaps, &free_list, NULL); 346 &firstfsb, 0, imap, &nimaps, &free_list);
351 if (error) 347 if (error)
352 goto error0; 348 goto error0;
353 349
@@ -369,12 +365,11 @@ xfs_iomap_write_direct(
369 goto error_out; 365 goto error_out;
370 } 366 }
371 367
372 if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) { 368 if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) {
373 error = xfs_cmn_err_fsblock_zero(ip, &imap); 369 error = xfs_cmn_err_fsblock_zero(ip, imap);
374 goto error_out; 370 goto error_out;
375 } 371 }
376 372
377 *ret_imap = imap;
378 *nmaps = 1; 373 *nmaps = 1;
379 return 0; 374 return 0;
380 375
@@ -425,7 +420,7 @@ xfs_iomap_eof_want_preallocate(
425 imaps = nimaps; 420 imaps = nimaps;
426 firstblock = NULLFSBLOCK; 421 firstblock = NULLFSBLOCK;
427 error = xfs_bmapi(NULL, ip, start_fsb, count_fsb, 0, 422 error = xfs_bmapi(NULL, ip, start_fsb, count_fsb, 0,
428 &firstblock, 0, imap, &imaps, NULL, NULL); 423 &firstblock, 0, imap, &imaps, NULL);
429 if (error) 424 if (error)
430 return error; 425 return error;
431 for (n = 0; n < imaps; n++) { 426 for (n = 0; n < imaps; n++) {
@@ -500,7 +495,7 @@ retry:
500 (xfs_filblks_t)(last_fsb - offset_fsb), 495 (xfs_filblks_t)(last_fsb - offset_fsb),
501 XFS_BMAPI_DELAY | XFS_BMAPI_WRITE | 496 XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
502 XFS_BMAPI_ENTIRE, &firstblock, 1, imap, 497 XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
503 &nimaps, NULL, NULL); 498 &nimaps, NULL);
504 if (error && (error != ENOSPC)) 499 if (error && (error != ENOSPC))
505 return XFS_ERROR(error); 500 return XFS_ERROR(error);
506 501
@@ -548,7 +543,7 @@ xfs_iomap_write_allocate(
548 xfs_inode_t *ip, 543 xfs_inode_t *ip,
549 xfs_off_t offset, 544 xfs_off_t offset,
550 size_t count, 545 size_t count,
551 xfs_bmbt_irec_t *map, 546 xfs_bmbt_irec_t *imap,
552 int *retmap) 547 int *retmap)
553{ 548{
554 xfs_mount_t *mp = ip->i_mount; 549 xfs_mount_t *mp = ip->i_mount;
@@ -557,7 +552,6 @@ xfs_iomap_write_allocate(
557 xfs_fsblock_t first_block; 552 xfs_fsblock_t first_block;
558 xfs_bmap_free_t free_list; 553 xfs_bmap_free_t free_list;
559 xfs_filblks_t count_fsb; 554 xfs_filblks_t count_fsb;
560 xfs_bmbt_irec_t imap;
561 xfs_trans_t *tp; 555 xfs_trans_t *tp;
562 int nimaps, committed; 556 int nimaps, committed;
563 int error = 0; 557 int error = 0;
@@ -573,8 +567,8 @@ xfs_iomap_write_allocate(
573 return XFS_ERROR(error); 567 return XFS_ERROR(error);
574 568
575 offset_fsb = XFS_B_TO_FSBT(mp, offset); 569 offset_fsb = XFS_B_TO_FSBT(mp, offset);
576 count_fsb = map->br_blockcount; 570 count_fsb = imap->br_blockcount;
577 map_start_fsb = map->br_startoff; 571 map_start_fsb = imap->br_startoff;
578 572
579 XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb)); 573 XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));
580 574
@@ -602,8 +596,7 @@ xfs_iomap_write_allocate(
602 return XFS_ERROR(error); 596 return XFS_ERROR(error);
603 } 597 }
604 xfs_ilock(ip, XFS_ILOCK_EXCL); 598 xfs_ilock(ip, XFS_ILOCK_EXCL);
605 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 599 xfs_trans_ijoin(tp, ip);
606 xfs_trans_ihold(tp, ip);
607 600
608 xfs_bmap_init(&free_list, &first_block); 601 xfs_bmap_init(&free_list, &first_block);
609 602
@@ -654,10 +647,15 @@ xfs_iomap_write_allocate(
654 } 647 }
655 } 648 }
656 649
657 /* Go get the actual blocks */ 650 /*
651 * Go get the actual blocks.
652 *
653 * From this point onwards we overwrite the imap
654 * pointer that the caller gave to us.
655 */
658 error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb, 656 error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb,
659 XFS_BMAPI_WRITE, &first_block, 1, 657 XFS_BMAPI_WRITE, &first_block, 1,
660 &imap, &nimaps, &free_list, NULL); 658 imap, &nimaps, &free_list);
661 if (error) 659 if (error)
662 goto trans_cancel; 660 goto trans_cancel;
663 661
@@ -676,13 +674,12 @@ xfs_iomap_write_allocate(
676 * See if we were able to allocate an extent that 674 * See if we were able to allocate an extent that
677 * covers at least part of the callers request 675 * covers at least part of the callers request
678 */ 676 */
679 if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) 677 if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
680 return xfs_cmn_err_fsblock_zero(ip, &imap); 678 return xfs_cmn_err_fsblock_zero(ip, imap);
681 679
682 if ((offset_fsb >= imap.br_startoff) && 680 if ((offset_fsb >= imap->br_startoff) &&
683 (offset_fsb < (imap.br_startoff + 681 (offset_fsb < (imap->br_startoff +
684 imap.br_blockcount))) { 682 imap->br_blockcount))) {
685 *map = imap;
686 *retmap = 1; 683 *retmap = 1;
687 XFS_STATS_INC(xs_xstrat_quick); 684 XFS_STATS_INC(xs_xstrat_quick);
688 return 0; 685 return 0;
@@ -692,8 +689,8 @@ xfs_iomap_write_allocate(
692 * So far we have not mapped the requested part of the 689 * So far we have not mapped the requested part of the
693 * file, just surrounding data, try again. 690 * file, just surrounding data, try again.
694 */ 691 */
695 count_fsb -= imap.br_blockcount; 692 count_fsb -= imap->br_blockcount;
696 map_start_fsb = imap.br_startoff + imap.br_blockcount; 693 map_start_fsb = imap->br_startoff + imap->br_blockcount;
697 } 694 }
698 695
699trans_cancel: 696trans_cancel:
@@ -766,8 +763,7 @@ xfs_iomap_write_unwritten(
766 } 763 }
767 764
768 xfs_ilock(ip, XFS_ILOCK_EXCL); 765 xfs_ilock(ip, XFS_ILOCK_EXCL);
769 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 766 xfs_trans_ijoin(tp, ip);
770 xfs_trans_ihold(tp, ip);
771 767
772 /* 768 /*
773 * Modify the unwritten extent state of the buffer. 769 * Modify the unwritten extent state of the buffer.
@@ -776,7 +772,7 @@ xfs_iomap_write_unwritten(
776 nimaps = 1; 772 nimaps = 1;
777 error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, 773 error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
778 XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb, 774 XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb,
779 1, &imap, &nimaps, &free_list, NULL); 775 1, &imap, &nimaps, &free_list);
780 if (error) 776 if (error)
781 goto error_on_bmapi_transaction; 777 goto error_on_bmapi_transaction;
782 778
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 81ac4afd45b3..7748a430f50d 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -18,17 +18,16 @@
18#ifndef __XFS_IOMAP_H__ 18#ifndef __XFS_IOMAP_H__
19#define __XFS_IOMAP_H__ 19#define __XFS_IOMAP_H__
20 20
21typedef enum { 21/* base extent manipulation calls */
22 /* base extent manipulation calls */ 22#define BMAPI_READ (1 << 0) /* read extents */
23 BMAPI_READ = (1 << 0), /* read extents */ 23#define BMAPI_WRITE (1 << 1) /* create extents */
24 BMAPI_WRITE = (1 << 1), /* create extents */ 24#define BMAPI_ALLOCATE (1 << 2) /* delayed allocate to real extents */
25 BMAPI_ALLOCATE = (1 << 2), /* delayed allocate to real extents */ 25
26 /* modifiers */ 26/* modifiers */
27 BMAPI_IGNSTATE = (1 << 4), /* ignore unwritten state on read */ 27#define BMAPI_IGNSTATE (1 << 4) /* ignore unwritten state on read */
28 BMAPI_DIRECT = (1 << 5), /* direct instead of buffered write */ 28#define BMAPI_DIRECT (1 << 5) /* direct instead of buffered write */
29 BMAPI_MMAP = (1 << 6), /* allocate for mmap write */ 29#define BMAPI_MMA (1 << 6) /* allocate for mmap write */
30 BMAPI_TRYLOCK = (1 << 7), /* non-blocking request */ 30#define BMAPI_TRYLOCK (1 << 7) /* non-blocking request */
31} bmapi_flags_t;
32 31
33#define BMAPI_FLAGS \ 32#define BMAPI_FLAGS \
34 { BMAPI_READ, "READ" }, \ 33 { BMAPI_READ, "READ" }, \
@@ -36,7 +35,6 @@ typedef enum {
36 { BMAPI_ALLOCATE, "ALLOCATE" }, \ 35 { BMAPI_ALLOCATE, "ALLOCATE" }, \
37 { BMAPI_IGNSTATE, "IGNSTATE" }, \ 36 { BMAPI_IGNSTATE, "IGNSTATE" }, \
38 { BMAPI_DIRECT, "DIRECT" }, \ 37 { BMAPI_DIRECT, "DIRECT" }, \
39 { BMAPI_MMAP, "MMAP" }, \
40 { BMAPI_TRYLOCK, "TRYLOCK" } 38 { BMAPI_TRYLOCK, "TRYLOCK" }
41 39
42struct xfs_inode; 40struct xfs_inode;
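
The xfs_iomap.h hunk above replaces the bmapi_flags_t enum with plain preprocessor bit flags, which is why the xfs_iomap.c hunk can test them with a simple mask (flags & BMAPI_DIRECT). A small illustrative program, reusing the flag values from the hunk but with purely hypothetical scaffolding around them:

/*
 * Illustrative only: the BMAPI_* values are copied from the hunk above,
 * but the surrounding program is hypothetical scaffolding, not XFS code.
 */
#include <stdio.h>

#define BMAPI_READ	(1 << 0)	/* read extents */
#define BMAPI_WRITE	(1 << 1)	/* create extents */
#define BMAPI_ALLOCATE	(1 << 2)	/* delayed allocate to real extents */
#define BMAPI_IGNSTATE	(1 << 4)	/* ignore unwritten state on read */
#define BMAPI_DIRECT	(1 << 5)	/* direct instead of buffered write */
#define BMAPI_TRYLOCK	(1 << 7)	/* non-blocking request */

int main(void)
{
	int flags = BMAPI_WRITE | BMAPI_DIRECT;

	/* Mirrors the "flags & BMAPI_DIRECT" test in xfs_iomap(). */
	if (flags & BMAPI_DIRECT)
		printf("direct write path\n");
	else
		printf("buffered/delayed allocation path\n");
	return 0;
}
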
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index b1b801e4a28e..7e3626e5925c 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -24,20 +24,17 @@
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 27#include "xfs_mount.h"
30#include "xfs_bmap_btree.h" 28#include "xfs_bmap_btree.h"
31#include "xfs_alloc_btree.h" 29#include "xfs_alloc_btree.h"
32#include "xfs_ialloc_btree.h" 30#include "xfs_ialloc_btree.h"
33#include "xfs_dir2_sf.h"
34#include "xfs_attr_sf.h"
35#include "xfs_dinode.h" 31#include "xfs_dinode.h"
36#include "xfs_inode.h" 32#include "xfs_inode.h"
37#include "xfs_ialloc.h" 33#include "xfs_ialloc.h"
38#include "xfs_itable.h" 34#include "xfs_itable.h"
39#include "xfs_error.h" 35#include "xfs_error.h"
40#include "xfs_btree.h" 36#include "xfs_btree.h"
37#include "xfs_trace.h"
41 38
42STATIC int 39STATIC int
43xfs_internal_inum( 40xfs_internal_inum(
@@ -49,24 +46,40 @@ xfs_internal_inum(
49 (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino))); 46 (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino)));
50} 47}
51 48
52STATIC int 49/*
53xfs_bulkstat_one_iget( 50 * Return stat information for one inode.
54 xfs_mount_t *mp, /* mount point for filesystem */ 51 * Return 0 if ok, else errno.
55 xfs_ino_t ino, /* inode number to get data for */ 52 */
56 xfs_daddr_t bno, /* starting bno of inode cluster */ 53int
57 xfs_bstat_t *buf, /* return buffer */ 54xfs_bulkstat_one_int(
58 int *stat) /* BULKSTAT_RV_... */ 55 struct xfs_mount *mp, /* mount point for filesystem */
56 xfs_ino_t ino, /* inode to get data for */
57 void __user *buffer, /* buffer to place output in */
58 int ubsize, /* size of buffer */
59 bulkstat_one_fmt_pf formatter, /* formatter, copy to user */
60 int *ubused, /* bytes used by me */
61 int *stat) /* BULKSTAT_RV_... */
59{ 62{
60 xfs_icdinode_t *dic; /* dinode core info pointer */ 63 struct xfs_icdinode *dic; /* dinode core info pointer */
61 xfs_inode_t *ip; /* incore inode pointer */ 64 struct xfs_inode *ip; /* incore inode pointer */
62 struct inode *inode; 65 struct inode *inode;
63 int error; 66 struct xfs_bstat *buf; /* return buffer */
67 int error = 0; /* error value */
68
69 *stat = BULKSTAT_RV_NOTHING;
70
71 if (!buffer || xfs_internal_inum(mp, ino))
72 return XFS_ERROR(EINVAL);
73
74 buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
75 if (!buf)
76 return XFS_ERROR(ENOMEM);
64 77
65 error = xfs_iget(mp, NULL, ino, 78 error = xfs_iget(mp, NULL, ino,
66 XFS_IGET_BULKSTAT, XFS_ILOCK_SHARED, &ip, bno); 79 XFS_IGET_UNTRUSTED, XFS_ILOCK_SHARED, &ip);
67 if (error) { 80 if (error) {
68 *stat = BULKSTAT_RV_NOTHING; 81 *stat = BULKSTAT_RV_NOTHING;
69 return error; 82 goto out_free;
70 } 83 }
71 84
72 ASSERT(ip != NULL); 85 ASSERT(ip != NULL);
@@ -127,77 +140,17 @@ xfs_bulkstat_one_iget(
127 buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks; 140 buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
128 break; 141 break;
129 } 142 }
143 xfs_iunlock(ip, XFS_ILOCK_SHARED);
144 IRELE(ip);
130 145
131 xfs_iput(ip, XFS_ILOCK_SHARED); 146 error = formatter(buffer, ubsize, ubused, buf);
132 return error;
133}
134
135STATIC void
136xfs_bulkstat_one_dinode(
137 xfs_mount_t *mp, /* mount point for filesystem */
138 xfs_ino_t ino, /* inode number to get data for */
139 xfs_dinode_t *dic, /* dinode inode pointer */
140 xfs_bstat_t *buf) /* return buffer */
141{
142 /*
143 * The inode format changed when we moved the link count and
144 * made it 32 bits long. If this is an old format inode,
145 * convert it in memory to look like a new one. If it gets
146 * flushed to disk we will convert back before flushing or
147 * logging it. We zero out the new projid field and the old link
148 * count field. We'll handle clearing the pad field (the remains
149 * of the old uuid field) when we actually convert the inode to
150 * the new format. We don't change the version number so that we
151 * can distinguish this from a real new format inode.
152 */
153 if (dic->di_version == 1) {
154 buf->bs_nlink = be16_to_cpu(dic->di_onlink);
155 buf->bs_projid = 0;
156 } else {
157 buf->bs_nlink = be32_to_cpu(dic->di_nlink);
158 buf->bs_projid = be16_to_cpu(dic->di_projid);
159 }
160 147
161 buf->bs_ino = ino; 148 if (!error)
162 buf->bs_mode = be16_to_cpu(dic->di_mode); 149 *stat = BULKSTAT_RV_DIDONE;
163 buf->bs_uid = be32_to_cpu(dic->di_uid);
164 buf->bs_gid = be32_to_cpu(dic->di_gid);
165 buf->bs_size = be64_to_cpu(dic->di_size);
166 buf->bs_atime.tv_sec = be32_to_cpu(dic->di_atime.t_sec);
167 buf->bs_atime.tv_nsec = be32_to_cpu(dic->di_atime.t_nsec);
168 buf->bs_mtime.tv_sec = be32_to_cpu(dic->di_mtime.t_sec);
169 buf->bs_mtime.tv_nsec = be32_to_cpu(dic->di_mtime.t_nsec);
170 buf->bs_ctime.tv_sec = be32_to_cpu(dic->di_ctime.t_sec);
171 buf->bs_ctime.tv_nsec = be32_to_cpu(dic->di_ctime.t_nsec);
172 buf->bs_xflags = xfs_dic2xflags(dic);
173 buf->bs_extsize = be32_to_cpu(dic->di_extsize) << mp->m_sb.sb_blocklog;
174 buf->bs_extents = be32_to_cpu(dic->di_nextents);
175 buf->bs_gen = be32_to_cpu(dic->di_gen);
176 memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
177 buf->bs_dmevmask = be32_to_cpu(dic->di_dmevmask);
178 buf->bs_dmstate = be16_to_cpu(dic->di_dmstate);
179 buf->bs_aextents = be16_to_cpu(dic->di_anextents);
180 buf->bs_forkoff = XFS_DFORK_BOFF(dic);
181 150
182 switch (dic->di_format) { 151 out_free:
183 case XFS_DINODE_FMT_DEV: 152 kmem_free(buf);
184 buf->bs_rdev = xfs_dinode_get_rdev(dic); 153 return error;
185 buf->bs_blksize = BLKDEV_IOSIZE;
186 buf->bs_blocks = 0;
187 break;
188 case XFS_DINODE_FMT_LOCAL:
189 case XFS_DINODE_FMT_UUID:
190 buf->bs_rdev = 0;
191 buf->bs_blksize = mp->m_sb.sb_blocksize;
192 buf->bs_blocks = 0;
193 break;
194 case XFS_DINODE_FMT_EXTENTS:
195 case XFS_DINODE_FMT_BTREE:
196 buf->bs_rdev = 0;
197 buf->bs_blksize = mp->m_sb.sb_blocksize;
198 buf->bs_blocks = be64_to_cpu(dic->di_nblocks);
199 break;
200 }
201} 154}
202 155
203/* Return 0 on success or positive error */ 156/* Return 0 on success or positive error */
@@ -217,118 +170,17 @@ xfs_bulkstat_one_fmt(
217 return 0; 170 return 0;
218} 171}
219 172
220/*
221 * Return stat information for one inode.
222 * Return 0 if ok, else errno.
223 */
224int /* error status */
225xfs_bulkstat_one_int(
226 xfs_mount_t *mp, /* mount point for filesystem */
227 xfs_ino_t ino, /* inode number to get data for */
228 void __user *buffer, /* buffer to place output in */
229 int ubsize, /* size of buffer */
230 bulkstat_one_fmt_pf formatter, /* formatter, copy to user */
231 xfs_daddr_t bno, /* starting bno of inode cluster */
232 int *ubused, /* bytes used by me */
233 void *dibuff, /* on-disk inode buffer */
234 int *stat) /* BULKSTAT_RV_... */
235{
236 xfs_bstat_t *buf; /* return buffer */
237 int error = 0; /* error value */
238 xfs_dinode_t *dip; /* dinode inode pointer */
239
240 dip = (xfs_dinode_t *)dibuff;
241 *stat = BULKSTAT_RV_NOTHING;
242
243 if (!buffer || xfs_internal_inum(mp, ino))
244 return XFS_ERROR(EINVAL);
245
246 buf = kmem_alloc(sizeof(*buf), KM_SLEEP);
247
248 if (dip == NULL) {
249 /* We're not being passed a pointer to a dinode. This happens
250 * if BULKSTAT_FG_IGET is selected. Do the iget.
251 */
252 error = xfs_bulkstat_one_iget(mp, ino, bno, buf, stat);
253 if (error)
254 goto out_free;
255 } else {
256 xfs_bulkstat_one_dinode(mp, ino, dip, buf);
257 }
258
259 error = formatter(buffer, ubsize, ubused, buf);
260 if (error)
261 goto out_free;
262
263 *stat = BULKSTAT_RV_DIDONE;
264
265 out_free:
266 kmem_free(buf);
267 return error;
268}
269
270int 173int
271xfs_bulkstat_one( 174xfs_bulkstat_one(
272 xfs_mount_t *mp, /* mount point for filesystem */ 175 xfs_mount_t *mp, /* mount point for filesystem */
273 xfs_ino_t ino, /* inode number to get data for */ 176 xfs_ino_t ino, /* inode number to get data for */
274 void __user *buffer, /* buffer to place output in */ 177 void __user *buffer, /* buffer to place output in */
275 int ubsize, /* size of buffer */ 178 int ubsize, /* size of buffer */
276 void *private_data, /* my private data */
277 xfs_daddr_t bno, /* starting bno of inode cluster */
278 int *ubused, /* bytes used by me */ 179 int *ubused, /* bytes used by me */
279 void *dibuff, /* on-disk inode buffer */
280 int *stat) /* BULKSTAT_RV_... */ 180 int *stat) /* BULKSTAT_RV_... */
281{ 181{
282 return xfs_bulkstat_one_int(mp, ino, buffer, ubsize, 182 return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
283 xfs_bulkstat_one_fmt, bno, 183 xfs_bulkstat_one_fmt, ubused, stat);
284 ubused, dibuff, stat);
285}
286
287/*
288 * Test to see whether we can use the ondisk inode directly, based
289 * on the given bulkstat flags, filling in dipp accordingly.
290 * Returns zero if the inode is dodgy.
291 */
292STATIC int
293xfs_bulkstat_use_dinode(
294 xfs_mount_t *mp,
295 int flags,
296 xfs_buf_t *bp,
297 int clustidx,
298 xfs_dinode_t **dipp)
299{
300 xfs_dinode_t *dip;
301 unsigned int aformat;
302
303 *dipp = NULL;
304 if (!bp || (flags & BULKSTAT_FG_IGET))
305 return 1;
306 dip = (xfs_dinode_t *)
307 xfs_buf_offset(bp, clustidx << mp->m_sb.sb_inodelog);
308 /*
309 * Check the buffer containing the on-disk inode for di_mode == 0.
310 * This is to prevent xfs_bulkstat from picking up just reclaimed
311 * inodes that have their in-core state initialized but not flushed
312 * to disk yet. This is a temporary hack that would require a proper
313 * fix in the future.
314 */
315 if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
316 !XFS_DINODE_GOOD_VERSION(dip->di_version) ||
317 !dip->di_mode)
318 return 0;
319 if (flags & BULKSTAT_FG_QUICK) {
320 *dipp = dip;
321 return 1;
322 }
323 /* BULKSTAT_FG_INLINE: if attr fork is local, or not there, use it */
324 aformat = dip->di_aformat;
325 if ((XFS_DFORK_Q(dip) == 0) ||
326 (aformat == XFS_DINODE_FMT_LOCAL) ||
327 (aformat == XFS_DINODE_FMT_EXTENTS && !dip->di_anextents)) {
328 *dipp = dip;
329 return 1;
330 }
331 return 1;
332} 184}
333 185
334#define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size) 186#define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size)
@@ -342,10 +194,8 @@ xfs_bulkstat(
342 xfs_ino_t *lastinop, /* last inode returned */ 194 xfs_ino_t *lastinop, /* last inode returned */
343 int *ubcountp, /* size of buffer/count returned */ 195 int *ubcountp, /* size of buffer/count returned */
344 bulkstat_one_pf formatter, /* func that'd fill a single buf */ 196 bulkstat_one_pf formatter, /* func that'd fill a single buf */
345 void *private_data,/* private data for formatter */
346 size_t statstruct_size, /* sizeof struct filling */ 197 size_t statstruct_size, /* sizeof struct filling */
347 char __user *ubuffer, /* buffer with inode stats */ 198 char __user *ubuffer, /* buffer with inode stats */
348 int flags, /* defined in xfs_itable.h */
349 int *done) /* 1 if there are more stats to get */ 199 int *done) /* 1 if there are more stats to get */
350{ 200{
351 xfs_agblock_t agbno=0;/* allocation group block number */ 201 xfs_agblock_t agbno=0;/* allocation group block number */
@@ -380,14 +230,12 @@ xfs_bulkstat(
380 int ubelem; /* spaces used in user's buffer */ 230 int ubelem; /* spaces used in user's buffer */
381 int ubused; /* bytes used by formatter */ 231 int ubused; /* bytes used by formatter */
382 xfs_buf_t *bp; /* ptr to on-disk inode cluster buf */ 232 xfs_buf_t *bp; /* ptr to on-disk inode cluster buf */
383 xfs_dinode_t *dip; /* ptr into bp for specific inode */
384 233
385 /* 234 /*
386 * Get the last inode value, see if there's nothing to do. 235 * Get the last inode value, see if there's nothing to do.
387 */ 236 */
388 ino = (xfs_ino_t)*lastinop; 237 ino = (xfs_ino_t)*lastinop;
389 lastino = ino; 238 lastino = ino;
390 dip = NULL;
391 agno = XFS_INO_TO_AGNO(mp, ino); 239 agno = XFS_INO_TO_AGNO(mp, ino);
392 agino = XFS_INO_TO_AGINO(mp, ino); 240 agino = XFS_INO_TO_AGINO(mp, ino);
393 if (agno >= mp->m_sb.sb_agcount || 241 if (agno >= mp->m_sb.sb_agcount ||
@@ -612,37 +460,6 @@ xfs_bulkstat(
612 irbp->ir_startino) + 460 irbp->ir_startino) +
613 ((chunkidx & nimask) >> 461 ((chunkidx & nimask) >>
614 mp->m_sb.sb_inopblog); 462 mp->m_sb.sb_inopblog);
615
616 if (flags & (BULKSTAT_FG_QUICK |
617 BULKSTAT_FG_INLINE)) {
618 int offset;
619
620 ino = XFS_AGINO_TO_INO(mp, agno,
621 agino);
622 bno = XFS_AGB_TO_DADDR(mp, agno,
623 agbno);
624
625 /*
626 * Get the inode cluster buffer
627 */
628 if (bp)
629 xfs_buf_relse(bp);
630
631 error = xfs_inotobp(mp, NULL, ino, &dip,
632 &bp, &offset,
633 XFS_IGET_BULKSTAT);
634
635 if (!error)
636 clustidx = offset / mp->m_sb.sb_inodesize;
637 if (XFS_TEST_ERROR(error != 0,
638 mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK,
639 XFS_RANDOM_BULKSTAT_READ_CHUNK)) {
640 bp = NULL;
641 ubleft = 0;
642 rval = error;
643 break;
644 }
645 }
646 } 463 }
647 ino = XFS_AGINO_TO_INO(mp, agno, agino); 464 ino = XFS_AGINO_TO_INO(mp, agno, agino);
648 bno = XFS_AGB_TO_DADDR(mp, agno, agbno); 465 bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
@@ -658,35 +475,13 @@ xfs_bulkstat(
658 * when the chunk is used up. 475 * when the chunk is used up.
659 */ 476 */
660 irbp->ir_freecount++; 477 irbp->ir_freecount++;
661 if (!xfs_bulkstat_use_dinode(mp, flags, bp,
662 clustidx, &dip)) {
663 lastino = ino;
664 continue;
665 }
666 /*
667 * If we need to do an iget, cannot hold bp.
668 * Drop it, until starting the next cluster.
669 */
670 if ((flags & BULKSTAT_FG_INLINE) && !dip) {
671 if (bp)
672 xfs_buf_relse(bp);
673 bp = NULL;
674 }
675 478
676 /* 479 /*
677 * Get the inode and fill in a single buffer. 480 * Get the inode and fill in a single buffer.
678 * BULKSTAT_FG_QUICK uses dip to fill it in.
679 * BULKSTAT_FG_IGET uses igets.
680 * BULKSTAT_FG_INLINE uses dip if we have an
681 * inline attr fork, else igets.
682 * See: xfs_bulkstat_one & xfs_dm_bulkstat_one.
683 * This is also used to count inodes/blks, etc
684 * in xfs_qm_quotacheck.
685 */ 481 */
686 ubused = statstruct_size; 482 ubused = statstruct_size;
687 error = formatter(mp, ino, ubufp, 483 error = formatter(mp, ino, ubufp, ubleft,
688 ubleft, private_data, 484 &ubused, &fmterror);
689 bno, &ubused, dip, &fmterror);
690 if (fmterror == BULKSTAT_RV_NOTHING) { 485 if (fmterror == BULKSTAT_RV_NOTHING) {
691 if (error && error != ENOENT && 486 if (error && error != ENOENT &&
692 error != EINVAL) { 487 error != EINVAL) {
@@ -778,8 +573,7 @@ xfs_bulkstat_single(
778 */ 573 */
779 574
780 ino = (xfs_ino_t)*lastinop; 575 ino = (xfs_ino_t)*lastinop;
781 error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), 576 error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), 0, &res);
782 NULL, 0, NULL, NULL, &res);
783 if (error) { 577 if (error) {
784 /* 578 /*
785 * Special case way failed, do it the "long" way 579 * Special case way failed, do it the "long" way
@@ -788,8 +582,7 @@ xfs_bulkstat_single(
788 (*lastinop)--; 582 (*lastinop)--;
789 count = 1; 583 count = 1;
790 if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one, 584 if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one,
791 NULL, sizeof(xfs_bstat_t), buffer, 585 sizeof(xfs_bstat_t), buffer, done))
792 BULKSTAT_FG_IGET, done))
793 return error; 586 return error;
794 if (count == 0 || (xfs_ino_t)*lastinop != ino) 587 if (count == 0 || (xfs_ino_t)*lastinop != ino)
795 return error == EFSCORRUPTED ? 588 return error == EFSCORRUPTED ?
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
index 20792bf45946..97295d91d170 100644
--- a/fs/xfs/xfs_itable.h
+++ b/fs/xfs/xfs_itable.h
@@ -27,10 +27,7 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount *mp,
27 xfs_ino_t ino, 27 xfs_ino_t ino,
28 void __user *buffer, 28 void __user *buffer,
29 int ubsize, 29 int ubsize,
30 void *private_data,
31 xfs_daddr_t bno,
32 int *ubused, 30 int *ubused,
33 void *dip,
34 int *stat); 31 int *stat);
35 32
36/* 33/*
@@ -41,13 +38,6 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount *mp,
41#define BULKSTAT_RV_GIVEUP 2 38#define BULKSTAT_RV_GIVEUP 2
42 39
43/* 40/*
44 * Values for bulkstat flag argument.
45 */
46#define BULKSTAT_FG_IGET 0x1 /* Go through the buffer cache */
47#define BULKSTAT_FG_QUICK 0x2 /* No iget, walk the dinode cluster */
48#define BULKSTAT_FG_INLINE 0x4 /* No iget if inline attrs */
49
50/*
51 * Return stat information in bulk (by-inode) for the filesystem. 41 * Return stat information in bulk (by-inode) for the filesystem.
52 */ 42 */
53int /* error status */ 43int /* error status */
@@ -56,10 +46,8 @@ xfs_bulkstat(
56 xfs_ino_t *lastino, /* last inode returned */ 46 xfs_ino_t *lastino, /* last inode returned */
57 int *count, /* size of buffer/count returned */ 47 int *count, /* size of buffer/count returned */
58 bulkstat_one_pf formatter, /* func that'd fill a single buf */ 48 bulkstat_one_pf formatter, /* func that'd fill a single buf */
59 void *private_data, /* private data for formatter */
60 size_t statstruct_size,/* sizeof struct that we're filling */ 49 size_t statstruct_size,/* sizeof struct that we're filling */
61 char __user *ubuffer,/* buffer with inode stats */ 50 char __user *ubuffer,/* buffer with inode stats */
62 int flags, /* flag to control access method */
63 int *done); /* 1 if there are more stats to get */ 51 int *done); /* 1 if there are more stats to get */
64 52
65int 53int
@@ -82,9 +70,7 @@ xfs_bulkstat_one_int(
82 void __user *buffer, 70 void __user *buffer,
83 int ubsize, 71 int ubsize,
84 bulkstat_one_fmt_pf formatter, 72 bulkstat_one_fmt_pf formatter,
85 xfs_daddr_t bno,
86 int *ubused, 73 int *ubused,
87 void *dibuff,
88 int *stat); 74 int *stat);
89 75
90int 76int
@@ -93,10 +79,7 @@ xfs_bulkstat_one(
93 xfs_ino_t ino, 79 xfs_ino_t ino,
94 void __user *buffer, 80 void __user *buffer,
95 int ubsize, 81 int ubsize,
96 void *private_data,
97 xfs_daddr_t bno,
98 int *ubused, 82 int *ubused,
99 void *dibuff,
100 int *stat); 83 int *stat);
101 84
102typedef int (*inumbers_fmt_pf)( 85typedef int (*inumbers_fmt_pf)(
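With the private_data pointer, the on-disk inode buffer, the starting-bno argument and the BULKSTAT_FG_* flags gone, the bulkstat entry points reduce to the prototypes shown above. A minimal sketch of a caller of the new seven-argument xfs_bulkstat() follows; mp, ubuffer and nelems stand in for whatever the caller already has, and the variable names are illustrative rather than taken from the patch:

	xfs_ino_t	lastino = 0;	/* resume cookie, 0 = start of filesystem */
	int		count = nelems;	/* in: slots in ubuffer, out: entries filled */
	int		done;
	int		error;

	/* No private_data and no BULKSTAT_FG_* access-method flag any more. */
	error = xfs_bulkstat(mp, &lastino, &count, xfs_bulkstat_one,
			     sizeof(xfs_bstat_t), ubuffer, &done);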
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 5215abc8023a..925d572bf0f4 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -24,8 +24,6 @@
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 27#include "xfs_mount.h"
30#include "xfs_error.h" 28#include "xfs_error.h"
31#include "xfs_log_priv.h" 29#include "xfs_log_priv.h"
@@ -35,8 +33,6 @@
35#include "xfs_ialloc_btree.h" 33#include "xfs_ialloc_btree.h"
36#include "xfs_log_recover.h" 34#include "xfs_log_recover.h"
37#include "xfs_trans_priv.h" 35#include "xfs_trans_priv.h"
38#include "xfs_dir2_sf.h"
39#include "xfs_attr_sf.h"
40#include "xfs_dinode.h" 36#include "xfs_dinode.h"
41#include "xfs_inode.h" 37#include "xfs_inode.h"
42#include "xfs_rw.h" 38#include "xfs_rw.h"
@@ -337,7 +333,6 @@ xfs_log_reserve(
337 int retval = 0; 333 int retval = 0;
338 334
339 ASSERT(client == XFS_TRANSACTION || client == XFS_LOG); 335 ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);
340 ASSERT((flags & XFS_LOG_NOSLEEP) == 0);
341 336
342 if (XLOG_FORCED_SHUTDOWN(log)) 337 if (XLOG_FORCED_SHUTDOWN(log))
343 return XFS_ERROR(EIO); 338 return XFS_ERROR(EIO);
@@ -552,7 +547,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
552 .magic = XLOG_UNMOUNT_TYPE, 547 .magic = XLOG_UNMOUNT_TYPE,
553 }; 548 };
554 struct xfs_log_iovec reg = { 549 struct xfs_log_iovec reg = {
555 .i_addr = (void *)&magic, 550 .i_addr = &magic,
556 .i_len = sizeof(magic), 551 .i_len = sizeof(magic),
557 .i_type = XLOG_REG_TYPE_UNMOUNT, 552 .i_type = XLOG_REG_TYPE_UNMOUNT,
558 }; 553 };
@@ -1047,7 +1042,6 @@ xlog_alloc_log(xfs_mount_t *mp,
1047 xlog_in_core_t *iclog, *prev_iclog=NULL; 1042 xlog_in_core_t *iclog, *prev_iclog=NULL;
1048 xfs_buf_t *bp; 1043 xfs_buf_t *bp;
1049 int i; 1044 int i;
1050 int iclogsize;
1051 int error = ENOMEM; 1045 int error = ENOMEM;
1052 uint log2_size = 0; 1046 uint log2_size = 0;
1053 1047
@@ -1127,7 +1121,6 @@ xlog_alloc_log(xfs_mount_t *mp,
1127 * with different amounts of memory. See the definition of 1121 * with different amounts of memory. See the definition of
1128 * xlog_in_core_t in xfs_log_priv.h for details. 1122 * xlog_in_core_t in xfs_log_priv.h for details.
1129 */ 1123 */
1130 iclogsize = log->l_iclog_size;
1131 ASSERT(log->l_iclog_size >= 4096); 1124 ASSERT(log->l_iclog_size >= 4096);
1132 for (i=0; i < log->l_iclog_bufs; i++) { 1125 for (i=0; i < log->l_iclog_bufs; i++) {
1133 *iclogp = kmem_zalloc(sizeof(xlog_in_core_t), KM_MAYFAIL); 1126 *iclogp = kmem_zalloc(sizeof(xlog_in_core_t), KM_MAYFAIL);
@@ -1428,11 +1421,8 @@ xlog_sync(xlog_t *log,
1428 XFS_BUF_BUSY(bp); 1421 XFS_BUF_BUSY(bp);
1429 XFS_BUF_ASYNC(bp); 1422 XFS_BUF_ASYNC(bp);
1430 bp->b_flags |= XBF_LOG_BUFFER; 1423 bp->b_flags |= XBF_LOG_BUFFER;
1431 /* 1424
1432 * Do an ordered write for the log block. 1425 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
1433 * Its unnecessary to flush the first split block in the log wrap case.
1434 */
1435 if (!split && (log->l_mp->m_flags & XFS_MOUNT_BARRIER))
1436 XFS_BUF_ORDERED(bp); 1426 XFS_BUF_ORDERED(bp);
1437 1427
1438 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); 1428 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 04c78e642cc8..916eb7db14d9 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -55,14 +55,10 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
55/* 55/*
56 * Flags to xfs_log_reserve() 56 * Flags to xfs_log_reserve()
57 * 57 *
58 * XFS_LOG_SLEEP: If space is not available, sleep (default)
59 * XFS_LOG_NOSLEEP: If space is not available, return error
60 * XFS_LOG_PERM_RESERV: Permanent reservation. When writes are 58 * XFS_LOG_PERM_RESERV: Permanent reservation. When writes are
61 * performed against this type of reservation, the reservation 59 * performed against this type of reservation, the reservation
62 * is not decreased. Long running transactions should use this. 60 * is not decreased. Long running transactions should use this.
63 */ 61 */
64#define XFS_LOG_SLEEP 0x0
65#define XFS_LOG_NOSLEEP 0x1
66#define XFS_LOG_PERM_RESERV 0x2 62#define XFS_LOG_PERM_RESERV 0x2
67 63
68/* 64/*
@@ -104,7 +100,7 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
104#define XLOG_REG_TYPE_MAX 19 100#define XLOG_REG_TYPE_MAX 19
105 101
106typedef struct xfs_log_iovec { 102typedef struct xfs_log_iovec {
107 xfs_caddr_t i_addr; /* beginning address of region */ 103 void *i_addr; /* beginning address of region */
108 int i_len; /* length in bytes of region */ 104 int i_len; /* length in bytes of region */
109 uint i_type; /* type of region */ 105 uint i_type; /* type of region */
110} xfs_log_iovec_t; 106} xfs_log_iovec_t;
@@ -201,9 +197,4 @@ int xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
201bool xfs_log_item_in_current_chkpt(struct xfs_log_item *lip); 197bool xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);
202 198
203#endif 199#endif
204
205
206extern int xlog_debug; /* set to 1 to enable real log */
207
208
209#endif /* __XFS_LOG_H__ */ 200#endif /* __XFS_LOG_H__ */
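The switch of i_addr from xfs_caddr_t to void * above is what allows the cast removals in the log, CIL and recovery code that follow. A typical iovec setup now assigns the structure address directly; this fragment simply mirrors the unmount-record hunk in xfs_log.c above (magic is the local unmount record defined there) and is not additional code from the patch:

	struct xfs_log_iovec reg = {
		.i_addr	= &magic,		/* was (void *)&magic before this change */
		.i_len	= sizeof(magic),
		.i_type	= XLOG_REG_TYPE_UNMOUNT,
	};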
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index bb17cc044bf3..31e4ea2d19ac 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -26,8 +26,6 @@
26#include "xfs_log_priv.h" 26#include "xfs_log_priv.h"
27#include "xfs_sb.h" 27#include "xfs_sb.h"
28#include "xfs_ag.h" 28#include "xfs_ag.h"
29#include "xfs_dir2.h"
30#include "xfs_dmapi.h"
31#include "xfs_mount.h" 29#include "xfs_mount.h"
32#include "xfs_error.h" 30#include "xfs_error.h"
33#include "xfs_alloc.h" 31#include "xfs_alloc.h"
@@ -554,7 +552,7 @@ xlog_cil_push(
554 thdr.th_type = XFS_TRANS_CHECKPOINT; 552 thdr.th_type = XFS_TRANS_CHECKPOINT;
555 thdr.th_tid = tic->t_tid; 553 thdr.th_tid = tic->t_tid;
556 thdr.th_num_items = num_iovecs; 554 thdr.th_num_items = num_iovecs;
557 lhdr.i_addr = (xfs_caddr_t)&thdr; 555 lhdr.i_addr = &thdr;
558 lhdr.i_len = sizeof(xfs_trans_header_t); 556 lhdr.i_len = sizeof(xfs_trans_header_t);
559 lhdr.i_type = XLOG_REG_TYPE_TRANSHDR; 557 lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
560 tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t); 558 tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index ed0684cc50ee..6f3f5fa37acf 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -24,15 +24,11 @@
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 27#include "xfs_mount.h"
30#include "xfs_error.h" 28#include "xfs_error.h"
31#include "xfs_bmap_btree.h" 29#include "xfs_bmap_btree.h"
32#include "xfs_alloc_btree.h" 30#include "xfs_alloc_btree.h"
33#include "xfs_ialloc_btree.h" 31#include "xfs_ialloc_btree.h"
34#include "xfs_dir2_sf.h"
35#include "xfs_attr_sf.h"
36#include "xfs_dinode.h" 32#include "xfs_dinode.h"
37#include "xfs_inode.h" 33#include "xfs_inode.h"
38#include "xfs_inode_item.h" 34#include "xfs_inode_item.h"
@@ -1565,9 +1561,7 @@ xlog_recover_reorder_trans(
1565 1561
1566 list_splice_init(&trans->r_itemq, &sort_list); 1562 list_splice_init(&trans->r_itemq, &sort_list);
1567 list_for_each_entry_safe(item, n, &sort_list, ri_list) { 1563 list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1568 xfs_buf_log_format_t *buf_f; 1564 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
1569
1570 buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
1571 1565
1572 switch (ITEM_TYPE(item)) { 1566 switch (ITEM_TYPE(item)) {
1573 case XFS_LI_BUF: 1567 case XFS_LI_BUF:
@@ -1892,9 +1886,8 @@ xlog_recover_do_inode_buffer(
1892 * current di_next_unlinked field. Extract its value 1886 * current di_next_unlinked field. Extract its value
1893 * and copy it to the buffer copy. 1887 * and copy it to the buffer copy.
1894 */ 1888 */
1895 logged_nextp = (xfs_agino_t *) 1889 logged_nextp = item->ri_buf[item_index].i_addr +
1896 ((char *)(item->ri_buf[item_index].i_addr) + 1890 next_unlinked_offset - reg_buf_offset;
1897 (next_unlinked_offset - reg_buf_offset));
1898 if (unlikely(*logged_nextp == 0)) { 1891 if (unlikely(*logged_nextp == 0)) {
1899 xfs_fs_cmn_err(CE_ALERT, mp, 1892 xfs_fs_cmn_err(CE_ALERT, mp,
1900 "bad inode buffer log record (ptr = 0x%p, bp = 0x%p). XFS trying to replay bad (0) inode di_next_unlinked field", 1893 "bad inode buffer log record (ptr = 0x%p, bp = 0x%p). XFS trying to replay bad (0) inode di_next_unlinked field",
@@ -1973,8 +1966,7 @@ xlog_recover_do_reg_buffer(
1973 item->ri_buf[i].i_len, __func__); 1966 item->ri_buf[i].i_len, __func__);
1974 goto next; 1967 goto next;
1975 } 1968 }
1976 error = xfs_qm_dqcheck((xfs_disk_dquot_t *) 1969 error = xfs_qm_dqcheck(item->ri_buf[i].i_addr,
1977 item->ri_buf[i].i_addr,
1978 -1, 0, XFS_QMOPT_DOWARN, 1970 -1, 0, XFS_QMOPT_DOWARN,
1979 "dquot_buf_recover"); 1971 "dquot_buf_recover");
1980 if (error) 1972 if (error)
@@ -2187,7 +2179,7 @@ xlog_recover_do_buffer_trans(
2187 xlog_recover_item_t *item, 2179 xlog_recover_item_t *item,
2188 int pass) 2180 int pass)
2189{ 2181{
2190 xfs_buf_log_format_t *buf_f; 2182 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
2191 xfs_mount_t *mp; 2183 xfs_mount_t *mp;
2192 xfs_buf_t *bp; 2184 xfs_buf_t *bp;
2193 int error; 2185 int error;
@@ -2197,8 +2189,6 @@ xlog_recover_do_buffer_trans(
2197 ushort flags; 2189 ushort flags;
2198 uint buf_flags; 2190 uint buf_flags;
2199 2191
2200 buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
2201
2202 if (pass == XLOG_RECOVER_PASS1) { 2192 if (pass == XLOG_RECOVER_PASS1) {
2203 /* 2193 /*
2204 * In this pass we're only looking for buf items 2194 * In this pass we're only looking for buf items
@@ -2319,10 +2309,9 @@ xlog_recover_do_inode_trans(
2319 } 2309 }
2320 2310
2321 if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) { 2311 if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2322 in_f = (xfs_inode_log_format_t *)item->ri_buf[0].i_addr; 2312 in_f = item->ri_buf[0].i_addr;
2323 } else { 2313 } else {
2324 in_f = (xfs_inode_log_format_t *)kmem_alloc( 2314 in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2325 sizeof(xfs_inode_log_format_t), KM_SLEEP);
2326 need_free = 1; 2315 need_free = 1;
2327 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f); 2316 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2328 if (error) 2317 if (error)
@@ -2370,7 +2359,7 @@ xlog_recover_do_inode_trans(
2370 error = EFSCORRUPTED; 2359 error = EFSCORRUPTED;
2371 goto error; 2360 goto error;
2372 } 2361 }
2373 dicp = (xfs_icdinode_t *)(item->ri_buf[1].i_addr); 2362 dicp = item->ri_buf[1].i_addr;
2374 if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) { 2363 if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2375 xfs_buf_relse(bp); 2364 xfs_buf_relse(bp);
2376 xfs_fs_cmn_err(CE_ALERT, mp, 2365 xfs_fs_cmn_err(CE_ALERT, mp,
@@ -2461,7 +2450,7 @@ xlog_recover_do_inode_trans(
2461 } 2450 }
2462 2451
2463 /* The core is in in-core format */ 2452 /* The core is in in-core format */
2464 xfs_dinode_to_disk(dip, (xfs_icdinode_t *)item->ri_buf[1].i_addr); 2453 xfs_dinode_to_disk(dip, item->ri_buf[1].i_addr);
2465 2454
2466 /* the rest is in on-disk format */ 2455 /* the rest is in on-disk format */
2467 if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) { 2456 if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) {
@@ -2578,7 +2567,7 @@ xlog_recover_do_quotaoff_trans(
2578 return (0); 2567 return (0);
2579 } 2568 }
2580 2569
2581 qoff_f = (xfs_qoff_logformat_t *)item->ri_buf[0].i_addr; 2570 qoff_f = item->ri_buf[0].i_addr;
2582 ASSERT(qoff_f); 2571 ASSERT(qoff_f);
2583 2572
2584 /* 2573 /*
@@ -2622,9 +2611,8 @@ xlog_recover_do_dquot_trans(
2622 if (mp->m_qflags == 0) 2611 if (mp->m_qflags == 0)
2623 return (0); 2612 return (0);
2624 2613
2625 recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr; 2614 recddq = item->ri_buf[1].i_addr;
2626 2615 if (recddq == NULL) {
2627 if (item->ri_buf[1].i_addr == NULL) {
2628 cmn_err(CE_ALERT, 2616 cmn_err(CE_ALERT,
2629 "XFS: NULL dquot in %s.", __func__); 2617 "XFS: NULL dquot in %s.", __func__);
2630 return XFS_ERROR(EIO); 2618 return XFS_ERROR(EIO);
@@ -2654,7 +2642,7 @@ xlog_recover_do_dquot_trans(
2654 * The other possibility, of course, is that the quota subsystem was 2642 * The other possibility, of course, is that the quota subsystem was
2655 * removed since the last mount - ENOSYS. 2643 * removed since the last mount - ENOSYS.
2656 */ 2644 */
2657 dq_f = (xfs_dq_logformat_t *)item->ri_buf[0].i_addr; 2645 dq_f = item->ri_buf[0].i_addr;
2658 ASSERT(dq_f); 2646 ASSERT(dq_f);
2659 if ((error = xfs_qm_dqcheck(recddq, 2647 if ((error = xfs_qm_dqcheck(recddq,
2660 dq_f->qlf_id, 2648 dq_f->qlf_id,
@@ -2721,7 +2709,7 @@ xlog_recover_do_efi_trans(
2721 return 0; 2709 return 0;
2722 } 2710 }
2723 2711
2724 efi_formatp = (xfs_efi_log_format_t *)item->ri_buf[0].i_addr; 2712 efi_formatp = item->ri_buf[0].i_addr;
2725 2713
2726 mp = log->l_mp; 2714 mp = log->l_mp;
2727 efip = xfs_efi_init(mp, efi_formatp->efi_nextents); 2715 efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
@@ -2767,7 +2755,7 @@ xlog_recover_do_efd_trans(
2767 return; 2755 return;
2768 } 2756 }
2769 2757
2770 efd_formatp = (xfs_efd_log_format_t *)item->ri_buf[0].i_addr; 2758 efd_formatp = item->ri_buf[0].i_addr;
2771 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) + 2759 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
2772 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) || 2760 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
2773 (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) + 2761 (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
@@ -3198,7 +3186,7 @@ xlog_recover_process_one_iunlink(
3198 int error; 3186 int error;
3199 3187
3200 ino = XFS_AGINO_TO_INO(mp, agno, agino); 3188 ino = XFS_AGINO_TO_INO(mp, agno, agino);
3201 error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0); 3189 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
3202 if (error) 3190 if (error)
3203 goto fail; 3191 goto fail;
3204 3192
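One subtlety in the recovery changes above: with i_addr now void *, the di_next_unlinked lookup does its byte arithmetic directly on the iovec address. This works because the kernel builds with the GCC extension that gives void * arithmetic a stride of one byte, which is exactly why the old char * cast in xlog_recover_do_inode_buffer() could simply be dropped; the resulting line, repeated here for emphasis, is:

	logged_nextp = item->ri_buf[item_index].i_addr +
			next_unlinked_offset - reg_buf_offset;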
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index d59f4e8bedcf..aeb9d72ebf6e 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -25,13 +25,10 @@
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h" 27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 28#include "xfs_mount.h"
30#include "xfs_bmap_btree.h" 29#include "xfs_bmap_btree.h"
31#include "xfs_alloc_btree.h" 30#include "xfs_alloc_btree.h"
32#include "xfs_ialloc_btree.h" 31#include "xfs_ialloc_btree.h"
33#include "xfs_dir2_sf.h"
34#include "xfs_attr_sf.h"
35#include "xfs_dinode.h" 32#include "xfs_dinode.h"
36#include "xfs_inode.h" 33#include "xfs_inode.h"
37#include "xfs_btree.h" 34#include "xfs_btree.h"
@@ -1300,7 +1297,7 @@ xfs_mountfs(
1300 * Get and sanity-check the root inode. 1297 * Get and sanity-check the root inode.
1301 * Save the pointer to it in the mount structure. 1298 * Save the pointer to it in the mount structure.
1302 */ 1299 */
1303 error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip, 0); 1300 error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip);
1304 if (error) { 1301 if (error) {
1305 cmn_err(CE_WARN, "XFS: failed to read root inode"); 1302 cmn_err(CE_WARN, "XFS: failed to read root inode");
1306 goto out_log_dealloc; 1303 goto out_log_dealloc;
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 1d2c7eed4eda..622da2179a57 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -66,65 +66,6 @@ struct xfs_nameops;
66struct xfs_ail; 66struct xfs_ail;
67struct xfs_quotainfo; 67struct xfs_quotainfo;
68 68
69
70/*
71 * Prototypes and functions for the Data Migration subsystem.
72 */
73
74typedef int (*xfs_send_data_t)(int, struct xfs_inode *,
75 xfs_off_t, size_t, int, int *);
76typedef int (*xfs_send_mmap_t)(struct vm_area_struct *, uint);
77typedef int (*xfs_send_destroy_t)(struct xfs_inode *, dm_right_t);
78typedef int (*xfs_send_namesp_t)(dm_eventtype_t, struct xfs_mount *,
79 struct xfs_inode *, dm_right_t,
80 struct xfs_inode *, dm_right_t,
81 const unsigned char *, const unsigned char *,
82 mode_t, int, int);
83typedef int (*xfs_send_mount_t)(struct xfs_mount *, dm_right_t,
84 char *, char *);
85typedef void (*xfs_send_unmount_t)(struct xfs_mount *, struct xfs_inode *,
86 dm_right_t, mode_t, int, int);
87
88typedef struct xfs_dmops {
89 xfs_send_data_t xfs_send_data;
90 xfs_send_mmap_t xfs_send_mmap;
91 xfs_send_destroy_t xfs_send_destroy;
92 xfs_send_namesp_t xfs_send_namesp;
93 xfs_send_mount_t xfs_send_mount;
94 xfs_send_unmount_t xfs_send_unmount;
95} xfs_dmops_t;
96
97#define XFS_DMAPI_UNMOUNT_FLAGS(mp) \
98 (((mp)->m_dmevmask & (1 << DM_EVENT_UNMOUNT)) ? 0 : DM_FLAGS_UNWANTED)
99
100#define XFS_SEND_DATA(mp, ev,ip,off,len,fl,lock) \
101 (*(mp)->m_dm_ops->xfs_send_data)(ev,ip,off,len,fl,lock)
102#define XFS_SEND_MMAP(mp, vma,fl) \
103 (*(mp)->m_dm_ops->xfs_send_mmap)(vma,fl)
104#define XFS_SEND_DESTROY(mp, ip,right) \
105 (*(mp)->m_dm_ops->xfs_send_destroy)(ip,right)
106#define XFS_SEND_NAMESP(mp, ev,b1,r1,b2,r2,n1,n2,mode,rval,fl) \
107 (*(mp)->m_dm_ops->xfs_send_namesp)(ev,NULL,b1,r1,b2,r2,n1,n2,mode,rval,fl)
108#define XFS_SEND_MOUNT(mp,right,path,name) \
109 (*(mp)->m_dm_ops->xfs_send_mount)(mp,right,path,name)
110#define XFS_SEND_PREUNMOUNT(mp) \
111do { \
112 if (mp->m_flags & XFS_MOUNT_DMAPI) { \
113 (*(mp)->m_dm_ops->xfs_send_namesp)(DM_EVENT_PREUNMOUNT, mp, \
114 (mp)->m_rootip, DM_RIGHT_NULL, \
115 (mp)->m_rootip, DM_RIGHT_NULL, \
116 NULL, NULL, 0, 0, XFS_DMAPI_UNMOUNT_FLAGS(mp)); \
117 } \
118} while (0)
119#define XFS_SEND_UNMOUNT(mp) \
120do { \
121 if (mp->m_flags & XFS_MOUNT_DMAPI) { \
122 (*(mp)->m_dm_ops->xfs_send_unmount)(mp, (mp)->m_rootip, \
123 DM_RIGHT_NULL, 0, 0, XFS_DMAPI_UNMOUNT_FLAGS(mp)); \
124 } \
125} while (0)
126
127
128#ifdef HAVE_PERCPU_SB 69#ifdef HAVE_PERCPU_SB
129 70
130/* 71/*
@@ -241,8 +182,6 @@ typedef struct xfs_mount {
241 uint m_chsize; /* size of next field */ 182 uint m_chsize; /* size of next field */
242 struct xfs_chash *m_chash; /* fs private inode per-cluster 183 struct xfs_chash *m_chash; /* fs private inode per-cluster
243 * hash table */ 184 * hash table */
244 struct xfs_dmops *m_dm_ops; /* vector of DMI ops */
245 struct xfs_qmops *m_qm_ops; /* vector of XQM ops */
246 atomic_t m_active_trans; /* number trans frozen */ 185 atomic_t m_active_trans; /* number trans frozen */
247#ifdef HAVE_PERCPU_SB 186#ifdef HAVE_PERCPU_SB
248 xfs_icsb_cnts_t __percpu *m_sb_cnts; /* per-cpu superblock counters */ 187 xfs_icsb_cnts_t __percpu *m_sb_cnts; /* per-cpu superblock counters */
@@ -259,7 +198,7 @@ typedef struct xfs_mount {
259 wait_queue_head_t m_wait_single_sync_task; 198 wait_queue_head_t m_wait_single_sync_task;
260 __int64_t m_update_flags; /* sb flags we need to update 199 __int64_t m_update_flags; /* sb flags we need to update
261 on the next remount,rw */ 200 on the next remount,rw */
262 struct list_head m_mplist; /* inode shrinker mount list */ 201 struct shrinker m_inode_shrink; /* inode reclaim shrinker */
263} xfs_mount_t; 202} xfs_mount_t;
264 203
265/* 204/*
@@ -269,7 +208,6 @@ typedef struct xfs_mount {
269 must be synchronous except 208 must be synchronous except
270 for space allocations */ 209 for space allocations */
271#define XFS_MOUNT_DELAYLOG (1ULL << 1) /* delayed logging is enabled */ 210#define XFS_MOUNT_DELAYLOG (1ULL << 1) /* delayed logging is enabled */
272#define XFS_MOUNT_DMAPI (1ULL << 2) /* dmapi is enabled */
273#define XFS_MOUNT_WAS_CLEAN (1ULL << 3) 211#define XFS_MOUNT_WAS_CLEAN (1ULL << 3)
274#define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem 212#define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem
275 operations, typically for 213 operations, typically for
@@ -282,8 +220,6 @@ typedef struct xfs_mount {
282#define XFS_MOUNT_GRPID (1ULL << 9) /* group-ID assigned from directory */ 220#define XFS_MOUNT_GRPID (1ULL << 9) /* group-ID assigned from directory */
283#define XFS_MOUNT_NORECOVERY (1ULL << 10) /* no recovery - dirty fs */ 221#define XFS_MOUNT_NORECOVERY (1ULL << 10) /* no recovery - dirty fs */
284#define XFS_MOUNT_DFLT_IOSIZE (1ULL << 12) /* set default i/o size */ 222#define XFS_MOUNT_DFLT_IOSIZE (1ULL << 12) /* set default i/o size */
285#define XFS_MOUNT_OSYNCISOSYNC (1ULL << 13) /* o_sync is REALLY o_sync */
286 /* osyncisdsync is now default*/
287#define XFS_MOUNT_32BITINODES (1ULL << 14) /* do not create inodes above 223#define XFS_MOUNT_32BITINODES (1ULL << 14) /* do not create inodes above
288 * 32 bits in size */ 224 * 32 bits in size */
289#define XFS_MOUNT_SMALL_INUMS (1ULL << 15) /* user wants 32bit inodes */ 225#define XFS_MOUNT_SMALL_INUMS (1ULL << 15) /* user wants 32bit inodes */
@@ -440,11 +376,6 @@ extern int xfs_sb_validate_fsb_count(struct xfs_sb *, __uint64_t);
440 376
441extern int xfs_dev_is_read_only(struct xfs_mount *, char *); 377extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
442 378
443extern int xfs_dmops_get(struct xfs_mount *);
444extern void xfs_dmops_put(struct xfs_mount *);
445
446extern struct xfs_dmops xfs_dmcore_xfs;
447
448#endif /* __KERNEL__ */ 379#endif /* __KERNEL__ */
449 380
450extern void xfs_mod_sb(struct xfs_trans *, __int64_t); 381extern void xfs_mod_sb(struct xfs_trans *, __int64_t);
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index fc1cda23b817..8fca957200df 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -24,12 +24,9 @@
24#include "xfs_sb.h" 24#include "xfs_sb.h"
25#include "xfs_ag.h" 25#include "xfs_ag.h"
26#include "xfs_dir2.h" 26#include "xfs_dir2.h"
27#include "xfs_dmapi.h"
28#include "xfs_mount.h" 27#include "xfs_mount.h"
29#include "xfs_da_btree.h" 28#include "xfs_da_btree.h"
30#include "xfs_bmap_btree.h" 29#include "xfs_bmap_btree.h"
31#include "xfs_dir2_sf.h"
32#include "xfs_attr_sf.h"
33#include "xfs_dinode.h" 30#include "xfs_dinode.h"
34#include "xfs_inode.h" 31#include "xfs_inode.h"
35#include "xfs_inode_item.h" 32#include "xfs_inode_item.h"
@@ -116,20 +113,7 @@ xfs_rename(
116 int spaceres; 113 int spaceres;
117 int num_inodes; 114 int num_inodes;
118 115
119 xfs_itrace_entry(src_dp); 116 trace_xfs_rename(src_dp, target_dp, src_name, target_name);
120 xfs_itrace_entry(target_dp);
121
122 if (DM_EVENT_ENABLED(src_dp, DM_EVENT_RENAME) ||
123 DM_EVENT_ENABLED(target_dp, DM_EVENT_RENAME)) {
124 error = XFS_SEND_NAMESP(mp, DM_EVENT_RENAME,
125 src_dp, DM_RIGHT_NULL,
126 target_dp, DM_RIGHT_NULL,
127 src_name->name, target_name->name,
128 0, 0, 0);
129 if (error)
130 return error;
131 }
132 /* Return through std_return after this point. */
133 117
134 new_parent = (src_dp != target_dp); 118 new_parent = (src_dp != target_dp);
135 src_is_directory = ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR); 119 src_is_directory = ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR);
@@ -184,26 +168,14 @@ xfs_rename(
184 /* 168 /*
185 * Join all the inodes to the transaction. From this point on, 169 * Join all the inodes to the transaction. From this point on,
186 * we can rely on either trans_commit or trans_cancel to unlock 170 * we can rely on either trans_commit or trans_cancel to unlock
187 * them. Note that we need to add a vnode reference to the 171 * them.
188 * directories since trans_commit & trans_cancel will decrement
189 * them when they unlock the inodes. Also, we need to be careful
190 * not to add an inode to the transaction more than once.
191 */ 172 */
192 IHOLD(src_dp); 173 xfs_trans_ijoin_ref(tp, src_dp, XFS_ILOCK_EXCL);
193 xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL); 174 if (new_parent)
194 175 xfs_trans_ijoin_ref(tp, target_dp, XFS_ILOCK_EXCL);
195 if (new_parent) { 176 xfs_trans_ijoin_ref(tp, src_ip, XFS_ILOCK_EXCL);
196 IHOLD(target_dp); 177 if (target_ip)
197 xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL); 178 xfs_trans_ijoin_ref(tp, target_ip, XFS_ILOCK_EXCL);
198 }
199
200 IHOLD(src_ip);
201 xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
202
203 if (target_ip) {
204 IHOLD(target_ip);
205 xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
206 }
207 179
208 /* 180 /*
209 * If we are using project inheritance, we only allow renames 181 * If we are using project inheritance, we only allow renames
@@ -369,26 +341,13 @@ xfs_rename(
369 * trans_commit will unlock src_ip, target_ip & decrement 341 * trans_commit will unlock src_ip, target_ip & decrement
370 * the vnode references. 342 * the vnode references.
371 */ 343 */
372 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); 344 return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
373
374 /* Fall through to std_return with error = 0 or errno from
375 * xfs_trans_commit */
376std_return:
377 if (DM_EVENT_ENABLED(src_dp, DM_EVENT_POSTRENAME) ||
378 DM_EVENT_ENABLED(target_dp, DM_EVENT_POSTRENAME)) {
379 (void) XFS_SEND_NAMESP (mp, DM_EVENT_POSTRENAME,
380 src_dp, DM_RIGHT_NULL,
381 target_dp, DM_RIGHT_NULL,
382 src_name->name, target_name->name,
383 0, error, 0);
384 }
385 return error;
386 345
387 abort_return: 346 abort_return:
388 cancel_flags |= XFS_TRANS_ABORT; 347 cancel_flags |= XFS_TRANS_ABORT;
389 /* FALLTHROUGH */
390 error_return: 348 error_return:
391 xfs_bmap_cancel(&free_list); 349 xfs_bmap_cancel(&free_list);
392 xfs_trans_cancel(tp, cancel_flags); 350 xfs_trans_cancel(tp, cancel_flags);
393 goto std_return; 351 std_return:
352 return error;
394} 353}
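The rename hunks above also show the new idiom for attaching inodes to a transaction. The removed comment explains that the old code needed an explicit IHOLD() because trans_commit and trans_cancel drop a reference when they unlock the inodes; xfs_trans_ijoin_ref() appears to fold that reference handling into the join itself. In sketch form, with ip and tp as generic placeholders, the conversion is:

	/* old pattern, removed above */
	IHOLD(ip);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	/* new pattern: a single call joins the inode and takes over the
	 * reference handling (see the removed comment above).
	 */
	xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);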
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 16445518506d..891260fea11e 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -25,17 +25,10 @@
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h" 27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 28#include "xfs_mount.h"
30#include "xfs_bmap_btree.h" 29#include "xfs_bmap_btree.h"
31#include "xfs_alloc_btree.h"
32#include "xfs_ialloc_btree.h"
33#include "xfs_dir2_sf.h"
34#include "xfs_attr_sf.h"
35#include "xfs_dinode.h" 30#include "xfs_dinode.h"
36#include "xfs_inode.h" 31#include "xfs_inode.h"
37#include "xfs_btree.h"
38#include "xfs_ialloc.h"
39#include "xfs_alloc.h" 32#include "xfs_alloc.h"
40#include "xfs_bmap.h" 33#include "xfs_bmap.h"
41#include "xfs_rtalloc.h" 34#include "xfs_rtalloc.h"
@@ -129,7 +122,7 @@ xfs_growfs_rt_alloc(
129 cancelflags |= XFS_TRANS_ABORT; 122 cancelflags |= XFS_TRANS_ABORT;
130 error = xfs_bmapi(tp, ip, oblocks, nblocks - oblocks, 123 error = xfs_bmapi(tp, ip, oblocks, nblocks - oblocks,
131 XFS_BMAPI_WRITE | XFS_BMAPI_METADATA, &firstblock, 124 XFS_BMAPI_WRITE | XFS_BMAPI_METADATA, &firstblock,
132 resblks, &map, &nmap, &flist, NULL); 125 resblks, &map, &nmap, &flist);
133 if (!error && nmap < 1) 126 if (!error && nmap < 1)
134 error = XFS_ERROR(ENOSPC); 127 error = XFS_ERROR(ENOSPC);
135 if (error) 128 if (error)
@@ -2277,12 +2270,12 @@ xfs_rtmount_inodes(
2277 sbp = &mp->m_sb; 2270 sbp = &mp->m_sb;
2278 if (sbp->sb_rbmino == NULLFSINO) 2271 if (sbp->sb_rbmino == NULLFSINO)
2279 return 0; 2272 return 0;
2280 error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip, 0); 2273 error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip);
2281 if (error) 2274 if (error)
2282 return error; 2275 return error;
2283 ASSERT(mp->m_rbmip != NULL); 2276 ASSERT(mp->m_rbmip != NULL);
2284 ASSERT(sbp->sb_rsumino != NULLFSINO); 2277 ASSERT(sbp->sb_rsumino != NULLFSINO);
2285 error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip, 0); 2278 error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip);
2286 if (error) { 2279 if (error) {
2287 IRELE(mp->m_rbmip); 2280 IRELE(mp->m_rbmip);
2288 return error; 2281 return error;
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index e336742a58a4..56861d5daaef 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -24,27 +24,12 @@
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 27#include "xfs_mount.h"
30#include "xfs_bmap_btree.h" 28#include "xfs_bmap_btree.h"
31#include "xfs_alloc_btree.h"
32#include "xfs_ialloc_btree.h"
33#include "xfs_dir2_sf.h"
34#include "xfs_attr_sf.h"
35#include "xfs_dinode.h" 29#include "xfs_dinode.h"
36#include "xfs_inode.h" 30#include "xfs_inode.h"
37#include "xfs_inode_item.h"
38#include "xfs_itable.h"
39#include "xfs_btree.h"
40#include "xfs_alloc.h"
41#include "xfs_ialloc.h"
42#include "xfs_attr.h"
43#include "xfs_bmap.h"
44#include "xfs_error.h" 31#include "xfs_error.h"
45#include "xfs_buf_item.h"
46#include "xfs_rw.h" 32#include "xfs_rw.h"
47#include "xfs_trace.h"
48 33
49/* 34/*
50 * Force a shutdown of the filesystem instantly while keeping 35 * Force a shutdown of the filesystem instantly while keeping
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 28547dfce037..fdca7416c754 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. 2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
3 * Copyright (C) 2010 Red Hat, Inc.
3 * All Rights Reserved. 4 * All Rights Reserved.
4 * 5 *
5 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
@@ -24,16 +25,12 @@
24#include "xfs_trans.h" 25#include "xfs_trans.h"
25#include "xfs_sb.h" 26#include "xfs_sb.h"
26#include "xfs_ag.h" 27#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 28#include "xfs_mount.h"
30#include "xfs_error.h" 29#include "xfs_error.h"
31#include "xfs_da_btree.h" 30#include "xfs_da_btree.h"
32#include "xfs_bmap_btree.h" 31#include "xfs_bmap_btree.h"
33#include "xfs_alloc_btree.h" 32#include "xfs_alloc_btree.h"
34#include "xfs_ialloc_btree.h" 33#include "xfs_ialloc_btree.h"
35#include "xfs_dir2_sf.h"
36#include "xfs_attr_sf.h"
37#include "xfs_dinode.h" 34#include "xfs_dinode.h"
38#include "xfs_inode.h" 35#include "xfs_inode.h"
39#include "xfs_btree.h" 36#include "xfs_btree.h"
@@ -47,6 +44,7 @@
47#include "xfs_trace.h" 44#include "xfs_trace.h"
48 45
49kmem_zone_t *xfs_trans_zone; 46kmem_zone_t *xfs_trans_zone;
47kmem_zone_t *xfs_log_item_desc_zone;
50 48
51 49
52/* 50/*
@@ -597,8 +595,7 @@ _xfs_trans_alloc(
597 tp->t_magic = XFS_TRANS_MAGIC; 595 tp->t_magic = XFS_TRANS_MAGIC;
598 tp->t_type = type; 596 tp->t_type = type;
599 tp->t_mountp = mp; 597 tp->t_mountp = mp;
600 tp->t_items_free = XFS_LIC_NUM_SLOTS; 598 INIT_LIST_HEAD(&tp->t_items);
601 xfs_lic_init(&(tp->t_items));
602 INIT_LIST_HEAD(&tp->t_busy); 599 INIT_LIST_HEAD(&tp->t_busy);
603 return tp; 600 return tp;
604} 601}
@@ -643,8 +640,7 @@ xfs_trans_dup(
643 ntp->t_magic = XFS_TRANS_MAGIC; 640 ntp->t_magic = XFS_TRANS_MAGIC;
644 ntp->t_type = tp->t_type; 641 ntp->t_type = tp->t_type;
645 ntp->t_mountp = tp->t_mountp; 642 ntp->t_mountp = tp->t_mountp;
646 ntp->t_items_free = XFS_LIC_NUM_SLOTS; 643 INIT_LIST_HEAD(&ntp->t_items);
647 xfs_lic_init(&(ntp->t_items));
648 INIT_LIST_HEAD(&ntp->t_busy); 644 INIT_LIST_HEAD(&ntp->t_busy);
649 645
650 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); 646 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
@@ -1124,6 +1120,108 @@ xfs_trans_unreserve_and_mod_sb(
1124} 1120}
1125 1121
1126/* 1122/*
1123 * Add the given log item to the transaction's list of log items.
1124 *
1125 * The log item will now point to its new descriptor with its li_desc field.
1126 */
1127void
1128xfs_trans_add_item(
1129 struct xfs_trans *tp,
1130 struct xfs_log_item *lip)
1131{
1132 struct xfs_log_item_desc *lidp;
1133
1134 ASSERT(lip->li_mountp == tp->t_mountp);
1135 ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
1136
1137 lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);
1138
1139 lidp->lid_item = lip;
1140 lidp->lid_flags = 0;
1141 lidp->lid_size = 0;
1142 list_add_tail(&lidp->lid_trans, &tp->t_items);
1143
1144 lip->li_desc = lidp;
1145}
1146
1147STATIC void
1148xfs_trans_free_item_desc(
1149 struct xfs_log_item_desc *lidp)
1150{
1151 list_del_init(&lidp->lid_trans);
1152 kmem_zone_free(xfs_log_item_desc_zone, lidp);
1153}
1154
1155/*
1156 * Unlink and free the given descriptor.
1157 */
1158void
1159xfs_trans_del_item(
1160 struct xfs_log_item *lip)
1161{
1162 xfs_trans_free_item_desc(lip->li_desc);
1163 lip->li_desc = NULL;
1164}
1165
1166/*
1167 * Unlock all of the items of a transaction and free all the descriptors
1168 * of that transaction.
1169 */
1170STATIC void
1171xfs_trans_free_items(
1172 struct xfs_trans *tp,
1173 xfs_lsn_t commit_lsn,
1174 int flags)
1175{
1176 struct xfs_log_item_desc *lidp, *next;
1177
1178 list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
1179 struct xfs_log_item *lip = lidp->lid_item;
1180
1181 lip->li_desc = NULL;
1182
1183 if (commit_lsn != NULLCOMMITLSN)
1184 IOP_COMMITTING(lip, commit_lsn);
1185 if (flags & XFS_TRANS_ABORT)
1186 lip->li_flags |= XFS_LI_ABORTED;
1187 IOP_UNLOCK(lip);
1188
1189 xfs_trans_free_item_desc(lidp);
1190 }
1191}
1192
1193/*
1194 * Unlock the items associated with a transaction.
1195 *
1196 * Items which were not logged should be freed. Those which were logged must
1197 * still be tracked so they can be unpinned when the transaction commits.
1198 */
1199STATIC void
1200xfs_trans_unlock_items(
1201 struct xfs_trans *tp,
1202 xfs_lsn_t commit_lsn)
1203{
1204 struct xfs_log_item_desc *lidp, *next;
1205
1206 list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
1207 struct xfs_log_item *lip = lidp->lid_item;
1208
1209 lip->li_desc = NULL;
1210
1211 if (commit_lsn != NULLCOMMITLSN)
1212 IOP_COMMITTING(lip, commit_lsn);
1213 IOP_UNLOCK(lip);
1214
1215 /*
1216 * Free the descriptor if the item is not dirty
1217 * within this transaction.
1218 */
1219 if (!(lidp->lid_flags & XFS_LID_DIRTY))
1220 xfs_trans_free_item_desc(lidp);
1221 }
1222}
1223
1224/*
1127 * Total up the number of log iovecs needed to commit this 1225 * Total up the number of log iovecs needed to commit this
1128 * transaction. The transaction itself needs one for the 1226 * transaction. The transaction itself needs one for the
1129 * transaction header. Ask each dirty item in turn how many 1227 * transaction header. Ask each dirty item in turn how many
@@ -1134,30 +1232,27 @@ xfs_trans_count_vecs(
1134 struct xfs_trans *tp) 1232 struct xfs_trans *tp)
1135{ 1233{
1136 int nvecs; 1234 int nvecs;
1137 xfs_log_item_desc_t *lidp; 1235 struct xfs_log_item_desc *lidp;
1138 1236
1139 nvecs = 1; 1237 nvecs = 1;
1140 lidp = xfs_trans_first_item(tp);
1141 ASSERT(lidp != NULL);
1142 1238
1143 /* In the non-debug case we need to start bailing out if we 1239 /* In the non-debug case we need to start bailing out if we
1144 * didn't find a log_item here, return zero and let trans_commit 1240 * didn't find a log_item here, return zero and let trans_commit
1145 * deal with it. 1241 * deal with it.
1146 */ 1242 */
1147 if (lidp == NULL) 1243 if (list_empty(&tp->t_items)) {
1244 ASSERT(0);
1148 return 0; 1245 return 0;
1246 }
1149 1247
1150 while (lidp != NULL) { 1248 list_for_each_entry(lidp, &tp->t_items, lid_trans) {
1151 /* 1249 /*
1152 * Skip items which aren't dirty in this transaction. 1250 * Skip items which aren't dirty in this transaction.
1153 */ 1251 */
1154 if (!(lidp->lid_flags & XFS_LID_DIRTY)) { 1252 if (!(lidp->lid_flags & XFS_LID_DIRTY))
1155 lidp = xfs_trans_next_item(tp, lidp);
1156 continue; 1253 continue;
1157 }
1158 lidp->lid_size = IOP_SIZE(lidp->lid_item); 1254 lidp->lid_size = IOP_SIZE(lidp->lid_item);
1159 nvecs += lidp->lid_size; 1255 nvecs += lidp->lid_size;
1160 lidp = xfs_trans_next_item(tp, lidp);
1161 } 1256 }
1162 1257
1163 return nvecs; 1258 return nvecs;
@@ -1177,7 +1272,7 @@ xfs_trans_fill_vecs(
1177 struct xfs_trans *tp, 1272 struct xfs_trans *tp,
1178 struct xfs_log_iovec *log_vector) 1273 struct xfs_log_iovec *log_vector)
1179{ 1274{
1180 xfs_log_item_desc_t *lidp; 1275 struct xfs_log_item_desc *lidp;
1181 struct xfs_log_iovec *vecp; 1276 struct xfs_log_iovec *vecp;
1182 uint nitems; 1277 uint nitems;
1183 1278
@@ -1188,14 +1283,11 @@ xfs_trans_fill_vecs(
1188 vecp = log_vector + 1; 1283 vecp = log_vector + 1;
1189 1284
1190 nitems = 0; 1285 nitems = 0;
1191 lidp = xfs_trans_first_item(tp); 1286 ASSERT(!list_empty(&tp->t_items));
1192 ASSERT(lidp); 1287 list_for_each_entry(lidp, &tp->t_items, lid_trans) {
1193 while (lidp) {
1194 /* Skip items which aren't dirty in this transaction. */ 1288 /* Skip items which aren't dirty in this transaction. */
1195 if (!(lidp->lid_flags & XFS_LID_DIRTY)) { 1289 if (!(lidp->lid_flags & XFS_LID_DIRTY))
1196 lidp = xfs_trans_next_item(tp, lidp);
1197 continue; 1290 continue;
1198 }
1199 1291
1200 /* 1292 /*
1201 * The item may be marked dirty but not log anything. This can 1293 * The item may be marked dirty but not log anything. This can
@@ -1206,7 +1298,6 @@ xfs_trans_fill_vecs(
1206 IOP_FORMAT(lidp->lid_item, vecp); 1298 IOP_FORMAT(lidp->lid_item, vecp);
1207 vecp += lidp->lid_size; 1299 vecp += lidp->lid_size;
1208 IOP_PIN(lidp->lid_item); 1300 IOP_PIN(lidp->lid_item);
1209 lidp = xfs_trans_next_item(tp, lidp);
1210 } 1301 }
1211 1302
1212 /* 1303 /*
@@ -1284,7 +1375,7 @@ xfs_trans_item_committed(
1284 * log item flags, if anyone else stales the buffer we do not want to 1375 * log item flags, if anyone else stales the buffer we do not want to
1285 * pay any attention to it. 1376 * pay any attention to it.
1286 */ 1377 */
1287 IOP_UNPIN(lip); 1378 IOP_UNPIN(lip, 0);
1288} 1379}
1289 1380
1290/* 1381/*
@@ -1301,24 +1392,15 @@ xfs_trans_committed(
1301 struct xfs_trans *tp, 1392 struct xfs_trans *tp,
1302 int abortflag) 1393 int abortflag)
1303{ 1394{
1304 xfs_log_item_desc_t *lidp; 1395 struct xfs_log_item_desc *lidp, *next;
1305 xfs_log_item_chunk_t *licp;
1306 xfs_log_item_chunk_t *next_licp;
1307 1396
1308 /* Call the transaction's completion callback if there is one. */ 1397 /* Call the transaction's completion callback if there is one. */
1309 if (tp->t_callback != NULL) 1398 if (tp->t_callback != NULL)
1310 tp->t_callback(tp, tp->t_callarg); 1399 tp->t_callback(tp, tp->t_callarg);
1311 1400
1312 for (lidp = xfs_trans_first_item(tp); 1401 list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
1313 lidp != NULL;
1314 lidp = xfs_trans_next_item(tp, lidp)) {
1315 xfs_trans_item_committed(lidp->lid_item, tp->t_lsn, abortflag); 1402 xfs_trans_item_committed(lidp->lid_item, tp->t_lsn, abortflag);
1316 } 1403 xfs_trans_free_item_desc(lidp);
1317
1318 /* free the item chunks, ignoring the embedded chunk */
1319 for (licp = tp->t_items.lic_next; licp != NULL; licp = next_licp) {
1320 next_licp = licp->lic_next;
1321 kmem_free(licp);
1322 } 1404 }
1323 1405
1324 xfs_trans_free(tp); 1406 xfs_trans_free(tp);
@@ -1333,16 +1415,14 @@ xfs_trans_uncommit(
1333 struct xfs_trans *tp, 1415 struct xfs_trans *tp,
1334 uint flags) 1416 uint flags)
1335{ 1417{
1336 xfs_log_item_desc_t *lidp; 1418 struct xfs_log_item_desc *lidp;
1337 1419
1338 for (lidp = xfs_trans_first_item(tp); 1420 list_for_each_entry(lidp, &tp->t_items, lid_trans) {
1339 lidp != NULL;
1340 lidp = xfs_trans_next_item(tp, lidp)) {
1341 /* 1421 /*
1342 * Unpin all but those that aren't dirty. 1422 * Unpin all but those that aren't dirty.
1343 */ 1423 */
1344 if (lidp->lid_flags & XFS_LID_DIRTY) 1424 if (lidp->lid_flags & XFS_LID_DIRTY)
1345 IOP_UNPIN_REMOVE(lidp->lid_item, tp); 1425 IOP_UNPIN(lidp->lid_item, 1);
1346 } 1426 }
1347 1427
1348 xfs_trans_unreserve_and_mod_sb(tp); 1428 xfs_trans_unreserve_and_mod_sb(tp);
@@ -1508,33 +1588,28 @@ STATIC struct xfs_log_vec *
1508xfs_trans_alloc_log_vecs( 1588xfs_trans_alloc_log_vecs(
1509 xfs_trans_t *tp) 1589 xfs_trans_t *tp)
1510{ 1590{
1511 xfs_log_item_desc_t *lidp; 1591 struct xfs_log_item_desc *lidp;
1512 struct xfs_log_vec *lv = NULL; 1592 struct xfs_log_vec *lv = NULL;
1513 struct xfs_log_vec *ret_lv = NULL; 1593 struct xfs_log_vec *ret_lv = NULL;
1514 1594
1515 lidp = xfs_trans_first_item(tp);
1516 1595
1517 /* Bail out if we didn't find a log item. */ 1596 /* Bail out if we didn't find a log item. */
1518 if (!lidp) { 1597 if (list_empty(&tp->t_items)) {
1519 ASSERT(0); 1598 ASSERT(0);
1520 return NULL; 1599 return NULL;
1521 } 1600 }
1522 1601
1523 while (lidp != NULL) { 1602 list_for_each_entry(lidp, &tp->t_items, lid_trans) {
1524 struct xfs_log_vec *new_lv; 1603 struct xfs_log_vec *new_lv;
1525 1604
1526 /* Skip items which aren't dirty in this transaction. */ 1605 /* Skip items which aren't dirty in this transaction. */
1527 if (!(lidp->lid_flags & XFS_LID_DIRTY)) { 1606 if (!(lidp->lid_flags & XFS_LID_DIRTY))
1528 lidp = xfs_trans_next_item(tp, lidp);
1529 continue; 1607 continue;
1530 }
1531 1608
1532 /* Skip items that do not have any vectors for writing */ 1609 /* Skip items that do not have any vectors for writing */
1533 lidp->lid_size = IOP_SIZE(lidp->lid_item); 1610 lidp->lid_size = IOP_SIZE(lidp->lid_item);
1534 if (!lidp->lid_size) { 1611 if (!lidp->lid_size)
1535 lidp = xfs_trans_next_item(tp, lidp);
1536 continue; 1612 continue;
1537 }
1538 1613
1539 new_lv = kmem_zalloc(sizeof(*new_lv) + 1614 new_lv = kmem_zalloc(sizeof(*new_lv) +
1540 lidp->lid_size * sizeof(struct xfs_log_iovec), 1615 lidp->lid_size * sizeof(struct xfs_log_iovec),
@@ -1549,7 +1624,6 @@ xfs_trans_alloc_log_vecs(
1549 else 1624 else
1550 lv->lv_next = new_lv; 1625 lv->lv_next = new_lv;
1551 lv = new_lv; 1626 lv = new_lv;
1552 lidp = xfs_trans_next_item(tp, lidp);
1553 } 1627 }
1554 1628
1555 return ret_lv; 1629 return ret_lv;
@@ -1708,12 +1782,6 @@ xfs_trans_cancel(
1708 int flags) 1782 int flags)
1709{ 1783{
1710 int log_flags; 1784 int log_flags;
1711#ifdef DEBUG
1712 xfs_log_item_chunk_t *licp;
1713 xfs_log_item_desc_t *lidp;
1714 xfs_log_item_t *lip;
1715 int i;
1716#endif
1717 xfs_mount_t *mp = tp->t_mountp; 1785 xfs_mount_t *mp = tp->t_mountp;
1718 1786
1719 /* 1787 /*
@@ -1732,21 +1800,11 @@ xfs_trans_cancel(
1732 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 1800 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1733 } 1801 }
1734#ifdef DEBUG 1802#ifdef DEBUG
1735 if (!(flags & XFS_TRANS_ABORT)) { 1803 if (!(flags & XFS_TRANS_ABORT) && !XFS_FORCED_SHUTDOWN(mp)) {
1736 licp = &(tp->t_items); 1804 struct xfs_log_item_desc *lidp;
1737 while (licp != NULL) { 1805
1738 lidp = licp->lic_descs; 1806 list_for_each_entry(lidp, &tp->t_items, lid_trans)
1739 for (i = 0; i < licp->lic_unused; i++, lidp++) { 1807 ASSERT(!(lidp->lid_item->li_type == XFS_LI_EFD));
1740 if (xfs_lic_isfree(licp, i)) {
1741 continue;
1742 }
1743
1744 lip = lidp->lid_item;
1745 if (!XFS_FORCED_SHUTDOWN(mp))
1746 ASSERT(!(lip->li_type == XFS_LI_EFD));
1747 }
1748 licp = licp->lic_next;
1749 }
1750 } 1808 }
1751#endif 1809#endif
1752 xfs_trans_unreserve_and_mod_sb(tp); 1810 xfs_trans_unreserve_and_mod_sb(tp);
@@ -1834,7 +1892,6 @@ xfs_trans_roll(
1834 if (error) 1892 if (error)
1835 return error; 1893 return error;
1836 1894
1837 xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL); 1895 xfs_trans_ijoin(trans, dp);
1838 xfs_trans_ihold(trans, dp);
1839 return 0; 1896 return 0;
1840} 1897}
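The xfs_trans.c changes above replace the fixed-size log item descriptor chunks with per-item descriptors on a plain list_head (tp->t_items), allocated from the new xfs_log_item_desc_zone, so every former xfs_trans_first_item()/xfs_trans_next_item() walk becomes an ordinary list iteration. A minimal sketch of the resulting pattern, as used by xfs_trans_count_vecs() and friends above (not additional code from the patch):

	struct xfs_log_item_desc	*lidp;

	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		if (!(lidp->lid_flags & XFS_LID_DIRTY))
			continue;	/* skip items not dirtied in this transaction */
		/* ... operate on lidp->lid_item ... */
	}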
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index e639e8e9a2a9..c13c0f97b494 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -161,105 +161,14 @@ typedef struct xfs_trans_header {
161 * the amount of space needed to log the item it describes 161 * the amount of space needed to log the item it describes
162 * once we get to commit processing (see xfs_trans_commit()). 162 * once we get to commit processing (see xfs_trans_commit()).
163 */ 163 */
164typedef struct xfs_log_item_desc { 164struct xfs_log_item_desc {
165 struct xfs_log_item *lid_item; 165 struct xfs_log_item *lid_item;
166 ushort lid_size; 166 ushort lid_size;
167 unsigned char lid_flags; 167 unsigned char lid_flags;
168 unsigned char lid_index; 168 struct list_head lid_trans;
169} xfs_log_item_desc_t; 169};
170 170
171#define XFS_LID_DIRTY 0x1 171#define XFS_LID_DIRTY 0x1
172#define XFS_LID_PINNED 0x2
173
174/*
175 * This structure is used to maintain a chunk list of log_item_desc
176 * structures. The free field is a bitmask indicating which descriptors
177 * in this chunk's array are free. The unused field is the first value
178 * not used since this chunk was allocated.
179 */
180#define XFS_LIC_NUM_SLOTS 15
181typedef struct xfs_log_item_chunk {
182 struct xfs_log_item_chunk *lic_next;
183 ushort lic_free;
184 ushort lic_unused;
185 xfs_log_item_desc_t lic_descs[XFS_LIC_NUM_SLOTS];
186} xfs_log_item_chunk_t;
187
188#define XFS_LIC_MAX_SLOT (XFS_LIC_NUM_SLOTS - 1)
189#define XFS_LIC_FREEMASK ((1 << XFS_LIC_NUM_SLOTS) - 1)
190
191
192/*
193 * Initialize the given chunk. Set the chunk's free descriptor mask
194 * to indicate that all descriptors are free. The caller gets to set
195 * lic_unused to the right value (0 matches all free). The
196 * lic_descs.lid_index values are set up as each desc is allocated.
197 */
198static inline void xfs_lic_init(xfs_log_item_chunk_t *cp)
199{
200 cp->lic_free = XFS_LIC_FREEMASK;
201}
202
203static inline void xfs_lic_init_slot(xfs_log_item_chunk_t *cp, int slot)
204{
205 cp->lic_descs[slot].lid_index = (unsigned char)(slot);
206}
207
208static inline int xfs_lic_vacancy(xfs_log_item_chunk_t *cp)
209{
210 return cp->lic_free & XFS_LIC_FREEMASK;
211}
212
213static inline void xfs_lic_all_free(xfs_log_item_chunk_t *cp)
214{
215 cp->lic_free = XFS_LIC_FREEMASK;
216}
217
218static inline int xfs_lic_are_all_free(xfs_log_item_chunk_t *cp)
219{
220 return ((cp->lic_free & XFS_LIC_FREEMASK) == XFS_LIC_FREEMASK);
221}
222
223static inline int xfs_lic_isfree(xfs_log_item_chunk_t *cp, int slot)
224{
225 return (cp->lic_free & (1 << slot));
226}
227
228static inline void xfs_lic_claim(xfs_log_item_chunk_t *cp, int slot)
229{
230 cp->lic_free &= ~(1 << slot);
231}
232
233static inline void xfs_lic_relse(xfs_log_item_chunk_t *cp, int slot)
234{
235 cp->lic_free |= 1 << slot;
236}
237
238static inline xfs_log_item_desc_t *
239xfs_lic_slot(xfs_log_item_chunk_t *cp, int slot)
240{
241 return &(cp->lic_descs[slot]);
242}
243
244static inline int xfs_lic_desc_to_slot(xfs_log_item_desc_t *dp)
245{
246 return (uint)dp->lid_index;
247}
248
249/*
250 * Calculate the address of a chunk given a descriptor pointer:
251 * dp - dp->lid_index give the address of the start of the lic_descs array.
252 * From this we subtract the offset of the lic_descs field in a chunk.
253 * All of this yields the address of the chunk, which is
254 * cast to a chunk pointer.
255 */
256static inline xfs_log_item_chunk_t *
257xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
258{
259 return (xfs_log_item_chunk_t*) \
260 (((xfs_caddr_t)((dp) - (dp)->lid_index)) - \
261 (xfs_caddr_t)(((xfs_log_item_chunk_t*)0)->lic_descs));
262}
263 172
264#define XFS_TRANS_MAGIC 0x5452414E /* 'TRAN' */ 173#define XFS_TRANS_MAGIC 0x5452414E /* 'TRAN' */
265/* 174/*
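
The block removed above also drops xfs_lic_desc_to_chunk(), whose pointer arithmetic the old comment spells out: step back lid_index descriptors to reach lic_descs[0], then subtract the offset of lic_descs within the chunk. The standalone sketch below (plain C, illustrative struct names, not part of the patch) shows that this is exactly the offsetof/container_of computation:

	#include <stddef.h>
	#include <stdio.h>

	#define NUM_SLOTS	15

	struct demo_desc {
		void		*lid_item;
		unsigned short	lid_size;
		unsigned char	lid_flags;
		unsigned char	lid_index;	/* slot number within the chunk */
	};

	struct demo_chunk {
		struct demo_chunk	*lic_next;
		unsigned short		lic_free;
		unsigned short		lic_unused;
		struct demo_desc	lic_descs[NUM_SLOTS];
	};

	static struct demo_chunk *desc_to_chunk(struct demo_desc *dp)
	{
		/* dp - dp->lid_index lands on lic_descs[0]; subtracting the field
		 * offset yields the enclosing chunk, i.e. a hand-rolled container_of. */
		return (struct demo_chunk *)((char *)(dp - dp->lid_index) -
				offsetof(struct demo_chunk, lic_descs));
	}

	int main(void)
	{
		struct demo_chunk	chunk;
		struct demo_desc	*dp = &chunk.lic_descs[7];

		dp->lid_index = 7;
		printf("recovered %p, expected %p\n",
		       (void *)desc_to_chunk(dp), (void *)&chunk);
		return 0;
	}
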
@@ -275,8 +184,6 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
275/* 184/*
276 * Values for call flags parameter. 185 * Values for call flags parameter.
277 */ 186 */
278#define XFS_TRANS_NOSLEEP 0x1
279#define XFS_TRANS_WAIT 0x2
280#define XFS_TRANS_RELEASE_LOG_RES 0x4 187#define XFS_TRANS_RELEASE_LOG_RES 0x4
281#define XFS_TRANS_ABORT 0x8 188#define XFS_TRANS_ABORT 0x8
282 189
@@ -438,8 +345,7 @@ typedef struct xfs_item_ops {
438 uint (*iop_size)(xfs_log_item_t *); 345 uint (*iop_size)(xfs_log_item_t *);
439 void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *); 346 void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
440 void (*iop_pin)(xfs_log_item_t *); 347 void (*iop_pin)(xfs_log_item_t *);
441 void (*iop_unpin)(xfs_log_item_t *); 348 void (*iop_unpin)(xfs_log_item_t *, int remove);
442 void (*iop_unpin_remove)(xfs_log_item_t *, struct xfs_trans *);
443 uint (*iop_trylock)(xfs_log_item_t *); 349 uint (*iop_trylock)(xfs_log_item_t *);
444 void (*iop_unlock)(xfs_log_item_t *); 350 void (*iop_unlock)(xfs_log_item_t *);
445 xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t); 351 xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
@@ -451,8 +357,7 @@ typedef struct xfs_item_ops {
451#define IOP_SIZE(ip) (*(ip)->li_ops->iop_size)(ip) 357#define IOP_SIZE(ip) (*(ip)->li_ops->iop_size)(ip)
452#define IOP_FORMAT(ip,vp) (*(ip)->li_ops->iop_format)(ip, vp) 358#define IOP_FORMAT(ip,vp) (*(ip)->li_ops->iop_format)(ip, vp)
453#define IOP_PIN(ip) (*(ip)->li_ops->iop_pin)(ip) 359#define IOP_PIN(ip) (*(ip)->li_ops->iop_pin)(ip)
454#define IOP_UNPIN(ip) (*(ip)->li_ops->iop_unpin)(ip) 360#define IOP_UNPIN(ip, remove) (*(ip)->li_ops->iop_unpin)(ip, remove)
455#define IOP_UNPIN_REMOVE(ip,tp) (*(ip)->li_ops->iop_unpin_remove)(ip, tp)
456#define IOP_TRYLOCK(ip) (*(ip)->li_ops->iop_trylock)(ip) 361#define IOP_TRYLOCK(ip) (*(ip)->li_ops->iop_trylock)(ip)
457#define IOP_UNLOCK(ip) (*(ip)->li_ops->iop_unlock)(ip) 362#define IOP_UNLOCK(ip) (*(ip)->li_ops->iop_unlock)(ip)
458#define IOP_COMMITTED(ip, lsn) (*(ip)->li_ops->iop_committed)(ip, lsn) 363#define IOP_COMMITTED(ip, lsn) (*(ip)->li_ops->iop_committed)(ip, lsn)
@@ -516,8 +421,7 @@ typedef struct xfs_trans {
516 int64_t t_rblocks_delta;/* superblock rblocks change */ 421 int64_t t_rblocks_delta;/* superblock rblocks change */
517 int64_t t_rextents_delta;/* superblocks rextents chg */ 422 int64_t t_rextents_delta;/* superblocks rextents chg */
518 int64_t t_rextslog_delta;/* superblocks rextslog chg */ 423 int64_t t_rextslog_delta;/* superblocks rextslog chg */
519 unsigned int t_items_free; /* log item descs free */ 424 struct list_head t_items; /* log item descriptors */
520 xfs_log_item_chunk_t t_items; /* first log item desc chunk */
521 xfs_trans_header_t t_header; /* header for in-log trans */ 425 xfs_trans_header_t t_header; /* header for in-log trans */
522 struct list_head t_busy; /* list of busy extents */ 426 struct list_head t_busy; /* list of busy extents */
523 unsigned long t_pflags; /* saved process flags state */ 427 unsigned long t_pflags; /* saved process flags state */
@@ -569,8 +473,8 @@ void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint);
569void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *); 473void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *);
570int xfs_trans_iget(struct xfs_mount *, xfs_trans_t *, 474int xfs_trans_iget(struct xfs_mount *, xfs_trans_t *,
571 xfs_ino_t , uint, uint, struct xfs_inode **); 475 xfs_ino_t , uint, uint, struct xfs_inode **);
572void xfs_trans_ijoin(xfs_trans_t *, struct xfs_inode *, uint); 476void xfs_trans_ijoin_ref(struct xfs_trans *, struct xfs_inode *, uint);
573void xfs_trans_ihold(xfs_trans_t *, struct xfs_inode *); 477void xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *);
574void xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint); 478void xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint);
575void xfs_trans_log_inode(xfs_trans_t *, struct xfs_inode *, uint); 479void xfs_trans_log_inode(xfs_trans_t *, struct xfs_inode *, uint);
576struct xfs_efi_log_item *xfs_trans_get_efi(xfs_trans_t *, uint); 480struct xfs_efi_log_item *xfs_trans_get_efi(xfs_trans_t *, uint);
@@ -595,6 +499,7 @@ int xfs_trans_ail_init(struct xfs_mount *);
595void xfs_trans_ail_destroy(struct xfs_mount *); 499void xfs_trans_ail_destroy(struct xfs_mount *);
596 500
597extern kmem_zone_t *xfs_trans_zone; 501extern kmem_zone_t *xfs_trans_zone;
502extern kmem_zone_t *xfs_log_item_desc_zone;
598 503
599#endif /* __KERNEL__ */ 504#endif /* __KERNEL__ */
600 505
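
With lid_trans embedded in the descriptor, t_items turned into a list_head and xfs_log_item_desc_zone exported, descriptor management reduces to plain list operations. The sketch below approximates what the replacement xfs_trans_add_item()/xfs_trans_del_item() look like on top of these fields; the actual bodies live in xfs_trans.c (not part of this hunk), so treat the allocation flags and assertions as assumptions rather than the patch's exact code.

	void
	xfs_trans_add_item(
		struct xfs_trans	*tp,
		struct xfs_log_item	*lip)
	{
		struct xfs_log_item_desc *lidp;

		/* one descriptor per item, allocated from the exported zone */
		lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP);
		lidp->lid_item = lip;
		lidp->lid_flags = 0;
		lidp->lid_size = 0;
		list_add_tail(&lidp->lid_trans, &tp->t_items);

		lip->li_desc = lidp;
		lip->li_mountp = tp->t_mountp;
		lip->li_ailp = tp->t_mountp->m_ail;
	}

	void
	xfs_trans_del_item(
		struct xfs_log_item	*lip)
	{
		list_del_init(&lip->li_desc->lid_trans);
		kmem_zone_free(xfs_log_item_desc_zone, lip->li_desc);
		lip->li_desc = NULL;
	}
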
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index e799824f7245..dc9069568ff7 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -24,7 +24,6 @@
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dmapi.h"
28#include "xfs_mount.h" 27#include "xfs_mount.h"
29#include "xfs_trans_priv.h" 28#include "xfs_trans_priv.h"
30#include "xfs_error.h" 29#include "xfs_error.h"
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 63d81a22f4fd..90af025e6839 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -24,14 +24,10 @@
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 27#include "xfs_mount.h"
30#include "xfs_bmap_btree.h" 28#include "xfs_bmap_btree.h"
31#include "xfs_alloc_btree.h" 29#include "xfs_alloc_btree.h"
32#include "xfs_ialloc_btree.h" 30#include "xfs_ialloc_btree.h"
33#include "xfs_dir2_sf.h"
34#include "xfs_attr_sf.h"
35#include "xfs_dinode.h" 31#include "xfs_dinode.h"
36#include "xfs_inode.h" 32#include "xfs_inode.h"
37#include "xfs_buf_item.h" 33#include "xfs_buf_item.h"
@@ -51,36 +47,17 @@ xfs_trans_buf_item_match(
51 xfs_daddr_t blkno, 47 xfs_daddr_t blkno,
52 int len) 48 int len)
53{ 49{
54 xfs_log_item_chunk_t *licp; 50 struct xfs_log_item_desc *lidp;
55 xfs_log_item_desc_t *lidp; 51 struct xfs_buf_log_item *blip;
56 xfs_buf_log_item_t *blip;
57 int i;
58 52
59 len = BBTOB(len); 53 len = BBTOB(len);
60 for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) { 54 list_for_each_entry(lidp, &tp->t_items, lid_trans) {
61 if (xfs_lic_are_all_free(licp)) { 55 blip = (struct xfs_buf_log_item *)lidp->lid_item;
62 ASSERT(licp == &tp->t_items); 56 if (blip->bli_item.li_type == XFS_LI_BUF &&
63 ASSERT(licp->lic_next == NULL); 57 XFS_BUF_TARGET(blip->bli_buf) == target &&
64 return NULL; 58 XFS_BUF_ADDR(blip->bli_buf) == blkno &&
65 } 59 XFS_BUF_COUNT(blip->bli_buf) == len)
66 60 return blip->bli_buf;
67 for (i = 0; i < licp->lic_unused; i++) {
68 /*
69 * Skip unoccupied slots.
70 */
71 if (xfs_lic_isfree(licp, i))
72 continue;
73
74 lidp = xfs_lic_slot(licp, i);
75 blip = (xfs_buf_log_item_t *)lidp->lid_item;
76 if (blip->bli_item.li_type != XFS_LI_BUF)
77 continue;
78
79 if (XFS_BUF_TARGET(blip->bli_buf) == target &&
80 XFS_BUF_ADDR(blip->bli_buf) == blkno &&
81 XFS_BUF_COUNT(blip->bli_buf) == len)
82 return blip->bli_buf;
83 }
84 } 61 }
85 62
86 return NULL; 63 return NULL;
@@ -127,7 +104,7 @@ _xfs_trans_bjoin(
127 /* 104 /*
128 * Get a log_item_desc to point at the new item. 105 * Get a log_item_desc to point at the new item.
129 */ 106 */
130 (void) xfs_trans_add_item(tp, (xfs_log_item_t *)bip); 107 xfs_trans_add_item(tp, &bip->bli_item);
131 108
132 /* 109 /*
133 * Initialize b_fsprivate2 so we can find it with incore_match() 110 * Initialize b_fsprivate2 so we can find it with incore_match()
@@ -483,7 +460,6 @@ xfs_trans_brelse(xfs_trans_t *tp,
483{ 460{
484 xfs_buf_log_item_t *bip; 461 xfs_buf_log_item_t *bip;
485 xfs_log_item_t *lip; 462 xfs_log_item_t *lip;
486 xfs_log_item_desc_t *lidp;
487 463
488 /* 464 /*
489 * Default to a normal brelse() call if the tp is NULL. 465 * Default to a normal brelse() call if the tp is NULL.
@@ -514,13 +490,6 @@ xfs_trans_brelse(xfs_trans_t *tp,
514 ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL)); 490 ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
515 ASSERT(atomic_read(&bip->bli_refcount) > 0); 491 ASSERT(atomic_read(&bip->bli_refcount) > 0);
516 492
517 /*
518 * Find the item descriptor pointing to this buffer's
519 * log item. It must be there.
520 */
521 lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
522 ASSERT(lidp != NULL);
523
524 trace_xfs_trans_brelse(bip); 493 trace_xfs_trans_brelse(bip);
525 494
526 /* 495 /*
@@ -536,7 +505,7 @@ xfs_trans_brelse(xfs_trans_t *tp,
536 * If the buffer is dirty within this transaction, we can't 505 * If the buffer is dirty within this transaction, we can't
537 * release it until we commit. 506 * release it until we commit.
538 */ 507 */
539 if (lidp->lid_flags & XFS_LID_DIRTY) 508 if (bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY)
540 return; 509 return;
541 510
542 /* 511 /*
@@ -553,7 +522,7 @@ xfs_trans_brelse(xfs_trans_t *tp,
553 /* 522 /*
554 * Free up the log item descriptor tracking the released item. 523 * Free up the log item descriptor tracking the released item.
555 */ 524 */
556 xfs_trans_free_item(tp, lidp); 525 xfs_trans_del_item(&bip->bli_item);
557 526
558 /* 527 /*
559 * Clear the hold flag in the buf log item if it is set. 528 * Clear the hold flag in the buf log item if it is set.
@@ -665,7 +634,6 @@ xfs_trans_log_buf(xfs_trans_t *tp,
665 uint last) 634 uint last)
666{ 635{
667 xfs_buf_log_item_t *bip; 636 xfs_buf_log_item_t *bip;
668 xfs_log_item_desc_t *lidp;
669 637
670 ASSERT(XFS_BUF_ISBUSY(bp)); 638 ASSERT(XFS_BUF_ISBUSY(bp));
671 ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); 639 ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
@@ -690,7 +658,7 @@ xfs_trans_log_buf(xfs_trans_t *tp,
690 bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); 658 bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
691 ASSERT(atomic_read(&bip->bli_refcount) > 0); 659 ASSERT(atomic_read(&bip->bli_refcount) > 0);
692 XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks); 660 XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
693 bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))xfs_buf_iodone; 661 bip->bli_item.li_cb = xfs_buf_iodone;
694 662
695 trace_xfs_trans_log_buf(bip); 663 trace_xfs_trans_log_buf(bip);
696 664
@@ -707,11 +675,8 @@ xfs_trans_log_buf(xfs_trans_t *tp,
707 bip->bli_format.blf_flags &= ~XFS_BLF_CANCEL; 675 bip->bli_format.blf_flags &= ~XFS_BLF_CANCEL;
708 } 676 }
709 677
710 lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
711 ASSERT(lidp != NULL);
712
713 tp->t_flags |= XFS_TRANS_DIRTY; 678 tp->t_flags |= XFS_TRANS_DIRTY;
714 lidp->lid_flags |= XFS_LID_DIRTY; 679 bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
715 bip->bli_flags |= XFS_BLI_LOGGED; 680 bip->bli_flags |= XFS_BLI_LOGGED;
716 xfs_buf_item_log(bip, first, last); 681 xfs_buf_item_log(bip, first, last);
717} 682}
@@ -740,7 +705,6 @@ xfs_trans_binval(
740 xfs_trans_t *tp, 705 xfs_trans_t *tp,
741 xfs_buf_t *bp) 706 xfs_buf_t *bp)
742{ 707{
743 xfs_log_item_desc_t *lidp;
744 xfs_buf_log_item_t *bip; 708 xfs_buf_log_item_t *bip;
745 709
746 ASSERT(XFS_BUF_ISBUSY(bp)); 710 ASSERT(XFS_BUF_ISBUSY(bp));
@@ -748,8 +712,6 @@ xfs_trans_binval(
748 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); 712 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
749 713
750 bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); 714 bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
751 lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
752 ASSERT(lidp != NULL);
753 ASSERT(atomic_read(&bip->bli_refcount) > 0); 715 ASSERT(atomic_read(&bip->bli_refcount) > 0);
754 716
755 trace_xfs_trans_binval(bip); 717 trace_xfs_trans_binval(bip);
@@ -764,7 +726,7 @@ xfs_trans_binval(
764 ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY))); 726 ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
765 ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_INODE_BUF)); 727 ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_INODE_BUF));
766 ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL); 728 ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
767 ASSERT(lidp->lid_flags & XFS_LID_DIRTY); 729 ASSERT(bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY);
768 ASSERT(tp->t_flags & XFS_TRANS_DIRTY); 730 ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
769 return; 731 return;
770 } 732 }
@@ -797,7 +759,7 @@ xfs_trans_binval(
797 bip->bli_format.blf_flags |= XFS_BLF_CANCEL; 759 bip->bli_format.blf_flags |= XFS_BLF_CANCEL;
798 memset((char *)(bip->bli_format.blf_data_map), 0, 760 memset((char *)(bip->bli_format.blf_data_map), 0,
799 (bip->bli_format.blf_map_size * sizeof(uint))); 761 (bip->bli_format.blf_map_size * sizeof(uint)));
800 lidp->lid_flags |= XFS_LID_DIRTY; 762 bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
801 tp->t_flags |= XFS_TRANS_DIRTY; 763 tp->t_flags |= XFS_TRANS_DIRTY;
802} 764}
803 765
@@ -853,12 +815,9 @@ xfs_trans_stale_inode_buf(
853 ASSERT(atomic_read(&bip->bli_refcount) > 0); 815 ASSERT(atomic_read(&bip->bli_refcount) > 0);
854 816
855 bip->bli_flags |= XFS_BLI_STALE_INODE; 817 bip->bli_flags |= XFS_BLI_STALE_INODE;
856 bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) 818 bip->bli_item.li_cb = xfs_buf_iodone;
857 xfs_buf_iodone;
858} 819}
859 820
860
861
862/* 821/*
863 * Mark the buffer as being one which contains newly allocated 822 * Mark the buffer as being one which contains newly allocated
864 * inodes. We need to make sure that even if this buffer is 823 * inodes. We need to make sure that even if this buffer is
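
With xfs_trans_find_item() gone, every converted site in this file marks dirtiness the same way: set XFS_TRANS_DIRTY on the transaction and XFS_LID_DIRTY on the item's descriptor through the li_desc back pointer. A hypothetical helper (not introduced by this patch) makes the idiom explicit:

	static inline void
	xfs_trans_dirty_item(
		struct xfs_trans	*tp,
		struct xfs_log_item	*lip)
	{
		tp->t_flags |= XFS_TRANS_DIRTY;
		lip->li_desc->lid_flags |= XFS_LID_DIRTY;
	}

	/* e.g. xfs_trans_log_buf() above would call
	 *	xfs_trans_dirty_item(tp, &bip->bli_item);
	 */
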
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index 27cce2a9c7e9..f783d5e9fa70 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -23,7 +23,6 @@
23#include "xfs_trans.h" 23#include "xfs_trans.h"
24#include "xfs_sb.h" 24#include "xfs_sb.h"
25#include "xfs_ag.h" 25#include "xfs_ag.h"
26#include "xfs_dmapi.h"
27#include "xfs_mount.h" 26#include "xfs_mount.h"
28#include "xfs_trans_priv.h" 27#include "xfs_trans_priv.h"
29#include "xfs_extfree_item.h" 28#include "xfs_extfree_item.h"
@@ -49,9 +48,8 @@ xfs_trans_get_efi(xfs_trans_t *tp,
49 /* 48 /*
50 * Get a log_item_desc to point at the new item. 49 * Get a log_item_desc to point at the new item.
51 */ 50 */
52 (void) xfs_trans_add_item(tp, (xfs_log_item_t*)efip); 51 xfs_trans_add_item(tp, &efip->efi_item);
53 52 return efip;
54 return (efip);
55} 53}
56 54
57/* 55/*
@@ -65,15 +63,11 @@ xfs_trans_log_efi_extent(xfs_trans_t *tp,
65 xfs_fsblock_t start_block, 63 xfs_fsblock_t start_block,
66 xfs_extlen_t ext_len) 64 xfs_extlen_t ext_len)
67{ 65{
68 xfs_log_item_desc_t *lidp;
69 uint next_extent; 66 uint next_extent;
70 xfs_extent_t *extp; 67 xfs_extent_t *extp;
71 68
72 lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)efip);
73 ASSERT(lidp != NULL);
74
75 tp->t_flags |= XFS_TRANS_DIRTY; 69 tp->t_flags |= XFS_TRANS_DIRTY;
76 lidp->lid_flags |= XFS_LID_DIRTY; 70 efip->efi_item.li_desc->lid_flags |= XFS_LID_DIRTY;
77 71
78 next_extent = efip->efi_next_extent; 72 next_extent = efip->efi_next_extent;
79 ASSERT(next_extent < efip->efi_format.efi_nextents); 73 ASSERT(next_extent < efip->efi_format.efi_nextents);
@@ -106,9 +100,8 @@ xfs_trans_get_efd(xfs_trans_t *tp,
106 /* 100 /*
107 * Get a log_item_desc to point at the new item. 101 * Get a log_item_desc to point at the new item.
108 */ 102 */
109 (void) xfs_trans_add_item(tp, (xfs_log_item_t*)efdp); 103 xfs_trans_add_item(tp, &efdp->efd_item);
110 104 return efdp;
111 return (efdp);
112} 105}
113 106
114/* 107/*
@@ -122,15 +115,11 @@ xfs_trans_log_efd_extent(xfs_trans_t *tp,
122 xfs_fsblock_t start_block, 115 xfs_fsblock_t start_block,
123 xfs_extlen_t ext_len) 116 xfs_extlen_t ext_len)
124{ 117{
125 xfs_log_item_desc_t *lidp;
126 uint next_extent; 118 uint next_extent;
127 xfs_extent_t *extp; 119 xfs_extent_t *extp;
128 120
129 lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)efdp);
130 ASSERT(lidp != NULL);
131
132 tp->t_flags |= XFS_TRANS_DIRTY; 121 tp->t_flags |= XFS_TRANS_DIRTY;
133 lidp->lid_flags |= XFS_LID_DIRTY; 122 efdp->efd_item.li_desc->lid_flags |= XFS_LID_DIRTY;
134 123
135 next_extent = efdp->efd_next_extent; 124 next_extent = efdp->efd_next_extent;
136 ASSERT(next_extent < efdp->efd_format.efd_nextents); 125 ASSERT(next_extent < efdp->efd_format.efd_nextents);
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index 785ff101da0a..cdc53a1050c5 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -24,20 +24,16 @@
24#include "xfs_trans.h" 24#include "xfs_trans.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 27#include "xfs_mount.h"
30#include "xfs_bmap_btree.h" 28#include "xfs_bmap_btree.h"
31#include "xfs_alloc_btree.h" 29#include "xfs_alloc_btree.h"
32#include "xfs_ialloc_btree.h" 30#include "xfs_ialloc_btree.h"
33#include "xfs_dir2_sf.h"
34#include "xfs_attr_sf.h"
35#include "xfs_dinode.h" 31#include "xfs_dinode.h"
36#include "xfs_inode.h" 32#include "xfs_inode.h"
37#include "xfs_btree.h" 33#include "xfs_btree.h"
38#include "xfs_ialloc.h"
39#include "xfs_trans_priv.h" 34#include "xfs_trans_priv.h"
40#include "xfs_inode_item.h" 35#include "xfs_inode_item.h"
36#include "xfs_trace.h"
41 37
42#ifdef XFS_TRANS_DEBUG 38#ifdef XFS_TRANS_DEBUG
43STATIC void 39STATIC void
@@ -47,7 +43,6 @@ xfs_trans_inode_broot_debug(
47#define xfs_trans_inode_broot_debug(ip) 43#define xfs_trans_inode_broot_debug(ip)
48#endif 44#endif
49 45
50
51/* 46/*
52 * Get an inode and join it to the transaction. 47 * Get an inode and join it to the transaction.
53 */ 48 */
@@ -62,78 +57,66 @@ xfs_trans_iget(
62{ 57{
63 int error; 58 int error;
64 59
65 error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp, 0); 60 error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp);
66 if (!error && tp) 61 if (!error && tp) {
67 xfs_trans_ijoin(tp, *ipp, lock_flags); 62 xfs_trans_ijoin(tp, *ipp);
63 (*ipp)->i_itemp->ili_lock_flags = lock_flags;
64 }
68 return error; 65 return error;
69} 66}
70 67
71/* 68/*
72 * Add the locked inode to the transaction. 69 * Add a locked inode to the transaction.
73 * The inode must be locked, and it cannot be associated with any 70 *
74 * transaction. The caller must specify the locks already held 71 * The inode must be locked, and it cannot be associated with any transaction.
75 * on the inode.
76 */ 72 */
77void 73void
78xfs_trans_ijoin( 74xfs_trans_ijoin(
79 xfs_trans_t *tp, 75 struct xfs_trans *tp,
80 xfs_inode_t *ip, 76 struct xfs_inode *ip)
81 uint lock_flags)
82{ 77{
83 xfs_inode_log_item_t *iip; 78 xfs_inode_log_item_t *iip;
84 79
85 ASSERT(ip->i_transp == NULL); 80 ASSERT(ip->i_transp == NULL);
86 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 81 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
87 ASSERT(lock_flags & XFS_ILOCK_EXCL);
88 if (ip->i_itemp == NULL) 82 if (ip->i_itemp == NULL)
89 xfs_inode_item_init(ip, ip->i_mount); 83 xfs_inode_item_init(ip, ip->i_mount);
90 iip = ip->i_itemp; 84 iip = ip->i_itemp;
91 ASSERT(iip->ili_flags == 0); 85 ASSERT(iip->ili_lock_flags == 0);
92 86
93 /* 87 /*
94 * Get a log_item_desc to point at the new item. 88 * Get a log_item_desc to point at the new item.
95 */ 89 */
96 (void) xfs_trans_add_item(tp, (xfs_log_item_t*)(iip)); 90 xfs_trans_add_item(tp, &iip->ili_item);
97 91
98 xfs_trans_inode_broot_debug(ip); 92 xfs_trans_inode_broot_debug(ip);
99 93
100 /* 94 /*
101 * If the IO lock is already held, mark that in the inode log item.
102 */
103 if (lock_flags & XFS_IOLOCK_EXCL) {
104 iip->ili_flags |= XFS_ILI_IOLOCKED_EXCL;
105 } else if (lock_flags & XFS_IOLOCK_SHARED) {
106 iip->ili_flags |= XFS_ILI_IOLOCKED_SHARED;
107 }
108
109 /*
110 * Initialize i_transp so we can find it with xfs_inode_incore() 95 * Initialize i_transp so we can find it with xfs_inode_incore()
111 * in xfs_trans_iget() above. 96 * in xfs_trans_iget() above.
112 */ 97 */
113 ip->i_transp = tp; 98 ip->i_transp = tp;
114} 99}
115 100
116
117
118/* 101/*
119 * Mark the inode as not needing to be unlocked when the inode item's 102 * Add a locked inode to the transaction.
120 * IOP_UNLOCK() routine is called. The inode must already be locked 103 *
121 * and associated with the given transaction. 104 *
105 * Grabs a reference to the inode which will be dropped when the transaction
 106 * is committed. The inode will also be unlocked at that point. The inode
107 * must be locked, and it cannot be associated with any transaction.
122 */ 108 */
123/*ARGSUSED*/
124void 109void
125xfs_trans_ihold( 110xfs_trans_ijoin_ref(
126 xfs_trans_t *tp, 111 struct xfs_trans *tp,
127 xfs_inode_t *ip) 112 struct xfs_inode *ip,
113 uint lock_flags)
128{ 114{
129 ASSERT(ip->i_transp == tp); 115 xfs_trans_ijoin(tp, ip);
130 ASSERT(ip->i_itemp != NULL); 116 IHOLD(ip);
131 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 117 ip->i_itemp->ili_lock_flags = lock_flags;
132
133 ip->i_itemp->ili_flags |= XFS_ILI_HOLD;
134} 118}
135 119
136
137/* 120/*
138 * This is called to mark the fields indicated in fieldmask as needing 121 * This is called to mark the fields indicated in fieldmask as needing
139 * to be logged when the transaction is committed. The inode must 122 * to be logged when the transaction is committed. The inode must
@@ -149,17 +132,12 @@ xfs_trans_log_inode(
149 xfs_inode_t *ip, 132 xfs_inode_t *ip,
150 uint flags) 133 uint flags)
151{ 134{
152 xfs_log_item_desc_t *lidp;
153
154 ASSERT(ip->i_transp == tp); 135 ASSERT(ip->i_transp == tp);
155 ASSERT(ip->i_itemp != NULL); 136 ASSERT(ip->i_itemp != NULL);
156 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 137 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
157 138
158 lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(ip->i_itemp));
159 ASSERT(lidp != NULL);
160
161 tp->t_flags |= XFS_TRANS_DIRTY; 139 tp->t_flags |= XFS_TRANS_DIRTY;
162 lidp->lid_flags |= XFS_LID_DIRTY; 140 ip->i_itemp->ili_item.li_desc->lid_flags |= XFS_LID_DIRTY;
163 141
164 /* 142 /*
165 * Always OR in the bits from the ili_last_fields field. 143 * Always OR in the bits from the ili_last_fields field.
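
The net effect on callers, visible in the xfs_utils.c and xfs_vnodeops.c hunks further down, is that the old multi-step join is collapsed into one of two calls. Roughly, as illustrative fragments based on the calling conventions documented in the new comments above:

	/* Before: lock flags recorded at join time, hold/reference by hand. */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);		/* keep ip locked across commit */

	IHOLD(dp);				/* reference consumed at commit */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);

	/* After: */
	xfs_trans_ijoin(tp, ip);		/* caller keeps the lock        */
	xfs_trans_ijoin_ref(tp, dp, XFS_ILOCK_EXCL);
						/* transaction takes a reference,
						 * unlocks and releases at commit */
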
diff --git a/fs/xfs/xfs_trans_item.c b/fs/xfs/xfs_trans_item.c
deleted file mode 100644
index f11d37d06dcc..000000000000
--- a/fs/xfs/xfs_trans_item.c
+++ /dev/null
@@ -1,441 +0,0 @@
1/*
2 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_types.h"
21#include "xfs_log.h"
22#include "xfs_inum.h"
23#include "xfs_trans.h"
24#include "xfs_trans_priv.h"
25/* XXX: from here down needed until struct xfs_trans has its own ailp */
26#include "xfs_bit.h"
27#include "xfs_buf_item.h"
28#include "xfs_sb.h"
29#include "xfs_ag.h"
30#include "xfs_dir2.h"
31#include "xfs_dmapi.h"
32#include "xfs_mount.h"
33
34STATIC int xfs_trans_unlock_chunk(xfs_log_item_chunk_t *,
35 int, int, xfs_lsn_t);
36
37/*
38 * This is called to add the given log item to the transaction's
39 * list of log items. It must find a free log item descriptor
40 * or allocate a new one and add the item to that descriptor.
41 * The function returns a pointer to item descriptor used to point
42 * to the new item. The log item will now point to its new descriptor
43 * with its li_desc field.
44 */
45xfs_log_item_desc_t *
46xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip)
47{
48 xfs_log_item_desc_t *lidp;
49 xfs_log_item_chunk_t *licp;
50 int i=0;
51
52 /*
53 * If there are no free descriptors, allocate a new chunk
54 * of them and put it at the front of the chunk list.
55 */
56 if (tp->t_items_free == 0) {
57 licp = (xfs_log_item_chunk_t*)
58 kmem_alloc(sizeof(xfs_log_item_chunk_t), KM_SLEEP);
59 ASSERT(licp != NULL);
60 /*
61 * Initialize the chunk, and then
62 * claim the first slot in the newly allocated chunk.
63 */
64 xfs_lic_init(licp);
65 xfs_lic_claim(licp, 0);
66 licp->lic_unused = 1;
67 xfs_lic_init_slot(licp, 0);
68 lidp = xfs_lic_slot(licp, 0);
69
70 /*
71 * Link in the new chunk and update the free count.
72 */
73 licp->lic_next = tp->t_items.lic_next;
74 tp->t_items.lic_next = licp;
75 tp->t_items_free = XFS_LIC_NUM_SLOTS - 1;
76
77 /*
78 * Initialize the descriptor and the generic portion
79 * of the log item.
80 *
81 * Point the new slot at this item and return it.
82 * Also point the log item at its currently active
83 * descriptor and set the item's mount pointer.
84 */
85 lidp->lid_item = lip;
86 lidp->lid_flags = 0;
87 lidp->lid_size = 0;
88 lip->li_desc = lidp;
89 lip->li_mountp = tp->t_mountp;
90 lip->li_ailp = tp->t_mountp->m_ail;
91 return lidp;
92 }
93
94 /*
95 * Find the free descriptor. It is somewhere in the chunklist
96 * of descriptors.
97 */
98 licp = &tp->t_items;
99 while (licp != NULL) {
100 if (xfs_lic_vacancy(licp)) {
101 if (licp->lic_unused <= XFS_LIC_MAX_SLOT) {
102 i = licp->lic_unused;
103 ASSERT(xfs_lic_isfree(licp, i));
104 break;
105 }
106 for (i = 0; i <= XFS_LIC_MAX_SLOT; i++) {
107 if (xfs_lic_isfree(licp, i))
108 break;
109 }
110 ASSERT(i <= XFS_LIC_MAX_SLOT);
111 break;
112 }
113 licp = licp->lic_next;
114 }
115 ASSERT(licp != NULL);
116 /*
117 * If we find a free descriptor, claim it,
118 * initialize it, and return it.
119 */
120 xfs_lic_claim(licp, i);
121 if (licp->lic_unused <= i) {
122 licp->lic_unused = i + 1;
123 xfs_lic_init_slot(licp, i);
124 }
125 lidp = xfs_lic_slot(licp, i);
126 tp->t_items_free--;
127 lidp->lid_item = lip;
128 lidp->lid_flags = 0;
129 lidp->lid_size = 0;
130 lip->li_desc = lidp;
131 lip->li_mountp = tp->t_mountp;
132 lip->li_ailp = tp->t_mountp->m_ail;
133 return lidp;
134}
135
136/*
137 * Free the given descriptor.
138 *
139 * This requires setting the bit in the chunk's free mask corresponding
140 * to the given slot.
141 */
142void
143xfs_trans_free_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp)
144{
145 uint slot;
146 xfs_log_item_chunk_t *licp;
147 xfs_log_item_chunk_t **licpp;
148
149 slot = xfs_lic_desc_to_slot(lidp);
150 licp = xfs_lic_desc_to_chunk(lidp);
151 xfs_lic_relse(licp, slot);
152 lidp->lid_item->li_desc = NULL;
153 tp->t_items_free++;
154
155 /*
156 * If there are no more used items in the chunk and this is not
157 * the chunk embedded in the transaction structure, then free
158 * the chunk. First pull it from the chunk list and then
159 * free it back to the heap. We didn't bother with a doubly
160 * linked list here because the lists should be very short
161 * and this is not a performance path. It's better to save
162 * the memory of the extra pointer.
163 *
164 * Also decrement the transaction structure's count of free items
165 * by the number in a chunk since we are freeing an empty chunk.
166 */
167 if (xfs_lic_are_all_free(licp) && (licp != &(tp->t_items))) {
168 licpp = &(tp->t_items.lic_next);
169 while (*licpp != licp) {
170 ASSERT(*licpp != NULL);
171 licpp = &((*licpp)->lic_next);
172 }
173 *licpp = licp->lic_next;
174 kmem_free(licp);
175 tp->t_items_free -= XFS_LIC_NUM_SLOTS;
176 }
177}
178
179/*
180 * This is called to find the descriptor corresponding to the given
181 * log item. It returns a pointer to the descriptor.
182 * The log item MUST have a corresponding descriptor in the given
183 * transaction. This routine does not return NULL, it panics.
184 *
185 * The descriptor pointer is kept in the log item's li_desc field.
186 * Just return it.
187 */
188/*ARGSUSED*/
189xfs_log_item_desc_t *
190xfs_trans_find_item(xfs_trans_t *tp, xfs_log_item_t *lip)
191{
192 ASSERT(lip->li_desc != NULL);
193
194 return lip->li_desc;
195}
196
197
198/*
199 * Return a pointer to the first descriptor in the chunk list.
200 * This does not return NULL if there are none, it panics.
201 *
202 * The first descriptor must be in either the first or second chunk.
203 * This is because the only chunk allowed to be empty is the first.
204 * All others are freed when they become empty.
205 *
206 * At some point this and xfs_trans_next_item() should be optimized
207 * to quickly look at the mask to determine if there is anything to
208 * look at.
209 */
210xfs_log_item_desc_t *
211xfs_trans_first_item(xfs_trans_t *tp)
212{
213 xfs_log_item_chunk_t *licp;
214 int i;
215
216 licp = &tp->t_items;
217 /*
218 * If it's not in the first chunk, skip to the second.
219 */
220 if (xfs_lic_are_all_free(licp)) {
221 licp = licp->lic_next;
222 }
223
224 /*
225 * Return the first non-free descriptor in the chunk.
226 */
227 ASSERT(!xfs_lic_are_all_free(licp));
228 for (i = 0; i < licp->lic_unused; i++) {
229 if (xfs_lic_isfree(licp, i)) {
230 continue;
231 }
232
233 return xfs_lic_slot(licp, i);
234 }
235 cmn_err(CE_WARN, "xfs_trans_first_item() -- no first item");
236 return NULL;
237}
238
239
240/*
241 * Given a descriptor, return the next descriptor in the chunk list.
242 * This returns NULL if there are no more used descriptors in the list.
243 *
244 * We do this by first locating the chunk in which the descriptor resides,
245 * and then scanning forward in the chunk and the list for the next
246 * used descriptor.
247 */
248/*ARGSUSED*/
249xfs_log_item_desc_t *
250xfs_trans_next_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp)
251{
252 xfs_log_item_chunk_t *licp;
253 int i;
254
255 licp = xfs_lic_desc_to_chunk(lidp);
256
257 /*
258 * First search the rest of the chunk. The for loop keeps us
259 * from referencing things beyond the end of the chunk.
260 */
261 for (i = (int)xfs_lic_desc_to_slot(lidp) + 1; i < licp->lic_unused; i++) {
262 if (xfs_lic_isfree(licp, i)) {
263 continue;
264 }
265
266 return xfs_lic_slot(licp, i);
267 }
268
269 /*
270 * Now search the next chunk. It must be there, because the
271 * next chunk would have been freed if it were empty.
272 * If there is no next chunk, return NULL.
273 */
274 if (licp->lic_next == NULL) {
275 return NULL;
276 }
277
278 licp = licp->lic_next;
279 ASSERT(!xfs_lic_are_all_free(licp));
280 for (i = 0; i < licp->lic_unused; i++) {
281 if (xfs_lic_isfree(licp, i)) {
282 continue;
283 }
284
285 return xfs_lic_slot(licp, i);
286 }
287 ASSERT(0);
288 /* NOTREACHED */
289 return NULL; /* keep gcc quiet */
290}
291
292/*
293 * This is called to unlock all of the items of a transaction and to free
294 * all the descriptors of that transaction.
295 *
296 * It walks the list of descriptors and unlocks each item. It frees
297 * each chunk except that embedded in the transaction as it goes along.
298 */
299void
300xfs_trans_free_items(
301 xfs_trans_t *tp,
302 xfs_lsn_t commit_lsn,
303 int flags)
304{
305 xfs_log_item_chunk_t *licp;
306 xfs_log_item_chunk_t *next_licp;
307 int abort;
308
309 abort = flags & XFS_TRANS_ABORT;
310 licp = &tp->t_items;
311 /*
312 * Special case the embedded chunk so we don't free it below.
313 */
314 if (!xfs_lic_are_all_free(licp)) {
315 (void) xfs_trans_unlock_chunk(licp, 1, abort, commit_lsn);
316 xfs_lic_all_free(licp);
317 licp->lic_unused = 0;
318 }
319 licp = licp->lic_next;
320
321 /*
322 * Unlock each item in each chunk and free the chunks.
323 */
324 while (licp != NULL) {
325 ASSERT(!xfs_lic_are_all_free(licp));
326 (void) xfs_trans_unlock_chunk(licp, 1, abort, commit_lsn);
327 next_licp = licp->lic_next;
328 kmem_free(licp);
329 licp = next_licp;
330 }
331
332 /*
333 * Reset the transaction structure's free item count.
334 */
335 tp->t_items_free = XFS_LIC_NUM_SLOTS;
336 tp->t_items.lic_next = NULL;
337}
338
339
340
341/*
342 * This is called to unlock the items associated with a transaction.
343 * Items which were not logged should be freed.
344 * Those which were logged must still be tracked so they can be unpinned
345 * when the transaction commits.
346 */
347void
348xfs_trans_unlock_items(xfs_trans_t *tp, xfs_lsn_t commit_lsn)
349{
350 xfs_log_item_chunk_t *licp;
351 xfs_log_item_chunk_t *next_licp;
352 xfs_log_item_chunk_t **licpp;
353 int freed;
354
355 freed = 0;
356 licp = &tp->t_items;
357
358 /*
359 * Special case the embedded chunk so we don't free.
360 */
361 if (!xfs_lic_are_all_free(licp)) {
362 freed = xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn);
363 }
364 licpp = &(tp->t_items.lic_next);
365 licp = licp->lic_next;
366
367 /*
368 * Unlock each item in each chunk, free non-dirty descriptors,
369 * and free empty chunks.
370 */
371 while (licp != NULL) {
372 ASSERT(!xfs_lic_are_all_free(licp));
373 freed += xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn);
374 next_licp = licp->lic_next;
375 if (xfs_lic_are_all_free(licp)) {
376 *licpp = next_licp;
377 kmem_free(licp);
378 freed -= XFS_LIC_NUM_SLOTS;
379 } else {
380 licpp = &(licp->lic_next);
381 }
382 ASSERT(*licpp == next_licp);
383 licp = next_licp;
384 }
385
386 /*
387 * Fix the free descriptor count in the transaction.
388 */
389 tp->t_items_free += freed;
390}
391
392/*
393 * Unlock each item pointed to by a descriptor in the given chunk.
394 * Stamp the commit lsn into each item if necessary.
395 * Free descriptors pointing to items which are not dirty if freeing_chunk
396 * is zero. If freeing_chunk is non-zero, then we need to unlock all
397 * items in the chunk.
398 *
399 * Return the number of descriptors freed.
400 */
401STATIC int
402xfs_trans_unlock_chunk(
403 xfs_log_item_chunk_t *licp,
404 int freeing_chunk,
405 int abort,
406 xfs_lsn_t commit_lsn)
407{
408 xfs_log_item_desc_t *lidp;
409 xfs_log_item_t *lip;
410 int i;
411 int freed;
412
413 freed = 0;
414 lidp = licp->lic_descs;
415 for (i = 0; i < licp->lic_unused; i++, lidp++) {
416 if (xfs_lic_isfree(licp, i)) {
417 continue;
418 }
419 lip = lidp->lid_item;
420 lip->li_desc = NULL;
421
422 if (commit_lsn != NULLCOMMITLSN)
423 IOP_COMMITTING(lip, commit_lsn);
424 if (abort)
425 lip->li_flags |= XFS_LI_ABORTED;
426 IOP_UNLOCK(lip);
427
428 /*
429 * Free the descriptor if the item is not dirty
430 * within this transaction and the caller is not
431 * going to just free the entire thing regardless.
432 */
433 if (!(freeing_chunk) &&
434 (!(lidp->lid_flags & XFS_LID_DIRTY) || abort)) {
435 xfs_lic_relse(licp, i);
436 freed++;
437 }
438 }
439
440 return freed;
441}
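
Everything this deleted file did — finding a free slot, walking chunks with xfs_trans_first_item()/xfs_trans_next_item(), and the unlock/free passes — is subsumed by iterating tp->t_items directly, as the rewritten xfs_trans_buf_item_match() above already shows. For reference, a list-based equivalent of the removed xfs_trans_free_items() would look roughly like the sketch below; the real replacement lives in xfs_trans.c and may differ in detail.

	void
	xfs_trans_free_items(
		struct xfs_trans	*tp,
		xfs_lsn_t		commit_lsn,
		int			flags)
	{
		struct xfs_log_item_desc *lidp, *next;

		list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
			struct xfs_log_item *lip = lidp->lid_item;

			if (commit_lsn != NULLCOMMITLSN)
				IOP_COMMITTING(lip, commit_lsn);
			if (flags & XFS_TRANS_ABORT)
				lip->li_flags |= XFS_LI_ABORTED;
			IOP_UNLOCK(lip);

			/* drops the descriptor from t_items and frees it */
			xfs_trans_del_item(lip);
		}
	}
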
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index c6e4f2c8de6e..e2d93d8ead7b 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -23,22 +23,8 @@ struct xfs_log_item_desc;
23struct xfs_mount; 23struct xfs_mount;
24struct xfs_trans; 24struct xfs_trans;
25 25
26/* 26void xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
27 * From xfs_trans_item.c 27void xfs_trans_del_item(struct xfs_log_item *);
28 */
29struct xfs_log_item_desc *xfs_trans_add_item(struct xfs_trans *,
30 struct xfs_log_item *);
31void xfs_trans_free_item(struct xfs_trans *,
32 struct xfs_log_item_desc *);
33struct xfs_log_item_desc *xfs_trans_find_item(struct xfs_trans *,
34 struct xfs_log_item *);
35struct xfs_log_item_desc *xfs_trans_first_item(struct xfs_trans *);
36struct xfs_log_item_desc *xfs_trans_next_item(struct xfs_trans *,
37 struct xfs_log_item_desc *);
38
39void xfs_trans_unlock_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn);
40void xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn,
41 int flags);
42 28
43void xfs_trans_item_committed(struct xfs_log_item *lip, 29void xfs_trans_item_committed(struct xfs_log_item *lip,
44 xfs_lsn_t commit_lsn, int aborted); 30 xfs_lsn_t commit_lsn, int aborted);
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
index 4d88616bde91..b7d5769d2df0 100644
--- a/fs/xfs/xfs_utils.c
+++ b/fs/xfs/xfs_utils.c
@@ -25,18 +25,14 @@
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h" 26#include "xfs_ag.h"
27#include "xfs_dir2.h" 27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h" 28#include "xfs_mount.h"
30#include "xfs_bmap_btree.h" 29#include "xfs_bmap_btree.h"
31#include "xfs_dir2_sf.h"
32#include "xfs_attr_sf.h"
33#include "xfs_dinode.h" 30#include "xfs_dinode.h"
34#include "xfs_inode.h" 31#include "xfs_inode.h"
35#include "xfs_inode_item.h" 32#include "xfs_inode_item.h"
36#include "xfs_bmap.h" 33#include "xfs_bmap.h"
37#include "xfs_error.h" 34#include "xfs_error.h"
38#include "xfs_quota.h" 35#include "xfs_quota.h"
39#include "xfs_rw.h"
40#include "xfs_itable.h" 36#include "xfs_itable.h"
41#include "xfs_utils.h" 37#include "xfs_utils.h"
42 38
@@ -324,86 +320,3 @@ xfs_bumplink(
324 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 320 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
325 return 0; 321 return 0;
326} 322}
327
328/*
329 * Try to truncate the given file to 0 length. Currently called
330 * only out of xfs_remove when it has to truncate a file to free
331 * up space for the remove to proceed.
332 */
333int
334xfs_truncate_file(
335 xfs_mount_t *mp,
336 xfs_inode_t *ip)
337{
338 xfs_trans_t *tp;
339 int error;
340
341#ifdef QUOTADEBUG
342 /*
343 * This is called to truncate the quotainodes too.
344 */
345 if (XFS_IS_UQUOTA_ON(mp)) {
346 if (ip->i_ino != mp->m_sb.sb_uquotino)
347 ASSERT(ip->i_udquot);
348 }
349 if (XFS_IS_OQUOTA_ON(mp)) {
350 if (ip->i_ino != mp->m_sb.sb_gquotino)
351 ASSERT(ip->i_gdquot);
352 }
353#endif
354 /*
355 * Make the call to xfs_itruncate_start before starting the
356 * transaction, because we cannot make the call while we're
357 * in a transaction.
358 */
359 xfs_ilock(ip, XFS_IOLOCK_EXCL);
360 error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, (xfs_fsize_t)0);
361 if (error) {
362 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
363 return error;
364 }
365
366 tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
367 if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
368 XFS_TRANS_PERM_LOG_RES,
369 XFS_ITRUNCATE_LOG_COUNT))) {
370 xfs_trans_cancel(tp, 0);
371 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
372 return error;
373 }
374
375 /*
376 * Follow the normal truncate locking protocol. Since we
377 * hold the inode in the transaction, we know that its number
378 * of references will stay constant.
379 */
380 xfs_ilock(ip, XFS_ILOCK_EXCL);
381 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
382 xfs_trans_ihold(tp, ip);
383 /*
384 * Signal a sync xaction. The only case where that isn't
385 * the case is if we're truncating an already unlinked file
386 * on a wsync fs. In that case, we know the blocks can't
387 * reappear in the file because the links to file are
388 * permanently toast. Currently, we're always going to
389 * want a sync transaction because this code is being
390 * called from places where nlink is guaranteed to be 1
391 * but I'm leaving the tests in to protect against future
392 * changes -- rcc.
393 */
394 error = xfs_itruncate_finish(&tp, ip, (xfs_fsize_t)0,
395 XFS_DATA_FORK,
396 ((ip->i_d.di_nlink != 0 ||
397 !(mp->m_flags & XFS_MOUNT_WSYNC))
398 ? 1 : 0));
399 if (error) {
400 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
401 XFS_TRANS_ABORT);
402 } else {
403 xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
404 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
405 }
406 xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
407
408 return error;
409}
diff --git a/fs/xfs/xfs_utils.h b/fs/xfs/xfs_utils.h
index ef321225d269..f55b9678264f 100644
--- a/fs/xfs/xfs_utils.h
+++ b/fs/xfs/xfs_utils.h
@@ -18,7 +18,6 @@
18#ifndef __XFS_UTILS_H__ 18#ifndef __XFS_UTILS_H__
19#define __XFS_UTILS_H__ 19#define __XFS_UTILS_H__
20 20
21extern int xfs_truncate_file(xfs_mount_t *, xfs_inode_t *);
22extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t, 21extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t,
23 xfs_dev_t, cred_t *, prid_t, int, 22 xfs_dev_t, cred_t *, prid_t, int,
24 xfs_inode_t **, int *); 23 xfs_inode_t **, int *);
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index a06bd62504fc..3ac137dd531b 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -26,19 +26,14 @@
26#include "xfs_sb.h" 26#include "xfs_sb.h"
27#include "xfs_ag.h" 27#include "xfs_ag.h"
28#include "xfs_dir2.h" 28#include "xfs_dir2.h"
29#include "xfs_dmapi.h"
30#include "xfs_mount.h" 29#include "xfs_mount.h"
31#include "xfs_da_btree.h" 30#include "xfs_da_btree.h"
32#include "xfs_bmap_btree.h" 31#include "xfs_bmap_btree.h"
33#include "xfs_alloc_btree.h"
34#include "xfs_ialloc_btree.h" 32#include "xfs_ialloc_btree.h"
35#include "xfs_dir2_sf.h"
36#include "xfs_attr_sf.h"
37#include "xfs_dinode.h" 33#include "xfs_dinode.h"
38#include "xfs_inode.h" 34#include "xfs_inode.h"
39#include "xfs_inode_item.h" 35#include "xfs_inode_item.h"
40#include "xfs_itable.h" 36#include "xfs_itable.h"
41#include "xfs_btree.h"
42#include "xfs_ialloc.h" 37#include "xfs_ialloc.h"
43#include "xfs_alloc.h" 38#include "xfs_alloc.h"
44#include "xfs_bmap.h" 39#include "xfs_bmap.h"
@@ -73,7 +68,7 @@ xfs_setattr(
73 struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2; 68 struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2;
74 int need_iolock = 1; 69 int need_iolock = 1;
75 70
76 xfs_itrace_entry(ip); 71 trace_xfs_setattr(ip);
77 72
78 if (mp->m_flags & XFS_MOUNT_RDONLY) 73 if (mp->m_flags & XFS_MOUNT_RDONLY)
79 return XFS_ERROR(EROFS); 74 return XFS_ERROR(EROFS);
@@ -143,16 +138,6 @@ xfs_setattr(
143 goto error_return; 138 goto error_return;
144 } 139 }
145 } else { 140 } else {
146 if (DM_EVENT_ENABLED(ip, DM_EVENT_TRUNCATE) &&
147 !(flags & XFS_ATTR_DMI)) {
148 int dmflags = AT_DELAY_FLAG(flags) | DM_SEM_FLAG_WR;
149 code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, ip,
150 iattr->ia_size, 0, dmflags, NULL);
151 if (code) {
152 lock_flags = 0;
153 goto error_return;
154 }
155 }
156 if (need_iolock) 141 if (need_iolock)
157 lock_flags |= XFS_IOLOCK_EXCL; 142 lock_flags |= XFS_IOLOCK_EXCL;
158 } 143 }
@@ -283,8 +268,7 @@ xfs_setattr(
283 commit_flags = XFS_TRANS_RELEASE_LOG_RES; 268 commit_flags = XFS_TRANS_RELEASE_LOG_RES;
284 xfs_ilock(ip, XFS_ILOCK_EXCL); 269 xfs_ilock(ip, XFS_ILOCK_EXCL);
285 270
286 xfs_trans_ijoin(tp, ip, lock_flags); 271 xfs_trans_ijoin(tp, ip);
287 xfs_trans_ihold(tp, ip);
288 272
289 /* 273 /*
290 * Only change the c/mtime if we are changing the size 274 * Only change the c/mtime if we are changing the size
@@ -334,8 +318,7 @@ xfs_setattr(
334 xfs_iflags_set(ip, XFS_ITRUNCATED); 318 xfs_iflags_set(ip, XFS_ITRUNCATED);
335 } 319 }
336 } else if (tp) { 320 } else if (tp) {
337 xfs_trans_ijoin(tp, ip, lock_flags); 321 xfs_trans_ijoin(tp, ip);
338 xfs_trans_ihold(tp, ip);
339 } 322 }
340 323
341 /* 324 /*
@@ -470,17 +453,10 @@ xfs_setattr(
470 return XFS_ERROR(code); 453 return XFS_ERROR(code);
471 } 454 }
472 455
473 if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE) &&
474 !(flags & XFS_ATTR_DMI)) {
475 (void) XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, ip, DM_RIGHT_NULL,
476 NULL, DM_RIGHT_NULL, NULL, NULL,
477 0, 0, AT_DELAY_FLAG(flags));
478 }
479 return 0; 456 return 0;
480 457
481 abort_return: 458 abort_return:
482 commit_flags |= XFS_TRANS_ABORT; 459 commit_flags |= XFS_TRANS_ABORT;
483 /* FALLTHROUGH */
484 error_return: 460 error_return:
485 xfs_qm_dqrele(udqp); 461 xfs_qm_dqrele(udqp);
486 xfs_qm_dqrele(gdqp); 462 xfs_qm_dqrele(gdqp);
@@ -516,7 +492,7 @@ xfs_readlink_bmap(
516 int error = 0; 492 int error = 0;
517 493
518 error = xfs_bmapi(NULL, ip, 0, XFS_B_TO_FSB(mp, pathlen), 0, NULL, 0, 494 error = xfs_bmapi(NULL, ip, 0, XFS_B_TO_FSB(mp, pathlen), 0, NULL, 0,
519 mval, &nmaps, NULL, NULL); 495 mval, &nmaps, NULL);
520 if (error) 496 if (error)
521 goto out; 497 goto out;
522 498
@@ -557,7 +533,7 @@ xfs_readlink(
557 int pathlen; 533 int pathlen;
558 int error = 0; 534 int error = 0;
559 535
560 xfs_itrace_entry(ip); 536 trace_xfs_readlink(ip);
561 537
562 if (XFS_FORCED_SHUTDOWN(mp)) 538 if (XFS_FORCED_SHUTDOWN(mp))
563 return XFS_ERROR(EIO); 539 return XFS_ERROR(EIO);
@@ -613,14 +589,14 @@ xfs_free_eofblocks(
613 */ 589 */
614 end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)ip->i_size)); 590 end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)ip->i_size));
615 last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); 591 last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
616 map_len = last_fsb - end_fsb; 592 if (last_fsb <= end_fsb)
617 if (map_len <= 0)
618 return 0; 593 return 0;
594 map_len = last_fsb - end_fsb;
619 595
620 nimaps = 1; 596 nimaps = 1;
621 xfs_ilock(ip, XFS_ILOCK_SHARED); 597 xfs_ilock(ip, XFS_ILOCK_SHARED);
622 error = xfs_bmapi(NULL, ip, end_fsb, map_len, 0, 598 error = xfs_bmapi(NULL, ip, end_fsb, map_len, 0,
623 NULL, 0, &imap, &nimaps, NULL, NULL); 599 NULL, 0, &imap, &nimaps, NULL);
624 xfs_iunlock(ip, XFS_ILOCK_SHARED); 600 xfs_iunlock(ip, XFS_ILOCK_SHARED);
625 601
626 if (!error && (nimaps != 0) && 602 if (!error && (nimaps != 0) &&
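
The reordering in the hunk above matters because map_len is a block count; assuming it is the unsigned xfs_filblks_t, the old sequence computed last_fsb - end_fsb first and then tested map_len <= 0, which for an unsigned value only ever catches zero — a last_fsb smaller than end_fsb would silently wrap to a huge length. Checking the bounds before subtracting avoids that. A standalone illustration (plain C, made-up values):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t end_fsb = 100, last_fsb = 40;	/* made-up block numbers */
		uint64_t map_len;

		map_len = last_fsb - end_fsb;		/* wraps around */
		printf("wrapped map_len = %llu\n", (unsigned long long)map_len);
		printf("old test (map_len <= 0) fires: %s\n",
		       map_len <= 0 ? "yes" : "no");	/* "no": wrap not caught */

		if (last_fsb <= end_fsb)		/* new ordering */
			printf("new test rejects the range before subtracting\n");
		return 0;
	}
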
@@ -675,10 +651,7 @@ xfs_free_eofblocks(
675 } 651 }
676 652
677 xfs_ilock(ip, XFS_ILOCK_EXCL); 653 xfs_ilock(ip, XFS_ILOCK_EXCL);
678 xfs_trans_ijoin(tp, ip, 654 xfs_trans_ijoin(tp, ip);
679 XFS_IOLOCK_EXCL |
680 XFS_ILOCK_EXCL);
681 xfs_trans_ihold(tp, ip);
682 655
683 error = xfs_itruncate_finish(&tp, ip, 656 error = xfs_itruncate_finish(&tp, ip,
684 ip->i_size, 657 ip->i_size,
@@ -750,8 +723,7 @@ xfs_inactive_symlink_rmt(
750 xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); 723 xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
751 size = (int)ip->i_d.di_size; 724 size = (int)ip->i_d.di_size;
752 ip->i_d.di_size = 0; 725 ip->i_d.di_size = 0;
753 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 726 xfs_trans_ijoin(tp, ip);
754 xfs_trans_ihold(tp, ip);
755 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 727 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
756 /* 728 /*
757 * Find the block(s) so we can inval and unmap them. 729 * Find the block(s) so we can inval and unmap them.
@@ -761,7 +733,7 @@ xfs_inactive_symlink_rmt(
761 nmaps = ARRAY_SIZE(mval); 733 nmaps = ARRAY_SIZE(mval);
762 if ((error = xfs_bmapi(tp, ip, 0, XFS_B_TO_FSB(mp, size), 734 if ((error = xfs_bmapi(tp, ip, 0, XFS_B_TO_FSB(mp, size),
763 XFS_BMAPI_METADATA, &first_block, 0, mval, &nmaps, 735 XFS_BMAPI_METADATA, &first_block, 0, mval, &nmaps,
764 &free_list, NULL))) 736 &free_list)))
765 goto error0; 737 goto error0;
766 /* 738 /*
767 * Invalidate the block(s). 739 * Invalidate the block(s).
@@ -776,7 +748,7 @@ xfs_inactive_symlink_rmt(
776 * Unmap the dead block(s) to the free_list. 748 * Unmap the dead block(s) to the free_list.
777 */ 749 */
778 if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps, 750 if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
779 &first_block, &free_list, NULL, &done))) 751 &first_block, &free_list, &done)))
780 goto error1; 752 goto error1;
781 ASSERT(done); 753 ASSERT(done);
782 /* 754 /*
@@ -795,8 +767,7 @@ xfs_inactive_symlink_rmt(
795 * Mark it dirty so it will be logged and moved forward in the log as 767 * Mark it dirty so it will be logged and moved forward in the log as
796 * part of every commit. 768 * part of every commit.
797 */ 769 */
798 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 770 xfs_trans_ijoin(tp, ip);
799 xfs_trans_ihold(tp, ip);
800 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 771 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
801 /* 772 /*
802 * Get a new, empty transaction to return to our caller. 773 * Get a new, empty transaction to return to our caller.
@@ -929,8 +900,7 @@ xfs_inactive_attrs(
929 goto error_cancel; 900 goto error_cancel;
930 901
931 xfs_ilock(ip, XFS_ILOCK_EXCL); 902 xfs_ilock(ip, XFS_ILOCK_EXCL);
932 xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); 903 xfs_trans_ijoin(tp, ip);
933 xfs_trans_ihold(tp, ip);
934 xfs_idestroy_fork(ip, XFS_ATTR_FORK); 904 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
935 905
936 ASSERT(ip->i_d.di_anextents == 0); 906 ASSERT(ip->i_d.di_anextents == 0);
@@ -1035,8 +1005,6 @@ xfs_inactive(
1035 int error; 1005 int error;
1036 int truncate; 1006 int truncate;
1037 1007
1038 xfs_itrace_entry(ip);
1039
1040 /* 1008 /*
1041 * If the inode is already free, then there can be nothing 1009 * If the inode is already free, then there can be nothing
1042 * to clean up here. 1010 * to clean up here.
@@ -1060,9 +1028,6 @@ xfs_inactive(
1060 1028
1061 mp = ip->i_mount; 1029 mp = ip->i_mount;
1062 1030
1063 if (ip->i_d.di_nlink == 0 && DM_EVENT_ENABLED(ip, DM_EVENT_DESTROY))
1064 XFS_SEND_DESTROY(mp, ip, DM_RIGHT_NULL);
1065
1066 error = 0; 1031 error = 0;
1067 1032
1068 /* If this is a read-only mount, don't do this (would generate I/O) */ 1033 /* If this is a read-only mount, don't do this (would generate I/O) */
@@ -1120,8 +1085,7 @@ xfs_inactive(
1120 } 1085 }
1121 1086
1122 xfs_ilock(ip, XFS_ILOCK_EXCL); 1087 xfs_ilock(ip, XFS_ILOCK_EXCL);
1123 xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); 1088 xfs_trans_ijoin(tp, ip);
1124 xfs_trans_ihold(tp, ip);
1125 1089
1126 /* 1090 /*
1127 * normally, we have to run xfs_itruncate_finish sync. 1091 * normally, we have to run xfs_itruncate_finish sync.
@@ -1154,8 +1118,7 @@ xfs_inactive(
1154 return VN_INACTIVE_CACHE; 1118 return VN_INACTIVE_CACHE;
1155 } 1119 }
1156 1120
1157 xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); 1121 xfs_trans_ijoin(tp, ip);
1158 xfs_trans_ihold(tp, ip);
1159 } else { 1122 } else {
1160 error = xfs_trans_reserve(tp, 0, 1123 error = xfs_trans_reserve(tp, 0,
1161 XFS_IFREE_LOG_RES(mp), 1124 XFS_IFREE_LOG_RES(mp),
@@ -1168,8 +1131,7 @@ xfs_inactive(
1168 } 1131 }
1169 1132
1170 xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 1133 xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1171 xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); 1134 xfs_trans_ijoin(tp, ip);
1172 xfs_trans_ihold(tp, ip);
1173 } 1135 }
1174 1136
1175 /* 1137 /*
@@ -1257,7 +1219,7 @@ xfs_lookup(
1257 int error; 1219 int error;
1258 uint lock_mode; 1220 uint lock_mode;
1259 1221
1260 xfs_itrace_entry(dp); 1222 trace_xfs_lookup(dp, name);
1261 1223
1262 if (XFS_FORCED_SHUTDOWN(dp->i_mount)) 1224 if (XFS_FORCED_SHUTDOWN(dp->i_mount))
1263 return XFS_ERROR(EIO); 1225 return XFS_ERROR(EIO);
@@ -1269,7 +1231,7 @@ xfs_lookup(
1269 if (error) 1231 if (error)
1270 goto out; 1232 goto out;
1271 1233
1272 error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp, 0); 1234 error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
1273 if (error) 1235 if (error)
1274 goto out_free_name; 1236 goto out_free_name;
1275 1237
@@ -1309,21 +1271,11 @@ xfs_create(
1309 uint log_res; 1271 uint log_res;
1310 uint log_count; 1272 uint log_count;
1311 1273
1312 xfs_itrace_entry(dp); 1274 trace_xfs_create(dp, name);
1313 1275
1314 if (XFS_FORCED_SHUTDOWN(mp)) 1276 if (XFS_FORCED_SHUTDOWN(mp))
1315 return XFS_ERROR(EIO); 1277 return XFS_ERROR(EIO);
1316 1278
1317 if (DM_EVENT_ENABLED(dp, DM_EVENT_CREATE)) {
1318 error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE,
1319 dp, DM_RIGHT_NULL, NULL,
1320 DM_RIGHT_NULL, name->name, NULL,
1321 mode, 0, 0);
1322
1323 if (error)
1324 return error;
1325 }
1326
1327 if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) 1279 if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
1328 prid = dp->i_d.di_projid; 1280 prid = dp->i_d.di_projid;
1329 else 1281 else
@@ -1427,8 +1379,7 @@ xfs_create(
1427 * the transaction cancel unlocking dp so don't do it explicitly in the 1379 * the transaction cancel unlocking dp so don't do it explicitly in the
1428 * error path. 1380 * error path.
1429 */ 1381 */
1430 IHOLD(dp); 1382 xfs_trans_ijoin_ref(tp, dp, XFS_ILOCK_EXCL);
1431 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1432 unlock_dp_on_error = B_FALSE; 1383 unlock_dp_on_error = B_FALSE;
1433 1384
1434 error = xfs_dir_createname(tp, dp, name, ip->i_ino, 1385 error = xfs_dir_createname(tp, dp, name, ip->i_ino,
@@ -1487,16 +1438,7 @@ xfs_create(
1487 xfs_qm_dqrele(gdqp); 1438 xfs_qm_dqrele(gdqp);
1488 1439
1489 *ipp = ip; 1440 *ipp = ip;
1490 1441 return 0;
1491 /* Fallthrough to std_return with error = 0 */
1492 std_return:
1493 if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTCREATE)) {
1494 XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE, dp, DM_RIGHT_NULL,
1495 ip, DM_RIGHT_NULL, name->name, NULL, mode,
1496 error, 0);
1497 }
1498
1499 return error;
1500 1442
1501 out_bmap_cancel: 1443 out_bmap_cancel:
1502 xfs_bmap_cancel(&free_list); 1444 xfs_bmap_cancel(&free_list);
@@ -1510,8 +1452,8 @@ xfs_create(
1510 1452
1511 if (unlock_dp_on_error) 1453 if (unlock_dp_on_error)
1512 xfs_iunlock(dp, XFS_ILOCK_EXCL); 1454 xfs_iunlock(dp, XFS_ILOCK_EXCL);
1513 1455 std_return:
1514 goto std_return; 1456 return error;
1515 1457
1516 out_abort_rele: 1458 out_abort_rele:
1517 /* 1459 /*
@@ -1726,20 +1668,11 @@ xfs_remove(
1726 uint resblks; 1668 uint resblks;
1727 uint log_count; 1669 uint log_count;
1728 1670
1729 xfs_itrace_entry(dp); 1671 trace_xfs_remove(dp, name);
1730 xfs_itrace_entry(ip);
1731 1672
1732 if (XFS_FORCED_SHUTDOWN(mp)) 1673 if (XFS_FORCED_SHUTDOWN(mp))
1733 return XFS_ERROR(EIO); 1674 return XFS_ERROR(EIO);
1734 1675
1735 if (DM_EVENT_ENABLED(dp, DM_EVENT_REMOVE)) {
1736 error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, dp, DM_RIGHT_NULL,
1737 NULL, DM_RIGHT_NULL, name->name, NULL,
1738 ip->i_d.di_mode, 0, 0);
1739 if (error)
1740 return error;
1741 }
1742
1743 error = xfs_qm_dqattach(dp, 0); 1676 error = xfs_qm_dqattach(dp, 0);
1744 if (error) 1677 if (error)
1745 goto std_return; 1678 goto std_return;
@@ -1782,15 +1715,8 @@ xfs_remove(
1782 1715
1783 xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL); 1716 xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
1784 1717
1785 /* 1718 xfs_trans_ijoin_ref(tp, dp, XFS_ILOCK_EXCL);
1786 * At this point, we've gotten both the directory and the entry 1719 xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
1787 * inodes locked.
1788 */
1789 IHOLD(ip);
1790 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1791
1792 IHOLD(dp);
1793 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1794 1720
1795 /* 1721 /*
1796 * If we're removing a directory perform some additional validation. 1722 * If we're removing a directory perform some additional validation.
@@ -1877,21 +1803,15 @@ xfs_remove(
1877 if (!is_dir && link_zero && xfs_inode_is_filestream(ip)) 1803 if (!is_dir && link_zero && xfs_inode_is_filestream(ip))
1878 xfs_filestream_deassociate(ip); 1804 xfs_filestream_deassociate(ip);
1879 1805
1880 std_return: 1806 return 0;
1881 if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) {
1882 XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, dp, DM_RIGHT_NULL,
1883 NULL, DM_RIGHT_NULL, name->name, NULL,
1884 ip->i_d.di_mode, error, 0);
1885 }
1886
1887 return error;
1888 1807
1889 out_bmap_cancel: 1808 out_bmap_cancel:
1890 xfs_bmap_cancel(&free_list); 1809 xfs_bmap_cancel(&free_list);
1891 cancel_flags |= XFS_TRANS_ABORT; 1810 cancel_flags |= XFS_TRANS_ABORT;
1892 out_trans_cancel: 1811 out_trans_cancel:
1893 xfs_trans_cancel(tp, cancel_flags); 1812 xfs_trans_cancel(tp, cancel_flags);
1894 goto std_return; 1813 std_return:
1814 return error;
1895} 1815}
1896 1816
1897int 1817int
@@ -1909,25 +1829,13 @@ xfs_link(
1909 int committed; 1829 int committed;
1910 int resblks; 1830 int resblks;
1911 1831
1912 xfs_itrace_entry(tdp); 1832 trace_xfs_link(tdp, target_name);
1913 xfs_itrace_entry(sip);
1914 1833
1915 ASSERT(!S_ISDIR(sip->i_d.di_mode)); 1834 ASSERT(!S_ISDIR(sip->i_d.di_mode));
1916 1835
1917 if (XFS_FORCED_SHUTDOWN(mp)) 1836 if (XFS_FORCED_SHUTDOWN(mp))
1918 return XFS_ERROR(EIO); 1837 return XFS_ERROR(EIO);
1919 1838
1920 if (DM_EVENT_ENABLED(tdp, DM_EVENT_LINK)) {
1921 error = XFS_SEND_NAMESP(mp, DM_EVENT_LINK,
1922 tdp, DM_RIGHT_NULL,
1923 sip, DM_RIGHT_NULL,
1924 target_name->name, NULL, 0, 0, 0);
1925 if (error)
1926 return error;
1927 }
1928
1929 /* Return through std_return after this point. */
1930
1931 error = xfs_qm_dqattach(sip, 0); 1839 error = xfs_qm_dqattach(sip, 0);
1932 if (error) 1840 if (error)
1933 goto std_return; 1841 goto std_return;
@@ -1953,15 +1861,8 @@ xfs_link(
1953 1861
1954 xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL); 1862 xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
1955 1863
1956 /* 1864 xfs_trans_ijoin_ref(tp, sip, XFS_ILOCK_EXCL);
1957 * Increment vnode ref counts since xfs_trans_commit & 1865 xfs_trans_ijoin_ref(tp, tdp, XFS_ILOCK_EXCL);
1958 * xfs_trans_cancel will both unlock the inodes and
1959 * decrement the associated ref counts.
1960 */
1961 IHOLD(sip);
1962 IHOLD(tdp);
1963 xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
1964 xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
1965 1866
1966 /* 1867 /*
1967 * If the source has too many links, we can't make any more to it. 1868 * If the source has too many links, we can't make any more to it.
@@ -2014,27 +1915,14 @@ xfs_link(
 			goto abort_return;
 	}
 
-	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-	if (error)
-		goto std_return;
-
-	/* Fall through to std_return with error = 0. */
-std_return:
-	if (DM_EVENT_ENABLED(sip, DM_EVENT_POSTLINK)) {
-		(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTLINK,
-				tdp, DM_RIGHT_NULL,
-				sip, DM_RIGHT_NULL,
-				target_name->name, NULL, 0, error, 0);
-	}
-	return error;
+	return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 
  abort_return:
 	cancel_flags |= XFS_TRANS_ABORT;
-	/* FALLTHROUGH */
-
  error_return:
 	xfs_trans_cancel(tp, cancel_flags);
-	goto std_return;
+ std_return:
+	return error;
 }
 
 int
@@ -2074,7 +1962,7 @@ xfs_symlink(
 	ip = NULL;
 	tp = NULL;
 
-	xfs_itrace_entry(dp);
+	trace_xfs_symlink(dp, link_name);
 
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return XFS_ERROR(EIO);
@@ -2086,17 +1974,6 @@ xfs_symlink(
 	if (pathlen >= MAXPATHLEN)	/* total string too long */
 		return XFS_ERROR(ENAMETOOLONG);
 
-	if (DM_EVENT_ENABLED(dp, DM_EVENT_SYMLINK)) {
-		error = XFS_SEND_NAMESP(mp, DM_EVENT_SYMLINK, dp,
-					DM_RIGHT_NULL, NULL, DM_RIGHT_NULL,
-					link_name->name,
-					(unsigned char *)target_path, 0, 0, 0);
-		if (error)
-			return error;
-	}
-
-	/* Return through std_return after this point. */
-
 	udqp = gdqp = NULL;
 	if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
 		prid = dp->i_d.di_projid;
@@ -2180,8 +2057,7 @@ xfs_symlink(
 	 * transaction cancel unlocking dp so don't do it explicitly in the
 	 * error path.
 	 */
-	IHOLD(dp);
-	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin_ref(tp, dp, XFS_ILOCK_EXCL);
 	unlock_dp_on_error = B_FALSE;
 
 	/*
@@ -2215,7 +2091,7 @@ xfs_symlink(
 	error = xfs_bmapi(tp, ip, first_fsb, fs_blocks,
 			  XFS_BMAPI_WRITE | XFS_BMAPI_METADATA,
 			  &first_block, resblks, mval, &nmaps,
-			  &free_list, NULL);
+			  &free_list);
 	if (error) {
 		goto error1;
 	}
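The same one-argument trim recurs in every xfs_bmapi() and xfs_bunmapi() call later in this file: the final parameter has been dropped from the interface, and each call shown in this diff had been passing NULL for it anyway. Reassembled from the new side of the hunk above, the symlink block-allocation call now reads:

	/*
	 * New-side call only; nothing here beyond the '+' and context
	 * lines of the hunk above.
	 */
	error = xfs_bmapi(tp, ip, first_fsb, fs_blocks,
			  XFS_BMAPI_WRITE | XFS_BMAPI_METADATA,
			  &first_block, resblks, mval, &nmaps,
			  &free_list);
	if (error) {
		goto error1;
	}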
@@ -2278,21 +2154,8 @@ xfs_symlink(
 	xfs_qm_dqrele(udqp);
 	xfs_qm_dqrele(gdqp);
 
-	/* Fall through to std_return with error = 0 or errno from
-	 * xfs_trans_commit	*/
-std_return:
-	if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTSYMLINK)) {
-		(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTSYMLINK,
-					dp, DM_RIGHT_NULL,
-					error ? NULL : ip,
-					DM_RIGHT_NULL, link_name->name,
-					(unsigned char *)target_path,
-					0, error, 0);
-	}
-
-	if (!error)
-		*ipp = ip;
-	return error;
+	*ipp = ip;
+	return 0;
 
  error2:
 	IRELE(ip);
@@ -2306,8 +2169,8 @@ std_return:
 
 	if (unlock_dp_on_error)
 		xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
-	goto std_return;
+ std_return:
+	return error;
 }
 
 int
@@ -2333,13 +2196,12 @@ xfs_set_dmattrs(
 		return error;
 	}
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
-	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
 
 	ip->i_d.di_dmevmask = evmask;
 	ip->i_d.di_dmstate = state;
 
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-	IHOLD(ip);
 	error = xfs_trans_commit(tp, 0);
 
 	return error;
@@ -2390,7 +2252,7 @@ xfs_alloc_file_space(
 	int			committed;
 	int			error;
 
-	xfs_itrace_entry(ip);
+	trace_xfs_alloc_file_space(ip);
 
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return XFS_ERROR(EIO);
@@ -2412,25 +2274,9 @@ xfs_alloc_file_space(
 	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
 	allocatesize_fsb = XFS_B_TO_FSB(mp, count);
 
-	/* Generate a DMAPI event if needed. */
-	if (alloc_type != 0 && offset < ip->i_size &&
-	    (attr_flags & XFS_ATTR_DMI) == 0 &&
-	    DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) {
-		xfs_off_t	end_dmi_offset;
-
-		end_dmi_offset = offset+len;
-		if (end_dmi_offset > ip->i_size)
-			end_dmi_offset = ip->i_size;
-		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, offset,
-				      end_dmi_offset - offset, 0, NULL);
-		if (error)
-			return error;
-	}
-
 	/*
 	 * Allocate file space until done or until there is an error
 	 */
-retry:
 	while (allocatesize_fsb && !error) {
 		xfs_fileoff_t	s, e;
 
@@ -2488,8 +2334,7 @@ retry:
 		if (error)
 			goto error1;
 
-		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-		xfs_trans_ihold(tp, ip);
+		xfs_trans_ijoin(tp, ip);
 
 		/*
 		 * Issue the xfs_bmapi() call to allocate the blocks
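This hunk shows the counterpart of the xfs_trans_ijoin_ref() change earlier in the diff: where the old code joined the inode and then "held" it so the caller kept control after the commit, the new two-argument xfs_trans_ijoin() covers that case by itself. A minimal sketch, with only the xfs_* calls taken from this hunk and the wrapper name invented for illustration:

/*
 * Illustration only: "example_join_keep_lock" is a made-up name.
 */
static void
example_join_keep_lock(struct xfs_trans *tp, struct xfs_inode *ip)
{
	/*
	 * Old idiom (removed above): join, then tell the transaction to
	 * hold the inode so the caller keeps the lock and reference
	 * after commit or cancel.
	 *
	 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	 *	xfs_trans_ihold(tp, ip);
	 */

	/*
	 * New idiom (added above): one call, no lock flags; the caller
	 * stays responsible for unlocking the inode once the
	 * transaction completes.
	 */
	xfs_trans_ijoin(tp, ip);
}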
@@ -2498,7 +2343,7 @@ retry:
 		error = xfs_bmapi(tp, ip, startoffset_fsb,
 				  allocatesize_fsb, bmapi_flag,
 				  &firstfsb, 0, imapp, &nimaps,
-				  &free_list, NULL);
+				  &free_list);
 		if (error) {
 			goto error0;
 		}
@@ -2527,17 +2372,6 @@ retry:
 		startoffset_fsb += allocated_fsb;
 		allocatesize_fsb -= allocated_fsb;
 	}
-dmapi_enospc_check:
-	if (error == ENOSPC && (attr_flags & XFS_ATTR_DMI) == 0 &&
-	    DM_EVENT_ENABLED(ip, DM_EVENT_NOSPACE)) {
-		error = XFS_SEND_NAMESP(mp, DM_EVENT_NOSPACE,
-				ip, DM_RIGHT_NULL,
-				ip, DM_RIGHT_NULL,
-				NULL, NULL, 0, 0, 0); /* Delay flag intentionally unused */
-		if (error == 0)
-			goto retry;	/* Maybe DMAPI app. has made space */
-		/* else fall through with error from XFS_SEND_DATA */
-	}
 
 	return error;
 
@@ -2548,7 +2382,7 @@ error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
 error1:	/* Just cancel transaction */
 	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	goto dmapi_enospc_check;
+	return error;
 }
 
 /*
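The two hunks above also drop the only user of the retry: label deleted earlier in xfs_alloc_file_space(): on ENOSPC the old code gave a DMAPI application a chance to make space and then jumped back to retry the allocation. Without DMAPI, ENOSPC is simply handed back to the caller. Reassembled from the new side (the error0 label between these hunks is outside the diff and not shown here):

	/* after the allocation loop */
	return error;

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}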
@@ -2598,7 +2432,7 @@ xfs_zero_remaining_bytes(
 		offset_fsb = XFS_B_TO_FSBT(mp, offset);
 		nimap = 1;
 		error = xfs_bmapi(NULL, ip, offset_fsb, 1, 0,
-			NULL, 0, &imap, &nimap, NULL, NULL);
+			NULL, 0, &imap, &nimap, NULL);
 		if (error || nimap < 1)
 			break;
 		ASSERT(imap.br_blockcount >= 1);
@@ -2661,7 +2495,6 @@ xfs_free_file_space(
 {
 	int			committed;
 	int			done;
-	xfs_off_t		end_dmi_offset;
 	xfs_fileoff_t		endoffset_fsb;
 	int			error;
 	xfs_fsblock_t		firstfsb;
@@ -2680,7 +2513,7 @@ xfs_free_file_space(
 
 	mp = ip->i_mount;
 
-	xfs_itrace_entry(ip);
+	trace_xfs_free_file_space(ip);
 
 	error = xfs_qm_dqattach(ip, 0);
 	if (error)
@@ -2691,19 +2524,7 @@ xfs_free_file_space(
 		return error;
 	rt = XFS_IS_REALTIME_INODE(ip);
 	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
-	end_dmi_offset = offset + len;
-	endoffset_fsb = XFS_B_TO_FSBT(mp, end_dmi_offset);
-
-	if (offset < ip->i_size && (attr_flags & XFS_ATTR_DMI) == 0 &&
-	    DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) {
-		if (end_dmi_offset > ip->i_size)
-			end_dmi_offset = ip->i_size;
-		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip,
-				offset, end_dmi_offset - offset,
-				AT_DELAY_FLAG(attr_flags), NULL);
-		if (error)
-			return error;
-	}
+	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
 
 	if (attr_flags & XFS_ATTR_NOLOCK)
 		need_iolock = 0;
@@ -2731,7 +2552,7 @@ xfs_free_file_space(
 	if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
 		nimap = 1;
 		error = xfs_bmapi(NULL, ip, startoffset_fsb,
-			1, 0, NULL, 0, &imap, &nimap, NULL, NULL);
+			1, 0, NULL, 0, &imap, &nimap, NULL);
 		if (error)
 			goto out_unlock_iolock;
 		ASSERT(nimap == 0 || nimap == 1);
@@ -2746,7 +2567,7 @@ xfs_free_file_space(
 		}
 		nimap = 1;
 		error = xfs_bmapi(NULL, ip, endoffset_fsb - 1,
-			1, 0, NULL, 0, &imap, &nimap, NULL, NULL);
+			1, 0, NULL, 0, &imap, &nimap, NULL);
 		if (error)
 			goto out_unlock_iolock;
 		ASSERT(nimap == 0 || nimap == 1);
@@ -2814,8 +2635,7 @@ xfs_free_file_space(
 		if (error)
 			goto error1;
 
-		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-		xfs_trans_ihold(tp, ip);
+		xfs_trans_ijoin(tp, ip);
 
 		/*
 		 * issue the bunmapi() call to free the blocks
@@ -2823,7 +2643,7 @@ xfs_free_file_space(
 		xfs_bmap_init(&free_list, &firstfsb);
 		error = xfs_bunmapi(tp, ip, startoffset_fsb,
 				  endoffset_fsb - startoffset_fsb,
-				  0, 2, &firstfsb, &free_list, NULL, &done);
+				  0, 2, &firstfsb, &free_list, &done);
 		if (error) {
 			goto error0;
 		}
@@ -2883,8 +2703,6 @@ xfs_change_file_space(
 	xfs_trans_t	*tp;
 	struct iattr	iattr;
 
-	xfs_itrace_entry(ip);
-
 	if (!S_ISREG(ip->i_d.di_mode))
 		return XFS_ERROR(EINVAL);
 
@@ -2985,8 +2803,7 @@ xfs_change_file_space(
 
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
 
-	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-	xfs_trans_ihold(tp, ip);
+	xfs_trans_ijoin(tp, ip);
 
 	if ((attr_flags & XFS_ATTR_DMI) == 0) {
 		ip->i_d.di_mode &= ~S_ISUID;