author    Linus Torvalds <torvalds@linux-foundation.org>    2009-12-11 18:30:29 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-12-11 18:30:29 -0500
commit    f4d544ee5720d336a8c64f9fd33efb888c302309 (patch)
tree      3b4674d46b04fbcfc38677df59c92320f65568dd
parent    0e2f7b837600979d5b6f174a6ff695b85942e985 (diff)
parent    44a743f68705c681439f264deb05f8f38e9048d3 (diff)
Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs
* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  xfs: Fix error return for fallocate() on XFS
  xfs: cleanup dmapi macros in the umount path
  xfs: remove incorrect sparse annotation for xfs_iget_cache_miss
  xfs: kill the STATIC_INLINE macro
  xfs: uninline xfs_get_extsz_hint
  xfs: rename xfs_attr_fetch to xfs_attr_get_int
  xfs: simplify xfs_buf_get / xfs_buf_read interfaces
  xfs: remove IO_ISAIO
  xfs: Wrapped journal record corruption on read at recovery
  xfs: cleanup data end I/O handlers
  xfs: use WRITE_SYNC_PLUG for synchronous writeout
  xfs: reset the i_iolock lock class in the reclaim path
  xfs: I/O completion handlers must use NOFS allocations
  xfs: fix mmap_sem/iolock inversion in xfs_free_eofblocks
  xfs: simplify inode teardown
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c    114
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c      14
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.h       9
-rw-r--r--  fs/xfs/linux-2.6/xfs_file.c      4
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c      6
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.c       5
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c    71
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c     15
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.h      1
-rw-r--r--  fs/xfs/linux-2.6/xfs_vnode.h     1
-rw-r--r--  fs/xfs/support/debug.h          18
-rw-r--r--  fs/xfs/xfs_attr.c               16
-rw-r--r--  fs/xfs/xfs_attr.h                1
-rw-r--r--  fs/xfs/xfs_attr_leaf.c           2
-rw-r--r--  fs/xfs/xfs_bmap_btree.c          3
-rw-r--r--  fs/xfs/xfs_filestream.h          8
-rw-r--r--  fs/xfs/xfs_fsops.c              25
-rw-r--r--  fs/xfs/xfs_ialloc.c              2
-rw-r--r--  fs/xfs/xfs_iget.c                5
-rw-r--r--  fs/xfs/xfs_iomap.c               9
-rw-r--r--  fs/xfs/xfs_log_recover.c        40
-rw-r--r--  fs/xfs/xfs_mount.c              18
-rw-r--r--  fs/xfs/xfs_mount.h              27
-rw-r--r--  fs/xfs/xfs_rw.c                 30
-rw-r--r--  fs/xfs/xfs_rw.h                 29
-rw-r--r--  fs/xfs/xfs_trans.c               7
-rw-r--r--  fs/xfs/xfs_trans.h               2
-rw-r--r--  fs/xfs/xfs_trans_buf.c          13
-rw-r--r--  fs/xfs/xfs_vnodeops.c           79
-rw-r--r--  fs/xfs/xfs_vnodeops.h            1
30 files changed, 254 insertions(+), 321 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 70f989895d15..87813e405cef 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -235,71 +235,36 @@ xfs_setfilesize(
 }
 
 /*
- * Buffered IO write completion for delayed allocate extents.
+ * IO write completion.
  */
 STATIC void
-xfs_end_bio_delalloc(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
-
-	xfs_setfilesize(ioend);
-	xfs_destroy_ioend(ioend);
-}
-
-/*
- * Buffered IO write completion for regular, written extents.
- */
-STATIC void
-xfs_end_bio_written(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
-
-	xfs_setfilesize(ioend);
-	xfs_destroy_ioend(ioend);
-}
-
-/*
- * IO write completion for unwritten extents.
- *
- * Issue transactions to convert a buffer range from unwritten
- * to written extents.
- */
-STATIC void
-xfs_end_bio_unwritten(
+xfs_end_io(
 	struct work_struct	*work)
 {
 	xfs_ioend_t		*ioend =
 		container_of(work, xfs_ioend_t, io_work);
 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
-	xfs_off_t		offset = ioend->io_offset;
-	size_t			size = ioend->io_size;
-
-	if (likely(!ioend->io_error)) {
-		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-			int error;
-			error = xfs_iomap_write_unwritten(ip, offset, size);
-			if (error)
-				ioend->io_error = error;
-		}
-		xfs_setfilesize(ioend);
-	}
-	xfs_destroy_ioend(ioend);
-}
 
-/*
- * IO read completion for regular, written extents.
- */
-STATIC void
-xfs_end_bio_read(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
+	/*
+	 * For unwritten extents we need to issue transactions to convert a
+	 * range to normal written extens after the data I/O has finished.
+	 */
+	if (ioend->io_type == IOMAP_UNWRITTEN &&
+	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
+		int error;
 
+		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
+						  ioend->io_size);
+		if (error)
+			ioend->io_error = error;
+	}
+
+	/*
+	 * We might have to update the on-disk file size after extending
+	 * writes.
+	 */
+	if (ioend->io_type != IOMAP_READ)
+		xfs_setfilesize(ioend);
 	xfs_destroy_ioend(ioend);
 }
 
@@ -314,10 +279,10 @@ xfs_finish_ioend(
 	int		wait)
 {
 	if (atomic_dec_and_test(&ioend->io_remaining)) {
-		struct workqueue_struct *wq = xfsdatad_workqueue;
-		if (ioend->io_work.func == xfs_end_bio_unwritten)
-			wq = xfsconvertd_workqueue;
+		struct workqueue_struct *wq;
 
+		wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
+			xfsconvertd_workqueue : xfsdatad_workqueue;
 		queue_work(wq, &ioend->io_work);
 		if (wait)
 			flush_workqueue(wq);
@@ -355,15 +320,7 @@ xfs_alloc_ioend(
 	ioend->io_offset = 0;
 	ioend->io_size = 0;
 
-	if (type == IOMAP_UNWRITTEN)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
-	else if (type == IOMAP_DELAY)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
-	else if (type == IOMAP_READ)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
-	else
-		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
-
+	INIT_WORK(&ioend->io_work, xfs_end_io);
 	return ioend;
 }
 
@@ -380,7 +337,7 @@ xfs_map_blocks(
 	return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
 }
 
-STATIC_INLINE int
+STATIC int
 xfs_iomap_valid(
 	xfs_iomap_t		*iomapp,
 	loff_t			offset)
@@ -412,8 +369,9 @@ xfs_end_bio(
 
 STATIC void
 xfs_submit_ioend_bio(
-	xfs_ioend_t	*ioend,
-	struct bio	*bio)
+	struct writeback_control *wbc,
+	xfs_ioend_t		*ioend,
+	struct bio		*bio)
 {
 	atomic_inc(&ioend->io_remaining);
 	bio->bi_private = ioend;
@@ -426,7 +384,8 @@ xfs_submit_ioend_bio(
 	if (xfs_ioend_new_eof(ioend))
 		xfs_mark_inode_dirty_sync(XFS_I(ioend->io_inode));
 
-	submit_bio(WRITE, bio);
+	submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
+		   WRITE_SYNC_PLUG : WRITE, bio);
 	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
 	bio_put(bio);
 }
@@ -505,6 +464,7 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
  */
 STATIC void
 xfs_submit_ioend(
+	struct writeback_control *wbc,
 	xfs_ioend_t		*ioend)
 {
 	xfs_ioend_t		*head = ioend;
@@ -533,19 +493,19 @@ xfs_submit_ioend(
 retry:
 				bio = xfs_alloc_ioend_bio(bh);
 			} else if (bh->b_blocknr != lastblock + 1) {
-				xfs_submit_ioend_bio(ioend, bio);
+				xfs_submit_ioend_bio(wbc, ioend, bio);
 				goto retry;
 			}
 
 			if (bio_add_buffer(bio, bh) != bh->b_size) {
-				xfs_submit_ioend_bio(ioend, bio);
+				xfs_submit_ioend_bio(wbc, ioend, bio);
 				goto retry;
 			}
 
 			lastblock = bh->b_blocknr;
 		}
 		if (bio)
-			xfs_submit_ioend_bio(ioend, bio);
+			xfs_submit_ioend_bio(wbc, ioend, bio);
 		xfs_finish_ioend(ioend, 0);
 	} while ((ioend = next) != NULL);
 }
@@ -1191,7 +1151,7 @@ xfs_page_state_convert(
 	}
 
 	if (iohead)
-		xfs_submit_ioend(iohead);
+		xfs_submit_ioend(wbc, iohead);
 
 	return page_dirty;
 
@@ -1528,7 +1488,7 @@ xfs_end_io_direct(
 		 * didn't map an unwritten extent so switch it's completion
 		 * handler.
 		 */
-		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
+		ioend->io_type = IOMAP_NEW;
 		xfs_finish_ioend(ioend, 0);
 	}
 
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 965df1227d64..4ddc973aea7a 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -149,7 +149,7 @@ page_region_mask(
 	return mask;
 }
 
-STATIC_INLINE void
+STATIC void
 set_page_region(
 	struct page	*page,
 	size_t		offset,
@@ -161,7 +161,7 @@ set_page_region(
 		SetPageUptodate(page);
 }
 
-STATIC_INLINE int
+STATIC int
 test_page_region(
 	struct page	*page,
 	size_t		offset,
@@ -582,7 +582,7 @@ found:
  *	although backing storage may not be.
  */
 xfs_buf_t *
-xfs_buf_get_flags(
+xfs_buf_get(
 	xfs_buftarg_t		*target,/* target for buffer */
 	xfs_off_t		ioff,	/* starting offset of range */
 	size_t			isize,	/* length of range */
@@ -661,7 +661,7 @@ _xfs_buf_read(
 }
 
 xfs_buf_t *
-xfs_buf_read_flags(
+xfs_buf_read(
 	xfs_buftarg_t		*target,
 	xfs_off_t		ioff,
 	size_t			isize,
@@ -671,7 +671,7 @@ xfs_buf_read_flags(
 
 	flags |= XBF_READ;
 
-	bp = xfs_buf_get_flags(target, ioff, isize, flags);
+	bp = xfs_buf_get(target, ioff, isize, flags);
 	if (bp) {
 		if (!XFS_BUF_ISDONE(bp)) {
 			XB_TRACE(bp, "read", (unsigned long)flags);
@@ -718,7 +718,7 @@ xfs_buf_readahead(
 		return;
 
 	flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
-	xfs_buf_read_flags(target, ioff, isize, flags);
+	xfs_buf_read(target, ioff, isize, flags);
 }
 
 xfs_buf_t *
@@ -1113,7 +1113,7 @@ xfs_bdwrite(
 	xfs_buf_delwri_queue(bp, 1);
 }
 
-STATIC_INLINE void
+STATIC void
 _xfs_buf_ioend(
 	xfs_buf_t	*bp,
 	int		schedule)
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 9b4d666ad31f..5f07dd91c5fa 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -186,15 +186,10 @@ extern xfs_buf_t *_xfs_buf_find(xfs_buftarg_t *, xfs_off_t, size_t,
 #define xfs_incore(buftarg,blkno,len,lockit) \
 	_xfs_buf_find(buftarg, blkno ,len, lockit, NULL)
 
-extern xfs_buf_t *xfs_buf_get_flags(xfs_buftarg_t *, xfs_off_t, size_t,
+extern xfs_buf_t *xfs_buf_get(xfs_buftarg_t *, xfs_off_t, size_t,
 				xfs_buf_flags_t);
-#define xfs_buf_get(target, blkno, len, flags) \
-	xfs_buf_get_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
-
-extern xfs_buf_t *xfs_buf_read_flags(xfs_buftarg_t *, xfs_off_t, size_t,
+extern xfs_buf_t *xfs_buf_read(xfs_buftarg_t *, xfs_off_t, size_t,
 				xfs_buf_flags_t);
-#define xfs_buf_read(target, blkno, len, flags) \
-	xfs_buf_read_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
 
 extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *);
 extern xfs_buf_t *xfs_buf_get_noaddr(size_t, xfs_buftarg_t *);
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index eff61e2732af..e4caeb28ce2e 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -52,7 +52,7 @@ xfs_file_aio_read(
 	loff_t			pos)
 {
 	struct file		*file = iocb->ki_filp;
-	int			ioflags = IO_ISAIO;
+	int			ioflags = 0;
 
 	BUG_ON(iocb->ki_pos != pos);
 	if (unlikely(file->f_flags & O_DIRECT))
@@ -71,7 +71,7 @@ xfs_file_aio_write(
 	loff_t			pos)
 {
 	struct file		*file = iocb->ki_filp;
-	int			ioflags = IO_ISAIO;
+	int			ioflags = 0;
 
 	BUG_ON(iocb->ki_pos != pos);
 	if (unlikely(file->f_flags & O_DIRECT))
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index cd42ef78f6b5..1f3b4b8f7dd4 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -573,8 +573,8 @@ xfs_vn_fallocate(
 	bf.l_len = len;
 
 	xfs_ilock(ip, XFS_IOLOCK_EXCL);
-	error = xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf,
+	error = -xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf,
 				      0, XFS_ATTR_NOLOCK);
 	if (!error && !(mode & FALLOC_FL_KEEP_SIZE) &&
 	    offset + len > i_size_read(inode))
 		new_size = offset + len;
@@ -585,7 +585,7 @@ xfs_vn_fallocate(
 
 		iattr.ia_valid = ATTR_SIZE;
 		iattr.ia_size = new_size;
-		error = xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
+		error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
 	}
 
 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 072050f8d346..78dbfcd5eec2 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -255,8 +255,6 @@ xfs_read(
 
 	iocb->ki_pos = *offset;
 	ret = generic_file_aio_read(iocb, iovp, segs, *offset);
-	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
-		ret = wait_on_sync_kiocb(iocb);
 	if (ret > 0)
 		XFS_STATS_ADD(xs_read_bytes, ret);
 
@@ -774,9 +772,6 @@ write_retry:
 
 	current->backing_dev_info = NULL;
 
-	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
-		ret = wait_on_sync_kiocb(iocb);
-
 	isize = i_size_read(inode);
 	if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
 		*offset = isize;
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 18a4b8e11df2..1bfb0e980193 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -930,13 +930,39 @@ xfs_fs_alloc_inode(
  */
 STATIC void
 xfs_fs_destroy_inode(
 	struct inode		*inode)
 {
-	xfs_inode_t		*ip = XFS_I(inode);
+	struct xfs_inode	*ip = XFS_I(inode);
+
+	xfs_itrace_entry(ip);
 
 	XFS_STATS_INC(vn_reclaim);
-	if (xfs_reclaim(ip))
-		panic("%s: cannot reclaim 0x%p\n", __func__, inode);
+
+	/* bad inode, get out here ASAP */
+	if (is_bad_inode(inode))
+		goto out_reclaim;
+
+	xfs_ioend_wait(ip);
+
+	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
+
+	/*
+	 * We should never get here with one of the reclaim flags already set.
+	 */
+	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
+	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
+
+	/*
+	 * If we have nothing to flush with this inode then complete the
+	 * teardown now, otherwise delay the flush operation.
+	 */
+	if (!xfs_inode_clean(ip)) {
+		xfs_inode_set_reclaim_tag(ip);
+		return;
+	}
+
+out_reclaim:
+	xfs_ireclaim(ip);
 }
 
 /*
@@ -973,7 +999,6 @@ xfs_fs_inode_init_once(
 
 	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
 		     "xfsino", ip->i_ino);
-	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
 }
 
 /*
@@ -1075,6 +1100,20 @@ xfs_fs_clear_inode(
 	XFS_STATS_INC(vn_remove);
 	XFS_STATS_DEC(vn_active);
 
+	/*
+	 * The iolock is used by the file system to coordinate reads,
+	 * writes, and block truncates.  Up to this point the lock
+	 * protected concurrent accesses by users of the inode.  But
+	 * from here forward we're doing some final processing of the
+	 * inode because we're done with it, and although we reuse the
+	 * iolock for protection it is really a distinct lock class
+	 * (in the lockdep sense) from before.  To keep lockdep happy
+	 * (and basically indicate what we are doing), we explicitly
+	 * re-init the iolock here.
+	 */
+	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
+	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
+
 	xfs_inactive(ip);
 }
 
@@ -1092,8 +1131,6 @@ xfs_fs_put_super(
 	struct super_block	*sb)
 {
 	struct xfs_mount	*mp = XFS_M(sb);
-	struct xfs_inode	*rip = mp->m_rootip;
-	int			unmount_event_flags = 0;
 
 	xfs_syncd_stop(mp);
 
@@ -1109,20 +1146,7 @@ xfs_fs_put_super(
 		xfs_sync_attr(mp, 0);
 	}
 
-#ifdef HAVE_DMAPI
-	if (mp->m_flags & XFS_MOUNT_DMAPI) {
-		unmount_event_flags =
-			(mp->m_dmevmask & (1 << DM_EVENT_UNMOUNT)) ?
-				0 : DM_FLAGS_UNWANTED;
-		/*
-		 * Ignore error from dmapi here, first unmount is not allowed
-		 * to fail anyway, and second we wouldn't want to fail a
-		 * unmount because of dmapi.
-		 */
-		XFS_SEND_PREUNMOUNT(mp, rip, DM_RIGHT_NULL, rip, DM_RIGHT_NULL,
-				NULL, NULL, 0, 0, unmount_event_flags);
-	}
-#endif
+	XFS_SEND_PREUNMOUNT(mp);
 
 	/*
 	 * Blow away any referenced inode in the filestreams cache.
@@ -1133,10 +1157,7 @@ xfs_fs_put_super(
 
 	XFS_bflush(mp->m_ddev_targp);
 
-	if (mp->m_flags & XFS_MOUNT_DMAPI) {
-		XFS_SEND_UNMOUNT(mp, rip, DM_RIGHT_NULL, 0, 0,
-				unmount_event_flags);
-	}
+	XFS_SEND_UNMOUNT(mp);
 
 	xfs_unmountfs(mp);
 	xfs_freesb(mp);
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 961df0a22c78..d895a3a960f5 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -663,10 +663,9 @@ xfs_syncd_stop(
 	kthread_stop(mp->m_sync_task);
 }
 
-int
+STATIC int
 xfs_reclaim_inode(
 	xfs_inode_t	*ip,
-	int		locked,
 	int		sync_mode)
 {
 	xfs_perag_t	*pag = xfs_get_perag(ip->i_mount, ip->i_ino);
@@ -682,10 +681,6 @@ xfs_reclaim_inode(
 	    !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
 		spin_unlock(&ip->i_flags_lock);
 		write_unlock(&pag->pag_ici_lock);
-		if (locked) {
-			xfs_ifunlock(ip);
-			xfs_iunlock(ip, XFS_ILOCK_EXCL);
-		}
 		return -EAGAIN;
 	}
 	__xfs_iflags_set(ip, XFS_IRECLAIM);
@@ -704,10 +699,8 @@ xfs_reclaim_inode(
 	 * We get the flush lock regardless, though, just to make sure
 	 * we don't free it while it is being flushed.
 	 */
-	if (!locked) {
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		xfs_iflock(ip);
-	}
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_iflock(ip);
 
 	/*
 	 * In the case of a forced shutdown we rely on xfs_iflush() to
@@ -778,7 +771,7 @@ xfs_reclaim_inode_now(
 	}
 	read_unlock(&pag->pag_ici_lock);
 
-	return xfs_reclaim_inode(ip, 0, flags);
+	return xfs_reclaim_inode(ip, flags);
 }
 
 int
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index 27920eb7a820..a500b4d91835 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -44,7 +44,6 @@ void xfs_quiesce_attr(struct xfs_mount *mp);
 
 void xfs_flush_inodes(struct xfs_inode *ip);
 
-int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode);
 int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
 
 void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index ad7fbead4c97..00cabf5354d2 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -36,7 +36,6 @@ struct attrlist_cursor_kern;
 /*
  * Flags for read/write calls - same values as IRIX
  */
-#define IO_ISAIO	0x00001		/* don't wait for completion */
 #define IO_ISDIRECT	0x00004		/* bypass page cache */
 #define IO_INVIS	0x00020		/* don't update inode timestamps */
 
diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h
index 6f4fd37c67af..d2d20462fd4f 100644
--- a/fs/xfs/support/debug.h
+++ b/fs/xfs/support/debug.h
@@ -41,10 +41,6 @@ extern void assfail(char *expr, char *f, int l);
 # define STATIC static noinline
 #endif
 
-#ifndef STATIC_INLINE
-# define STATIC_INLINE static inline
-#endif
-
 #else /* DEBUG */
 
 #define ASSERT(expr)	\
@@ -54,19 +50,5 @@ extern void assfail(char *expr, char *f, int l);
 # define STATIC noinline
 #endif
 
-/*
- * We stop inlining of inline functions in debug mode.
- * Unfortunately, this means static inline in header files
- * get multiple definitions, so they need to remain static.
- * This then gives tonnes of warnings about unused but defined
- * functions, so we need to add the unused attribute to prevent
- * these spurious warnings.
- */
-#ifndef STATIC_INLINE
-# define STATIC_INLINE static __attribute__ ((unused)) noinline
-#endif
-
 #endif /* DEBUG */
-
-
 #endif /* __XFS_SUPPORT_DEBUG_H__ */
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index 4ece1906bd41..8fe6f6b78a4a 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -123,9 +123,13 @@ xfs_inode_hasattr(
  * Overall external interface routines.
  *========================================================================*/
 
-int
-xfs_attr_fetch(xfs_inode_t *ip, struct xfs_name *name,
-		char *value, int *valuelenp, int flags)
+STATIC int
+xfs_attr_get_int(
+	struct xfs_inode	*ip,
+	struct xfs_name		*name,
+	char			*value,
+	int			*valuelenp,
+	int			flags)
 {
 	xfs_da_args_t	args;
 	int		error;
@@ -188,7 +192,7 @@ xfs_attr_get(
 		return error;
 
 	xfs_ilock(ip, XFS_ILOCK_SHARED);
-	error = xfs_attr_fetch(ip, &xname, value, valuelenp, flags);
+	error = xfs_attr_get_int(ip, &xname, value, valuelenp, flags);
 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 	return(error);
 }
@@ -2143,8 +2147,8 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
 		dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
 		blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
 
-		bp = xfs_buf_get_flags(mp->m_ddev_targp, dblkno, blkcnt,
+		bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt,
				 XFS_BUF_LOCK | XBF_DONT_BLOCK);
 		ASSERT(bp);
 		ASSERT(!XFS_BUF_GETERROR(bp));
 
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
index fb3b2a68b9b9..12f0be3a73d4 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/xfs_attr.h
@@ -131,7 +131,6 @@ typedef struct xfs_attr_list_context {
  */
 int xfs_attr_calc_size(struct xfs_inode *, int, int, int *);
 int xfs_attr_inactive(struct xfs_inode *dp);
-int xfs_attr_fetch(struct xfs_inode *, struct xfs_name *, char *, int *, int);
 int xfs_attr_rmtval_get(struct xfs_da_args *args);
 int xfs_attr_list_int(struct xfs_attr_list_context *);
 
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index afdc8911637d..0b687351293f 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -98,7 +98,7 @@ STATIC int xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index);
  * If namespace bits don't match return 0.
  * If all match then return 1.
  */
-STATIC_INLINE int
+STATIC int
 xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
 {
 	return XFS_ATTR_NSP_ONDISK(ondisk_flags) == XFS_ATTR_NSP_ARGS_TO_ONDISK(arg_flags);
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index eb7b702d0690..6f5ccede63f9 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -98,8 +98,7 @@ xfs_bmdr_to_bmbt(
 * This code must be in sync with the routines xfs_bmbt_get_startoff,
 * xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state.
 */
-
-STATIC_INLINE void
+STATIC void
 __xfs_bmbt_get_all(
		__uint64_t l0,
		__uint64_t l1,
diff --git a/fs/xfs/xfs_filestream.h b/fs/xfs/xfs_filestream.h
index f655f7dc334c..4aba67c5f64f 100644
--- a/fs/xfs/xfs_filestream.h
+++ b/fs/xfs/xfs_filestream.h
@@ -79,7 +79,7 @@ extern ktrace_t *xfs_filestreams_trace_buf;
 * the cache that reference per-ag array elements that have since been
 * reallocated.
 */
-STATIC_INLINE int
+static inline int
 xfs_filestream_peek_ag(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agno)
@@ -87,7 +87,7 @@ xfs_filestream_peek_ag(
	return atomic_read(&mp->m_perag[agno].pagf_fstrms);
 }
 
-STATIC_INLINE int
+static inline int
 xfs_filestream_get_ag(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agno)
@@ -95,7 +95,7 @@ xfs_filestream_get_ag(
	return atomic_inc_return(&mp->m_perag[agno].pagf_fstrms);
 }
 
-STATIC_INLINE int
+static inline int
 xfs_filestream_put_ag(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agno)
@@ -122,7 +122,7 @@ int xfs_filestream_new_ag(struct xfs_bmalloca *ap, xfs_agnumber_t *agp);
 
 
 /* filestreams for the inode? */
-STATIC_INLINE int
+static inline int
 xfs_inode_is_filestream(
	struct xfs_inode	*ip)
 {
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 2d0b3e1da9e6..36079aa91344 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -201,8 +201,8 @@ xfs_growfs_data_private(
		 * AG freelist header block
		 */
		bp = xfs_buf_get(mp->m_ddev_targp,
				 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
-				 XFS_FSS_TO_BB(mp, 1), 0);
+				 XFS_FSS_TO_BB(mp, 1), XBF_LOCK | XBF_MAPPED);
		agf = XFS_BUF_TO_AGF(bp);
		memset(agf, 0, mp->m_sb.sb_sectsize);
		agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
@@ -233,8 +233,8 @@ xfs_growfs_data_private(
		 * AG inode header block
		 */
		bp = xfs_buf_get(mp->m_ddev_targp,
				 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
-				 XFS_FSS_TO_BB(mp, 1), 0);
+				 XFS_FSS_TO_BB(mp, 1), XBF_LOCK | XBF_MAPPED);
		agi = XFS_BUF_TO_AGI(bp);
		memset(agi, 0, mp->m_sb.sb_sectsize);
		agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
@@ -257,8 +257,9 @@ xfs_growfs_data_private(
		 * BNO btree root block
		 */
		bp = xfs_buf_get(mp->m_ddev_targp,
				 XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
-				 BTOBB(mp->m_sb.sb_blocksize), 0);
+				 BTOBB(mp->m_sb.sb_blocksize),
+				 XBF_LOCK | XBF_MAPPED);
		block = XFS_BUF_TO_BLOCK(bp);
		memset(block, 0, mp->m_sb.sb_blocksize);
		block->bb_magic = cpu_to_be32(XFS_ABTB_MAGIC);
@@ -278,8 +279,9 @@ xfs_growfs_data_private(
		 * CNT btree root block
		 */
		bp = xfs_buf_get(mp->m_ddev_targp,
				 XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
-				 BTOBB(mp->m_sb.sb_blocksize), 0);
+				 BTOBB(mp->m_sb.sb_blocksize),
+				 XBF_LOCK | XBF_MAPPED);
		block = XFS_BUF_TO_BLOCK(bp);
		memset(block, 0, mp->m_sb.sb_blocksize);
		block->bb_magic = cpu_to_be32(XFS_ABTC_MAGIC);
@@ -300,8 +302,9 @@ xfs_growfs_data_private(
		 * INO btree root block
		 */
		bp = xfs_buf_get(mp->m_ddev_targp,
				 XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
-				 BTOBB(mp->m_sb.sb_blocksize), 0);
+				 BTOBB(mp->m_sb.sb_blocksize),
+				 XBF_LOCK | XBF_MAPPED);
		block = XFS_BUF_TO_BLOCK(bp);
		memset(block, 0, mp->m_sb.sb_blocksize);
		block->bb_magic = cpu_to_be32(XFS_IBT_MAGIC);
@@ -611,7 +614,7 @@ xfs_fs_log_dummy(
	xfs_inode_t	*ip;
	int		error;
 
-	tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
+	tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 0785797db828..cb907ba69c4c 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -425,7 +425,7 @@ xfs_ialloc_ag_alloc(
	return 0;
 }
 
-STATIC_INLINE xfs_agnumber_t
+STATIC xfs_agnumber_t
 xfs_ialloc_next_ag(
	xfs_mount_t	*mp)
 {
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 80e526489be5..073bb4a26b19 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -73,6 +73,9 @@ xfs_inode_alloc(
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));
+	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
+
+	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
 
	/* initialise the xfs inode */
	ip->i_ino = ino;
@@ -290,7 +293,7 @@ xfs_iget_cache_miss(
	struct xfs_inode	**ipp,
	xfs_daddr_t		bno,
	int			flags,
-	int			lock_flags) __releases(pag->pag_ici_lock)
+	int			lock_flags)
 {
	struct xfs_inode	*ip;
	int			error;
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 67ae5555a30a..7294abce6ef2 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -860,8 +860,15 @@ xfs_iomap_write_unwritten(
	 * set up a transaction to convert the range of extents
	 * from unwritten to real. Do allocations in a loop until
	 * we have covered the range passed in.
+	 *
+	 * Note that we open code the transaction allocation here
+	 * to pass KM_NOFS--we can't risk to recursing back into
+	 * the filesystem here as we might be asked to write out
+	 * the same inode that we complete here and might deadlock
+	 * on the iolock.
	 */
-	tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
+	xfs_wait_for_freeze(mp, SB_FREEZE_TRANS);
+	tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
	tp->t_flags |= XFS_TRANS_RESERVE;
	error = xfs_trans_reserve(tp, resblks,
			XFS_WRITE_LOG_RES(mp), 0,
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index fb17f8226b09..1ec98ed914d4 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -2206,6 +2206,7 @@ xlog_recover_do_buffer_trans(
	xfs_daddr_t		blkno;
	int			len;
	ushort			flags;
+	uint			buf_flags;
 
	buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
 
@@ -2246,12 +2247,11 @@ xlog_recover_do_buffer_trans(
	}
 
	mp = log->l_mp;
-	if (flags & XFS_BLI_INODE_BUF) {
-		bp = xfs_buf_read_flags(mp->m_ddev_targp, blkno, len,
-							XFS_BUF_LOCK);
-	} else {
-		bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, 0);
-	}
+	buf_flags = XFS_BUF_LOCK;
+	if (!(flags & XFS_BLI_INODE_BUF))
+		buf_flags |= XFS_BUF_MAPPED;
+
+	bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags);
	if (XFS_BUF_ISERROR(bp)) {
		xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp,
				  bp, blkno);
@@ -2350,8 +2350,8 @@ xlog_recover_do_inode_trans(
		goto error;
	}
 
-	bp = xfs_buf_read_flags(mp->m_ddev_targp, in_f->ilf_blkno,
-				in_f->ilf_len, XFS_BUF_LOCK);
+	bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
+			  XFS_BUF_LOCK);
	if (XFS_BUF_ISERROR(bp)) {
		xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
				  bp, in_f->ilf_blkno);
@@ -3517,7 +3517,7 @@ xlog_do_recovery_pass(
 {
	xlog_rec_header_t	*rhead;
	xfs_daddr_t		blk_no;
-	xfs_caddr_t		bufaddr, offset;
+	xfs_caddr_t		offset;
	xfs_buf_t		*hbp, *dbp;
	int			error = 0, h_size;
	int			bblks, split_bblks;
@@ -3610,7 +3610,7 @@ xlog_do_recovery_pass(
			/*
			 * Check for header wrapping around physical end-of-log
			 */
-			offset = NULL;
+			offset = XFS_BUF_PTR(hbp);
			split_hblks = 0;
			wrapped_hblks = 0;
			if (blk_no + hblks <= log->l_logBBsize) {
@@ -3646,9 +3646,8 @@ xlog_do_recovery_pass(
				 *   - order is important.
				 */
				wrapped_hblks = hblks - split_hblks;
-				bufaddr = XFS_BUF_PTR(hbp);
				error = XFS_BUF_SET_PTR(hbp,
-						bufaddr + BBTOB(split_hblks),
+						offset + BBTOB(split_hblks),
						BBTOB(hblks - split_hblks));
				if (error)
					goto bread_err2;
@@ -3658,14 +3657,10 @@ xlog_do_recovery_pass(
				if (error)
					goto bread_err2;
 
-				error = XFS_BUF_SET_PTR(hbp, bufaddr,
+				error = XFS_BUF_SET_PTR(hbp, offset,
							BBTOB(hblks));
				if (error)
					goto bread_err2;
-
-				if (!offset)
-					offset = xlog_align(log, 0,
-							wrapped_hblks, hbp);
			}
			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead,
@@ -3685,7 +3680,7 @@ xlog_do_recovery_pass(
			} else {
				/* This log record is split across the
				 * physical end of log */
-				offset = NULL;
+				offset = XFS_BUF_PTR(dbp);
				split_bblks = 0;
				if (blk_no != log->l_logBBsize) {
					/* some data is before the physical
@@ -3714,9 +3709,8 @@ xlog_do_recovery_pass(
					 *   _first_, then the log start (LR header end)
					 *   - order is important.
					 */
-					bufaddr = XFS_BUF_PTR(dbp);
					error = XFS_BUF_SET_PTR(dbp,
-						bufaddr + BBTOB(split_bblks),
+						offset + BBTOB(split_bblks),
						BBTOB(bblks - split_bblks));
					if (error)
						goto bread_err2;
@@ -3727,13 +3721,9 @@ xlog_do_recovery_pass(
					if (error)
						goto bread_err2;
 
-					error = XFS_BUF_SET_PTR(dbp, bufaddr, h_size);
+					error = XFS_BUF_SET_PTR(dbp, offset, h_size);
					if (error)
						goto bread_err2;
-
-					if (!offset)
-						offset = xlog_align(log, wrapped_hblks,
-							bblks - split_bblks, dbp);
				}
				xlog_unpack_data(rhead, offset, log);
				if ((error = xlog_recover_process_data(log, rhash,
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 8b6c9e807efb..66a888a9ad6f 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -583,8 +583,8 @@ xfs_readsb(xfs_mount_t *mp, int flags)
	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
	extra_flags = XFS_BUF_LOCK | XFS_BUF_MANAGE | XFS_BUF_MAPPED;
 
-	bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
-				BTOBB(sector_size), extra_flags);
+	bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, BTOBB(sector_size),
+			  extra_flags);
	if (!bp || XFS_BUF_ISERROR(bp)) {
		xfs_fs_mount_cmn_err(flags, "SB read failed");
		error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
@@ -624,8 +624,8 @@ xfs_readsb(xfs_mount_t *mp, int flags)
	XFS_BUF_UNMANAGE(bp);
	xfs_buf_relse(bp);
	sector_size = mp->m_sb.sb_sectsize;
-	bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
+	bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR,
				BTOBB(sector_size), extra_flags);
	if (!bp || XFS_BUF_ISERROR(bp)) {
		xfs_fs_mount_cmn_err(flags, "SB re-read failed");
		error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
@@ -1471,7 +1471,7 @@ xfs_log_sbcount(
	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
		return 0;
 
-	tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT);
+	tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
	error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
				  XFS_DEFAULT_LOG_COUNT);
	if (error) {
@@ -2123,7 +2123,7 @@ xfs_icsb_destroy_counters(
	mutex_destroy(&mp->m_icsb_mutex);
 }
 
-STATIC_INLINE void
+STATIC void
 xfs_icsb_lock_cntr(
	xfs_icsb_cnts_t	*icsbp)
 {
@@ -2132,7 +2132,7 @@ xfs_icsb_lock_cntr(
	}
 }
 
-STATIC_INLINE void
+STATIC void
 xfs_icsb_unlock_cntr(
	xfs_icsb_cnts_t	*icsbp)
 {
@@ -2140,7 +2140,7 @@ xfs_icsb_unlock_cntr(
 }
 
 
-STATIC_INLINE void
+STATIC void
 xfs_icsb_lock_all_counters(
	xfs_mount_t	*mp)
 {
@@ -2153,7 +2153,7 @@ xfs_icsb_lock_all_counters(
	}
 }
 
-STATIC_INLINE void
+STATIC void
 xfs_icsb_unlock_all_counters(
	xfs_mount_t	*mp)
 {
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index a6c023bc0fb2..1df7e4502967 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -93,6 +93,9 @@ typedef struct xfs_dmops {
	xfs_send_unmount_t	xfs_send_unmount;
 } xfs_dmops_t;
 
+#define XFS_DMAPI_UNMOUNT_FLAGS(mp) \
+	(((mp)->m_dmevmask & (1 << DM_EVENT_UNMOUNT)) ? 0 : DM_FLAGS_UNWANTED)
+
 #define XFS_SEND_DATA(mp, ev,ip,off,len,fl,lock) \
	(*(mp)->m_dm_ops->xfs_send_data)(ev,ip,off,len,fl,lock)
 #define XFS_SEND_MMAP(mp, vma,fl) \
@@ -101,12 +104,24 @@ typedef struct xfs_dmops {
	(*(mp)->m_dm_ops->xfs_send_destroy)(ip,right)
 #define XFS_SEND_NAMESP(mp, ev,b1,r1,b2,r2,n1,n2,mode,rval,fl) \
	(*(mp)->m_dm_ops->xfs_send_namesp)(ev,NULL,b1,r1,b2,r2,n1,n2,mode,rval,fl)
-#define XFS_SEND_PREUNMOUNT(mp,b1,r1,b2,r2,n1,n2,mode,rval,fl) \
-	(*(mp)->m_dm_ops->xfs_send_namesp)(DM_EVENT_PREUNMOUNT,mp,b1,r1,b2,r2,n1,n2,mode,rval,fl)
 #define XFS_SEND_MOUNT(mp,right,path,name) \
	(*(mp)->m_dm_ops->xfs_send_mount)(mp,right,path,name)
-#define XFS_SEND_UNMOUNT(mp, ip,right,mode,rval,fl) \
-	(*(mp)->m_dm_ops->xfs_send_unmount)(mp,ip,right,mode,rval,fl)
+#define XFS_SEND_PREUNMOUNT(mp) \
+do { \
+	if (mp->m_flags & XFS_MOUNT_DMAPI) { \
+		(*(mp)->m_dm_ops->xfs_send_namesp)(DM_EVENT_PREUNMOUNT, mp, \
+			(mp)->m_rootip, DM_RIGHT_NULL, \
+			(mp)->m_rootip, DM_RIGHT_NULL, \
+			NULL, NULL, 0, 0, XFS_DMAPI_UNMOUNT_FLAGS(mp)); \
+	} \
+} while (0)
+#define XFS_SEND_UNMOUNT(mp) \
+do { \
+	if (mp->m_flags & XFS_MOUNT_DMAPI) { \
+		(*(mp)->m_dm_ops->xfs_send_unmount)(mp, (mp)->m_rootip, \
+			DM_RIGHT_NULL, 0, 0, XFS_DMAPI_UNMOUNT_FLAGS(mp)); \
+	} \
+} while (0)
 
 
 #ifdef HAVE_PERCPU_SB
@@ -387,13 +402,13 @@ xfs_put_perag(struct xfs_mount *mp, xfs_perag_t *pag)
 * Per-cpu superblock locking functions
 */
 #ifdef HAVE_PERCPU_SB
-STATIC_INLINE void
+static inline void
 xfs_icsb_lock(xfs_mount_t *mp)
 {
	mutex_lock(&mp->m_icsb_mutex);
 }
 
-STATIC_INLINE void
+static inline void
 xfs_icsb_unlock(xfs_mount_t *mp)
 {
	mutex_unlock(&mp->m_icsb_mutex);
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index 3f816ad7ff19..4c199d18f850 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -277,10 +277,10 @@ xfs_read_buf(
	xfs_buf_t	 *bp;
	int		 error;
 
-	if (flags)
-		bp = xfs_buf_read_flags(target, blkno, len, flags);
-	else
-		bp = xfs_buf_read(target, blkno, len, flags);
+	if (!flags)
+		flags = XBF_LOCK | XBF_MAPPED;
+
+	bp = xfs_buf_read(target, blkno, len, flags);
	if (!bp)
		return XFS_ERROR(EIO);
	error = XFS_BUF_GETERROR(bp);
@@ -336,3 +336,25 @@ xfs_bwrite(
	}
	return (error);
 }
+
+/*
+ * helper function to extract extent size hint from inode
+ */
+xfs_extlen_t
+xfs_get_extsz_hint(
+	struct xfs_inode	*ip)
+{
+	xfs_extlen_t		extsz;
+
+	if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
+		extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
+				? ip->i_d.di_extsize
+				: ip->i_mount->m_sb.sb_rextsize;
+		ASSERT(extsz);
+	} else {
+		extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
+				? ip->i_d.di_extsize : 0;
+	}
+
+	return extsz;
+}
diff --git a/fs/xfs/xfs_rw.h b/fs/xfs/xfs_rw.h
index f5e4874c37d8..571f2174435c 100644
--- a/fs/xfs/xfs_rw.h
+++ b/fs/xfs/xfs_rw.h
@@ -37,34 +37,6 @@ xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
 }
 
 /*
- * Flags for xfs_free_eofblocks
- */
-#define XFS_FREE_EOF_LOCK	(1<<0)
-#define XFS_FREE_EOF_NOLOCK	(1<<1)
-
-
-/*
- * helper function to extract extent size hint from inode
- */
-STATIC_INLINE xfs_extlen_t
-xfs_get_extsz_hint(
-	xfs_inode_t	*ip)
-{
-	xfs_extlen_t	extsz;
-
-	if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
-		extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
-				? ip->i_d.di_extsize
-				: ip->i_mount->m_sb.sb_rextsize;
-		ASSERT(extsz);
-	} else {
-		extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
-				? ip->i_d.di_extsize : 0;
-	}
-	return extsz;
-}
-
-/*
 * Prototypes for functions in xfs_rw.c.
 */
 extern int xfs_write_clear_setuid(struct xfs_inode *ip);
@@ -76,5 +48,6 @@ extern int xfs_read_buf(struct xfs_mount *mp, xfs_buftarg_t *btp,
				struct xfs_buf **bpp);
 extern void xfs_ioerror_alert(char *func, struct xfs_mount *mp,
				xfs_buf_t *bp, xfs_daddr_t blkno);
+extern xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip);
 
 #endif /* __XFS_RW_H__ */
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 66b849358e62..237badcbac3b 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -236,19 +236,20 @@ xfs_trans_alloc(
	uint		type)
 {
	xfs_wait_for_freeze(mp, SB_FREEZE_TRANS);
-	return _xfs_trans_alloc(mp, type);
+	return _xfs_trans_alloc(mp, type, KM_SLEEP);
 }
 
 xfs_trans_t *
 _xfs_trans_alloc(
	xfs_mount_t	*mp,
-	uint		type)
+	uint		type,
+	uint		memflags)
 {
	xfs_trans_t	*tp;
 
	atomic_inc(&mp->m_active_trans);
 
-	tp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
+	tp = kmem_zone_zalloc(xfs_trans_zone, memflags);
	tp->t_magic = XFS_TRANS_MAGIC;
	tp->t_type = type;
	tp->t_mountp = mp;
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index ed47fc77759c..a0574f593f52 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -924,7 +924,7 @@ typedef struct xfs_trans {
 * XFS transaction mechanism exported interfaces.
 */
 xfs_trans_t	*xfs_trans_alloc(struct xfs_mount *, uint);
-xfs_trans_t	*_xfs_trans_alloc(struct xfs_mount *, uint);
+xfs_trans_t	*_xfs_trans_alloc(struct xfs_mount *, uint, uint);
 xfs_trans_t	*xfs_trans_dup(xfs_trans_t *);
 int		xfs_trans_reserve(xfs_trans_t *, uint, uint, uint,
				  uint, uint);
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 218829e6a152..03a1f701fea8 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -79,11 +79,8 @@ xfs_trans_get_buf(xfs_trans_t *tp,
	/*
	 * Default to a normal get_buf() call if the tp is NULL.
	 */
-	if (tp == NULL) {
-		bp = xfs_buf_get_flags(target_dev, blkno, len,
-							flags | BUF_BUSY);
-		return(bp);
-	}
+	if (tp == NULL)
+		return xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY);
 
	/*
	 * If we find the buffer in the cache with this transaction
@@ -129,7 +126,7 @@ xfs_trans_get_buf(xfs_trans_t *tp,
	 * easily deadlock with our current transaction as well as cause
	 * us to run out of stack space.
	 */
-	bp = xfs_buf_get_flags(target_dev, blkno, len, flags | BUF_BUSY);
+	bp = xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY);
	if (bp == NULL) {
		return NULL;
	}
@@ -302,7 +299,7 @@ xfs_trans_read_buf(
	 * Default to a normal get_buf() call if the tp is NULL.
	 */
	if (tp == NULL) {
-		bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY);
+		bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY);
		if (!bp)
			return (flags & XFS_BUF_TRYLOCK) ?
					EAGAIN : XFS_ERROR(ENOMEM);
@@ -398,7 +395,7 @@ xfs_trans_read_buf(
	 * easily deadlock with our current transaction as well as cause
	 * us to run out of stack space.
	 */
-	bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY);
+	bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY);
	if (bp == NULL) {
		*bpp = NULL;
		return 0;
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index b572f7e840e0..578f3f59b789 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -538,9 +538,8 @@ xfs_readlink_bmap(
		d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
		byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
 
-		bp = xfs_buf_read_flags(mp->m_ddev_targp, d, BTOBB(byte_cnt),
-					XBF_LOCK | XBF_MAPPED |
-					XBF_DONT_BLOCK);
+		bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt),
+				  XBF_LOCK | XBF_MAPPED | XBF_DONT_BLOCK);
		error = XFS_BUF_GETERROR(bp);
		if (error) {
			xfs_ioerror_alert("xfs_readlink",
@@ -709,6 +708,11 @@ xfs_fsync(
 }
 
 /*
+ * Flags for xfs_free_eofblocks
+ */
+#define XFS_FREE_EOF_TRYLOCK	(1<<0)
+
+/*
 * This is called by xfs_inactive to free any blocks beyond eof
 * when the link count isn't zero and by xfs_dm_punch_hole() when
 * punching a hole to EOF.
@@ -726,7 +730,6 @@ xfs_free_eofblocks(
	xfs_filblks_t	map_len;
	int		nimaps;
	xfs_bmbt_irec_t	imap;
-	int		use_iolock = (flags & XFS_FREE_EOF_LOCK);
 
	/*
	 * Figure out if there are any blocks beyond the end
@@ -768,14 +771,19 @@ xfs_free_eofblocks(
		 * cache and we can't
		 * do that within a transaction.
		 */
-		if (use_iolock)
+		if (flags & XFS_FREE_EOF_TRYLOCK) {
+			if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
+				xfs_trans_cancel(tp, 0);
+				return 0;
+			}
+		} else {
			xfs_ilock(ip, XFS_IOLOCK_EXCL);
+		}
		error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE,
				    ip->i_size);
		if (error) {
			xfs_trans_cancel(tp, 0);
-			if (use_iolock)
-				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return error;
		}
 
@@ -812,8 +820,7 @@ xfs_free_eofblocks(
			error = xfs_trans_commit(tp,
						XFS_TRANS_RELEASE_LOG_RES);
		}
-		xfs_iunlock(ip, (use_iolock ? (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)
-					    : XFS_ILOCK_EXCL));
+		xfs_iunlock(ip, XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL);
	}
	return error;
 }
@@ -1113,7 +1120,17 @@ xfs_release(
		     (ip->i_df.if_flags & XFS_IFEXTENTS))  &&
		    (!(ip->i_d.di_flags &
				(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
-			error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK);
+
+			/*
+			 * If we can't get the iolock just skip truncating
+			 * the blocks past EOF because we could deadlock
+			 * with the mmap_sem otherwise.  We'll get another
+			 * chance to drop them once the last reference to
+			 * the inode is dropped, so we'll never leak blocks
+			 * permanently.
+			 */
+			error = xfs_free_eofblocks(mp, ip,
+						   XFS_FREE_EOF_TRYLOCK);
			if (error)
				return error;
		}
@@ -1184,7 +1201,7 @@ xfs_inactive(
		    (!(ip->i_d.di_flags &
				(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
		     (ip->i_delayed_blks != 0)))) {
-			error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK);
+			error = xfs_free_eofblocks(mp, ip, 0);
			if (error)
				return VN_INACTIVE_CACHE;
		}
@@ -2456,46 +2473,6 @@ xfs_set_dmattrs(
	return error;
 }
 
-int
-xfs_reclaim(
-	xfs_inode_t	*ip)
-{
-
-	xfs_itrace_entry(ip);
-
-	ASSERT(!VN_MAPPED(VFS_I(ip)));
-
-	/* bad inode, get out here ASAP */
-	if (is_bad_inode(VFS_I(ip))) {
-		xfs_ireclaim(ip);
-		return 0;
-	}
-
-	xfs_ioend_wait(ip);
-
-	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
-
-	/*
-	 * If we have nothing to flush with this inode then complete the
-	 * teardown now, otherwise break the link between the xfs inode and the
-	 * linux inode and clean up the xfs inode later. This avoids flushing
-	 * the inode to disk during the delete operation itself.
-	 *
-	 * When breaking the link, we need to set the XFS_IRECLAIMABLE flag
-	 * first to ensure that xfs_iunpin() will never see an xfs inode
-	 * that has a linux inode being reclaimed. Synchronisation is provided
-	 * by the i_flags_lock.
-	 */
-	if (!ip->i_update_core && (ip->i_itemp == NULL)) {
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		xfs_iflock(ip);
-		xfs_iflags_set(ip, XFS_IRECLAIMABLE);
-		return xfs_reclaim_inode(ip, 1, XFS_IFLUSH_DELWRI_ELSE_SYNC);
-	}
-	xfs_inode_set_reclaim_tag(ip);
-	return 0;
-}
-
 /*
 * xfs_alloc_file_space()
 *	This routine allocates disk space for the given file.
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h
index a9e102de71a1..167a467403a5 100644
--- a/fs/xfs/xfs_vnodeops.h
+++ b/fs/xfs/xfs_vnodeops.h
@@ -38,7 +38,6 @@ int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
		const char *target_path, mode_t mode, struct xfs_inode **ipp,
		cred_t *credp);
 int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state);
-int xfs_reclaim(struct xfs_inode *ip);
 int xfs_change_file_space(struct xfs_inode *ip, int cmd,
		xfs_flock64_t *bf, xfs_off_t offset, int attr_flags);
 int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name,