author     Linus Torvalds <torvalds@linux-foundation.org>  2010-10-22 20:32:27 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-10-22 20:32:27 -0400
commit     5fe3a5ae5c09d53b2b3c7a971e1d87ab3a747055
tree       1e0d3e10c83e456a1678c4e01acb5ff624129202
parent     0fc0531e0a2174377a86fd6953ecaa00287d8f70
parent     39dc948c6921169e13224a97fa53188922acfde8
Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs
* 'for-linus' of git://oss.sgi.com/xfs/xfs: (36 commits)
xfs: semaphore cleanup
xfs: Extend project quotas to support 32bit project ids
xfs: remove xfs_buf wrappers
xfs: remove xfs_cred.h
xfs: remove xfs_globals.h
xfs: remove xfs_version.h
xfs: remove xfs_refcache.h
xfs: fix the xfs_trans_committed
xfs: remove unused t_callback field in struct xfs_trans
xfs: fix bogus m_maxagi check in xfs_iget
xfs: do not use xfs_mod_incore_sb_batch for per-cpu counters
xfs: do not use xfs_mod_incore_sb for per-cpu counters
xfs: remove XFS_MOUNT_NO_PERCPU_SB
xfs: pack xfs_buf structure more tightly
xfs: convert buffer cache hash to rbtree
xfs: serialise inode reclaim within an AG
xfs: batch inode reclaim lookup
xfs: implement batched inode lookups for AG walking
xfs: split out inode walk inode grabbing
xfs: split inode AG walking into separate code for reclaim
...
60 files changed, 1185 insertions, 1375 deletions
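
Reviewer note: the largest functional change below is "xfs: convert buffer cache hash to rbtree", which replaces the per-buftarg hash table with one rb_root per allocation group (pag->pag_buf_tree), keyed on b_file_offset. As a rough orientation before reading the diff, the new _xfs_buf_find() follows the standard Linux rbtree walk; the sketch below uses made-up names (demo_buf, demo_find_or_insert) rather than the real XFS structures and omits locking.

#include <linux/rbtree.h>
#include <linux/types.h>

/* Illustrative node type; the real code embeds rb_node in struct xfs_buf. */
struct demo_buf {
        struct rb_node  node;
        u64             offset;         /* sort key, like b_file_offset */
};

/* Find a buffer by offset, or link a pre-allocated new one at that spot. */
static struct demo_buf *
demo_find_or_insert(struct rb_root *root, u64 offset, struct demo_buf *new)
{
        struct rb_node  **rbp = &root->rb_node;
        struct rb_node  *parent = NULL;

        while (*rbp) {
                struct demo_buf *bp = rb_entry(*rbp, struct demo_buf, node);

                parent = *rbp;
                if (offset < bp->offset)
                        rbp = &(*rbp)->rb_left;
                else if (offset > bp->offset)
                        rbp = &(*rbp)->rb_right;
                else
                        return bp;      /* already cached at this offset */
        }

        /* Miss: splice the caller's new node in at the insertion point. */
        rb_link_node(&new->node, parent, rbp);
        rb_insert_color(&new->node, root);
        return new;
}

The real code additionally holds pag->pag_buf_lock around the walk, takes a perag reference that the buffer keeps until it is freed, and keeps searching to the right when the block number matches but the length does not (a stale buffer from a reallocated busy extent), as the comment in the diff explains.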
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index f3ccaec5760a..ba5312802aa9 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -188,8 +188,8 @@ _xfs_buf_initialize( | |||
188 | atomic_set(&bp->b_hold, 1); | 188 | atomic_set(&bp->b_hold, 1); |
189 | init_completion(&bp->b_iowait); | 189 | init_completion(&bp->b_iowait); |
190 | INIT_LIST_HEAD(&bp->b_list); | 190 | INIT_LIST_HEAD(&bp->b_list); |
191 | INIT_LIST_HEAD(&bp->b_hash_list); | 191 | RB_CLEAR_NODE(&bp->b_rbnode); |
192 | init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */ | 192 | sema_init(&bp->b_sema, 0); /* held, no waiters */ |
193 | XB_SET_OWNER(bp); | 193 | XB_SET_OWNER(bp); |
194 | bp->b_target = target; | 194 | bp->b_target = target; |
195 | bp->b_file_offset = range_base; | 195 | bp->b_file_offset = range_base; |
@@ -262,8 +262,6 @@ xfs_buf_free( | |||
262 | { | 262 | { |
263 | trace_xfs_buf_free(bp, _RET_IP_); | 263 | trace_xfs_buf_free(bp, _RET_IP_); |
264 | 264 | ||
265 | ASSERT(list_empty(&bp->b_hash_list)); | ||
266 | |||
267 | if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) { | 265 | if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) { |
268 | uint i; | 266 | uint i; |
269 | 267 | ||
@@ -422,8 +420,10 @@ _xfs_buf_find( | |||
422 | { | 420 | { |
423 | xfs_off_t range_base; | 421 | xfs_off_t range_base; |
424 | size_t range_length; | 422 | size_t range_length; |
425 | xfs_bufhash_t *hash; | 423 | struct xfs_perag *pag; |
426 | xfs_buf_t *bp, *n; | 424 | struct rb_node **rbp; |
425 | struct rb_node *parent; | ||
426 | xfs_buf_t *bp; | ||
427 | 427 | ||
428 | range_base = (ioff << BBSHIFT); | 428 | range_base = (ioff << BBSHIFT); |
429 | range_length = (isize << BBSHIFT); | 429 | range_length = (isize << BBSHIFT); |
@@ -432,14 +432,37 @@ _xfs_buf_find( | |||
432 | ASSERT(!(range_length < (1 << btp->bt_sshift))); | 432 | ASSERT(!(range_length < (1 << btp->bt_sshift))); |
433 | ASSERT(!(range_base & (xfs_off_t)btp->bt_smask)); | 433 | ASSERT(!(range_base & (xfs_off_t)btp->bt_smask)); |
434 | 434 | ||
435 | hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)]; | 435 | /* get tree root */ |
436 | 436 | pag = xfs_perag_get(btp->bt_mount, | |
437 | spin_lock(&hash->bh_lock); | 437 | xfs_daddr_to_agno(btp->bt_mount, ioff)); |
438 | 438 | ||
439 | list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) { | 439 | /* walk tree */ |
440 | ASSERT(btp == bp->b_target); | 440 | spin_lock(&pag->pag_buf_lock); |
441 | if (bp->b_file_offset == range_base && | 441 | rbp = &pag->pag_buf_tree.rb_node; |
442 | bp->b_buffer_length == range_length) { | 442 | parent = NULL; |
443 | bp = NULL; | ||
444 | while (*rbp) { | ||
445 | parent = *rbp; | ||
446 | bp = rb_entry(parent, struct xfs_buf, b_rbnode); | ||
447 | |||
448 | if (range_base < bp->b_file_offset) | ||
449 | rbp = &(*rbp)->rb_left; | ||
450 | else if (range_base > bp->b_file_offset) | ||
451 | rbp = &(*rbp)->rb_right; | ||
452 | else { | ||
453 | /* | ||
454 | * found a block offset match. If the range doesn't | ||
455 | * match, the only way this is allowed is if the buffer | ||
456 | * in the cache is stale and the transaction that made | ||
457 | * it stale has not yet committed. i.e. we are | ||
458 | * reallocating a busy extent. Skip this buffer and | ||
459 | * continue searching to the right for an exact match. | ||
460 | */ | ||
461 | if (bp->b_buffer_length != range_length) { | ||
462 | ASSERT(bp->b_flags & XBF_STALE); | ||
463 | rbp = &(*rbp)->rb_right; | ||
464 | continue; | ||
465 | } | ||
443 | atomic_inc(&bp->b_hold); | 466 | atomic_inc(&bp->b_hold); |
444 | goto found; | 467 | goto found; |
445 | } | 468 | } |
@@ -449,17 +472,21 @@ _xfs_buf_find( | |||
449 | if (new_bp) { | 472 | if (new_bp) { |
450 | _xfs_buf_initialize(new_bp, btp, range_base, | 473 | _xfs_buf_initialize(new_bp, btp, range_base, |
451 | range_length, flags); | 474 | range_length, flags); |
452 | new_bp->b_hash = hash; | 475 | rb_link_node(&new_bp->b_rbnode, parent, rbp); |
453 | list_add(&new_bp->b_hash_list, &hash->bh_list); | 476 | rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree); |
477 | /* the buffer keeps the perag reference until it is freed */ | ||
478 | new_bp->b_pag = pag; | ||
479 | spin_unlock(&pag->pag_buf_lock); | ||
454 | } else { | 480 | } else { |
455 | XFS_STATS_INC(xb_miss_locked); | 481 | XFS_STATS_INC(xb_miss_locked); |
482 | spin_unlock(&pag->pag_buf_lock); | ||
483 | xfs_perag_put(pag); | ||
456 | } | 484 | } |
457 | |||
458 | spin_unlock(&hash->bh_lock); | ||
459 | return new_bp; | 485 | return new_bp; |
460 | 486 | ||
461 | found: | 487 | found: |
462 | spin_unlock(&hash->bh_lock); | 488 | spin_unlock(&pag->pag_buf_lock); |
489 | xfs_perag_put(pag); | ||
463 | 490 | ||
464 | /* Attempt to get the semaphore without sleeping, | 491 | /* Attempt to get the semaphore without sleeping, |
465 | * if this does not work then we need to drop the | 492 | * if this does not work then we need to drop the |
@@ -625,8 +652,7 @@ void | |||
625 | xfs_buf_readahead( | 652 | xfs_buf_readahead( |
626 | xfs_buftarg_t *target, | 653 | xfs_buftarg_t *target, |
627 | xfs_off_t ioff, | 654 | xfs_off_t ioff, |
628 | size_t isize, | 655 | size_t isize) |
629 | xfs_buf_flags_t flags) | ||
630 | { | 656 | { |
631 | struct backing_dev_info *bdi; | 657 | struct backing_dev_info *bdi; |
632 | 658 | ||
@@ -634,8 +660,42 @@ xfs_buf_readahead( | |||
634 | if (bdi_read_congested(bdi)) | 660 | if (bdi_read_congested(bdi)) |
635 | return; | 661 | return; |
636 | 662 | ||
637 | flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD); | 663 | xfs_buf_read(target, ioff, isize, |
638 | xfs_buf_read(target, ioff, isize, flags); | 664 | XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK); |
665 | } | ||
666 | |||
667 | /* | ||
668 | * Read an uncached buffer from disk. Allocates and returns a locked | ||
669 | * buffer containing the disk contents or nothing. | ||
670 | */ | ||
671 | struct xfs_buf * | ||
672 | xfs_buf_read_uncached( | ||
673 | struct xfs_mount *mp, | ||
674 | struct xfs_buftarg *target, | ||
675 | xfs_daddr_t daddr, | ||
676 | size_t length, | ||
677 | int flags) | ||
678 | { | ||
679 | xfs_buf_t *bp; | ||
680 | int error; | ||
681 | |||
682 | bp = xfs_buf_get_uncached(target, length, flags); | ||
683 | if (!bp) | ||
684 | return NULL; | ||
685 | |||
686 | /* set up the buffer for a read IO */ | ||
687 | xfs_buf_lock(bp); | ||
688 | XFS_BUF_SET_ADDR(bp, daddr); | ||
689 | XFS_BUF_READ(bp); | ||
690 | XFS_BUF_BUSY(bp); | ||
691 | |||
692 | xfsbdstrat(mp, bp); | ||
693 | error = xfs_buf_iowait(bp); | ||
694 | if (error || bp->b_error) { | ||
695 | xfs_buf_relse(bp); | ||
696 | return NULL; | ||
697 | } | ||
698 | return bp; | ||
639 | } | 699 | } |
640 | 700 | ||
641 | xfs_buf_t * | 701 | xfs_buf_t * |
@@ -707,9 +767,10 @@ xfs_buf_associate_memory( | |||
707 | } | 767 | } |
708 | 768 | ||
709 | xfs_buf_t * | 769 | xfs_buf_t * |
710 | xfs_buf_get_noaddr( | 770 | xfs_buf_get_uncached( |
771 | struct xfs_buftarg *target, | ||
711 | size_t len, | 772 | size_t len, |
712 | xfs_buftarg_t *target) | 773 | int flags) |
713 | { | 774 | { |
714 | unsigned long page_count = PAGE_ALIGN(len) >> PAGE_SHIFT; | 775 | unsigned long page_count = PAGE_ALIGN(len) >> PAGE_SHIFT; |
715 | int error, i; | 776 | int error, i; |
@@ -725,7 +786,7 @@ xfs_buf_get_noaddr( | |||
725 | goto fail_free_buf; | 786 | goto fail_free_buf; |
726 | 787 | ||
727 | for (i = 0; i < page_count; i++) { | 788 | for (i = 0; i < page_count; i++) { |
728 | bp->b_pages[i] = alloc_page(GFP_KERNEL); | 789 | bp->b_pages[i] = alloc_page(xb_to_gfp(flags)); |
729 | if (!bp->b_pages[i]) | 790 | if (!bp->b_pages[i]) |
730 | goto fail_free_mem; | 791 | goto fail_free_mem; |
731 | } | 792 | } |
@@ -740,7 +801,7 @@ xfs_buf_get_noaddr( | |||
740 | 801 | ||
741 | xfs_buf_unlock(bp); | 802 | xfs_buf_unlock(bp); |
742 | 803 | ||
743 | trace_xfs_buf_get_noaddr(bp, _RET_IP_); | 804 | trace_xfs_buf_get_uncached(bp, _RET_IP_); |
744 | return bp; | 805 | return bp; |
745 | 806 | ||
746 | fail_free_mem: | 807 | fail_free_mem: |
@@ -774,29 +835,30 @@ void | |||
774 | xfs_buf_rele( | 835 | xfs_buf_rele( |
775 | xfs_buf_t *bp) | 836 | xfs_buf_t *bp) |
776 | { | 837 | { |
777 | xfs_bufhash_t *hash = bp->b_hash; | 838 | struct xfs_perag *pag = bp->b_pag; |
778 | 839 | ||
779 | trace_xfs_buf_rele(bp, _RET_IP_); | 840 | trace_xfs_buf_rele(bp, _RET_IP_); |
780 | 841 | ||
781 | if (unlikely(!hash)) { | 842 | if (!pag) { |
782 | ASSERT(!bp->b_relse); | 843 | ASSERT(!bp->b_relse); |
844 | ASSERT(RB_EMPTY_NODE(&bp->b_rbnode)); | ||
783 | if (atomic_dec_and_test(&bp->b_hold)) | 845 | if (atomic_dec_and_test(&bp->b_hold)) |
784 | xfs_buf_free(bp); | 846 | xfs_buf_free(bp); |
785 | return; | 847 | return; |
786 | } | 848 | } |
787 | 849 | ||
850 | ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode)); | ||
788 | ASSERT(atomic_read(&bp->b_hold) > 0); | 851 | ASSERT(atomic_read(&bp->b_hold) > 0); |
789 | if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) { | 852 | if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) { |
790 | if (bp->b_relse) { | 853 | if (bp->b_relse) { |
791 | atomic_inc(&bp->b_hold); | 854 | atomic_inc(&bp->b_hold); |
792 | spin_unlock(&hash->bh_lock); | 855 | spin_unlock(&pag->pag_buf_lock); |
793 | (*(bp->b_relse)) (bp); | 856 | bp->b_relse(bp); |
794 | } else if (bp->b_flags & XBF_FS_MANAGED) { | ||
795 | spin_unlock(&hash->bh_lock); | ||
796 | } else { | 857 | } else { |
797 | ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q))); | 858 | ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q))); |
798 | list_del_init(&bp->b_hash_list); | 859 | rb_erase(&bp->b_rbnode, &pag->pag_buf_tree); |
799 | spin_unlock(&hash->bh_lock); | 860 | spin_unlock(&pag->pag_buf_lock); |
861 | xfs_perag_put(pag); | ||
800 | xfs_buf_free(bp); | 862 | xfs_buf_free(bp); |
801 | } | 863 | } |
802 | } | 864 | } |
@@ -859,7 +921,7 @@ xfs_buf_lock( | |||
859 | trace_xfs_buf_lock(bp, _RET_IP_); | 921 | trace_xfs_buf_lock(bp, _RET_IP_); |
860 | 922 | ||
861 | if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) | 923 | if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) |
862 | xfs_log_force(bp->b_mount, 0); | 924 | xfs_log_force(bp->b_target->bt_mount, 0); |
863 | if (atomic_read(&bp->b_io_remaining)) | 925 | if (atomic_read(&bp->b_io_remaining)) |
864 | blk_run_address_space(bp->b_target->bt_mapping); | 926 | blk_run_address_space(bp->b_target->bt_mapping); |
865 | down(&bp->b_sema); | 927 | down(&bp->b_sema); |
@@ -970,7 +1032,6 @@ xfs_bwrite( | |||
970 | { | 1032 | { |
971 | int error; | 1033 | int error; |
972 | 1034 | ||
973 | bp->b_mount = mp; | ||
974 | bp->b_flags |= XBF_WRITE; | 1035 | bp->b_flags |= XBF_WRITE; |
975 | bp->b_flags &= ~(XBF_ASYNC | XBF_READ); | 1036 | bp->b_flags &= ~(XBF_ASYNC | XBF_READ); |
976 | 1037 | ||
@@ -991,8 +1052,6 @@ xfs_bdwrite( | |||
991 | { | 1052 | { |
992 | trace_xfs_buf_bdwrite(bp, _RET_IP_); | 1053 | trace_xfs_buf_bdwrite(bp, _RET_IP_); |
993 | 1054 | ||
994 | bp->b_mount = mp; | ||
995 | |||
996 | bp->b_flags &= ~XBF_READ; | 1055 | bp->b_flags &= ~XBF_READ; |
997 | bp->b_flags |= (XBF_DELWRI | XBF_ASYNC); | 1056 | bp->b_flags |= (XBF_DELWRI | XBF_ASYNC); |
998 | 1057 | ||
@@ -1001,7 +1060,7 @@ xfs_bdwrite( | |||
1001 | 1060 | ||
1002 | /* | 1061 | /* |
1003 | * Called when we want to stop a buffer from getting written or read. | 1062 | * Called when we want to stop a buffer from getting written or read. |
1004 | * We attach the EIO error, muck with its flags, and call biodone | 1063 | * We attach the EIO error, muck with its flags, and call xfs_buf_ioend |
1005 | * so that the proper iodone callbacks get called. | 1064 | * so that the proper iodone callbacks get called. |
1006 | */ | 1065 | */ |
1007 | STATIC int | 1066 | STATIC int |
@@ -1018,21 +1077,21 @@ xfs_bioerror( | |||
1018 | XFS_BUF_ERROR(bp, EIO); | 1077 | XFS_BUF_ERROR(bp, EIO); |
1019 | 1078 | ||
1020 | /* | 1079 | /* |
1021 | * We're calling biodone, so delete XBF_DONE flag. | 1080 | * We're calling xfs_buf_ioend, so delete XBF_DONE flag. |
1022 | */ | 1081 | */ |
1023 | XFS_BUF_UNREAD(bp); | 1082 | XFS_BUF_UNREAD(bp); |
1024 | XFS_BUF_UNDELAYWRITE(bp); | 1083 | XFS_BUF_UNDELAYWRITE(bp); |
1025 | XFS_BUF_UNDONE(bp); | 1084 | XFS_BUF_UNDONE(bp); |
1026 | XFS_BUF_STALE(bp); | 1085 | XFS_BUF_STALE(bp); |
1027 | 1086 | ||
1028 | xfs_biodone(bp); | 1087 | xfs_buf_ioend(bp, 0); |
1029 | 1088 | ||
1030 | return EIO; | 1089 | return EIO; |
1031 | } | 1090 | } |
1032 | 1091 | ||
1033 | /* | 1092 | /* |
1034 | * Same as xfs_bioerror, except that we are releasing the buffer | 1093 | * Same as xfs_bioerror, except that we are releasing the buffer |
1035 | * here ourselves, and avoiding the biodone call. | 1094 | * here ourselves, and avoiding the xfs_buf_ioend call. |
1036 | * This is meant for userdata errors; metadata bufs come with | 1095 | * This is meant for userdata errors; metadata bufs come with |
1037 | * iodone functions attached, so that we can track down errors. | 1096 | * iodone functions attached, so that we can track down errors. |
1038 | */ | 1097 | */ |
@@ -1081,7 +1140,7 @@ int | |||
1081 | xfs_bdstrat_cb( | 1140 | xfs_bdstrat_cb( |
1082 | struct xfs_buf *bp) | 1141 | struct xfs_buf *bp) |
1083 | { | 1142 | { |
1084 | if (XFS_FORCED_SHUTDOWN(bp->b_mount)) { | 1143 | if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) { |
1085 | trace_xfs_bdstrat_shut(bp, _RET_IP_); | 1144 | trace_xfs_bdstrat_shut(bp, _RET_IP_); |
1086 | /* | 1145 | /* |
1087 | * Metadata write that didn't get logged but | 1146 | * Metadata write that didn't get logged but |
@@ -1387,62 +1446,24 @@ xfs_buf_iomove( | |||
1387 | */ | 1446 | */ |
1388 | void | 1447 | void |
1389 | xfs_wait_buftarg( | 1448 | xfs_wait_buftarg( |
1390 | xfs_buftarg_t *btp) | 1449 | struct xfs_buftarg *btp) |
1391 | { | ||
1392 | xfs_buf_t *bp, *n; | ||
1393 | xfs_bufhash_t *hash; | ||
1394 | uint i; | ||
1395 | |||
1396 | for (i = 0; i < (1 << btp->bt_hashshift); i++) { | ||
1397 | hash = &btp->bt_hash[i]; | ||
1398 | again: | ||
1399 | spin_lock(&hash->bh_lock); | ||
1400 | list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) { | ||
1401 | ASSERT(btp == bp->b_target); | ||
1402 | if (!(bp->b_flags & XBF_FS_MANAGED)) { | ||
1403 | spin_unlock(&hash->bh_lock); | ||
1404 | /* | ||
1405 | * Catch superblock reference count leaks | ||
1406 | * immediately | ||
1407 | */ | ||
1408 | BUG_ON(bp->b_bn == 0); | ||
1409 | delay(100); | ||
1410 | goto again; | ||
1411 | } | ||
1412 | } | ||
1413 | spin_unlock(&hash->bh_lock); | ||
1414 | } | ||
1415 | } | ||
1416 | |||
1417 | /* | ||
1418 | * Allocate buffer hash table for a given target. | ||
1419 | * For devices containing metadata (i.e. not the log/realtime devices) | ||
1420 | * we need to allocate a much larger hash table. | ||
1421 | */ | ||
1422 | STATIC void | ||
1423 | xfs_alloc_bufhash( | ||
1424 | xfs_buftarg_t *btp, | ||
1425 | int external) | ||
1426 | { | 1450 | { |
1427 | unsigned int i; | 1451 | struct xfs_perag *pag; |
1452 | uint i; | ||
1428 | 1453 | ||
1429 | btp->bt_hashshift = external ? 3 : 12; /* 8 or 4096 buckets */ | 1454 | for (i = 0; i < btp->bt_mount->m_sb.sb_agcount; i++) { |
1430 | btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) * | 1455 | pag = xfs_perag_get(btp->bt_mount, i); |
1431 | sizeof(xfs_bufhash_t)); | 1456 | spin_lock(&pag->pag_buf_lock); |
1432 | for (i = 0; i < (1 << btp->bt_hashshift); i++) { | 1457 | while (rb_first(&pag->pag_buf_tree)) { |
1433 | spin_lock_init(&btp->bt_hash[i].bh_lock); | 1458 | spin_unlock(&pag->pag_buf_lock); |
1434 | INIT_LIST_HEAD(&btp->bt_hash[i].bh_list); | 1459 | delay(100); |
1460 | spin_lock(&pag->pag_buf_lock); | ||
1461 | } | ||
1462 | spin_unlock(&pag->pag_buf_lock); | ||
1463 | xfs_perag_put(pag); | ||
1435 | } | 1464 | } |
1436 | } | 1465 | } |
1437 | 1466 | ||
1438 | STATIC void | ||
1439 | xfs_free_bufhash( | ||
1440 | xfs_buftarg_t *btp) | ||
1441 | { | ||
1442 | kmem_free_large(btp->bt_hash); | ||
1443 | btp->bt_hash = NULL; | ||
1444 | } | ||
1445 | |||
1446 | /* | 1467 | /* |
1447 | * buftarg list for delwrite queue processing | 1468 | * buftarg list for delwrite queue processing |
1448 | */ | 1469 | */ |
@@ -1475,7 +1496,6 @@ xfs_free_buftarg( | |||
1475 | xfs_flush_buftarg(btp, 1); | 1496 | xfs_flush_buftarg(btp, 1); |
1476 | if (mp->m_flags & XFS_MOUNT_BARRIER) | 1497 | if (mp->m_flags & XFS_MOUNT_BARRIER) |
1477 | xfs_blkdev_issue_flush(btp); | 1498 | xfs_blkdev_issue_flush(btp); |
1478 | xfs_free_bufhash(btp); | ||
1479 | iput(btp->bt_mapping->host); | 1499 | iput(btp->bt_mapping->host); |
1480 | 1500 | ||
1481 | /* Unregister the buftarg first so that we don't get a | 1501 | /* Unregister the buftarg first so that we don't get a |
@@ -1597,6 +1617,7 @@ out_error: | |||
1597 | 1617 | ||
1598 | xfs_buftarg_t * | 1618 | xfs_buftarg_t * |
1599 | xfs_alloc_buftarg( | 1619 | xfs_alloc_buftarg( |
1620 | struct xfs_mount *mp, | ||
1600 | struct block_device *bdev, | 1621 | struct block_device *bdev, |
1601 | int external, | 1622 | int external, |
1602 | const char *fsname) | 1623 | const char *fsname) |
@@ -1605,6 +1626,7 @@ xfs_alloc_buftarg( | |||
1605 | 1626 | ||
1606 | btp = kmem_zalloc(sizeof(*btp), KM_SLEEP); | 1627 | btp = kmem_zalloc(sizeof(*btp), KM_SLEEP); |
1607 | 1628 | ||
1629 | btp->bt_mount = mp; | ||
1608 | btp->bt_dev = bdev->bd_dev; | 1630 | btp->bt_dev = bdev->bd_dev; |
1609 | btp->bt_bdev = bdev; | 1631 | btp->bt_bdev = bdev; |
1610 | if (xfs_setsize_buftarg_early(btp, bdev)) | 1632 | if (xfs_setsize_buftarg_early(btp, bdev)) |
@@ -1613,7 +1635,6 @@ xfs_alloc_buftarg( | |||
1613 | goto error; | 1635 | goto error; |
1614 | if (xfs_alloc_delwrite_queue(btp, fsname)) | 1636 | if (xfs_alloc_delwrite_queue(btp, fsname)) |
1615 | goto error; | 1637 | goto error; |
1616 | xfs_alloc_bufhash(btp, external); | ||
1617 | return btp; | 1638 | return btp; |
1618 | 1639 | ||
1619 | error: | 1640 | error: |
@@ -1904,7 +1925,7 @@ xfs_flush_buftarg( | |||
1904 | bp = list_first_entry(&wait_list, struct xfs_buf, b_list); | 1925 | bp = list_first_entry(&wait_list, struct xfs_buf, b_list); |
1905 | 1926 | ||
1906 | list_del_init(&bp->b_list); | 1927 | list_del_init(&bp->b_list); |
1907 | xfs_iowait(bp); | 1928 | xfs_buf_iowait(bp); |
1908 | xfs_buf_relse(bp); | 1929 | xfs_buf_relse(bp); |
1909 | } | 1930 | } |
1910 | } | 1931 | } |
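
Reviewer note: one small item from the "xfs: semaphore cleanup" commit is visible at the top of this file: init_MUTEX_LOCKED(&bp->b_sema) becomes sema_init(&bp->b_sema, 0), the direct equivalent (a counting semaphore initialised to zero, i.e. already held). A minimal stand-alone illustration with a made-up structure:

#include <linux/semaphore.h>

/* Illustrative only; the real field is struct xfs_buf.b_sema. */
struct demo_obj {
        struct semaphore        lock;
};

static void demo_obj_init(struct demo_obj *obj)
{
        /* Count 0: starts held with no waiters, released later via up(). */
        sema_init(&obj->lock, 0);
}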
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 9d021c73ea52..383a3f37cf98 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -51,7 +51,6 @@ typedef enum { | |||
51 | #define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */ | 51 | #define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */ |
52 | #define XBF_DELWRI (1 << 6) /* buffer has dirty pages */ | 52 | #define XBF_DELWRI (1 << 6) /* buffer has dirty pages */ |
53 | #define XBF_STALE (1 << 7) /* buffer has been staled, do not find it */ | 53 | #define XBF_STALE (1 << 7) /* buffer has been staled, do not find it */ |
54 | #define XBF_FS_MANAGED (1 << 8) /* filesystem controls freeing memory */ | ||
55 | #define XBF_ORDERED (1 << 11)/* use ordered writes */ | 54 | #define XBF_ORDERED (1 << 11)/* use ordered writes */ |
56 | #define XBF_READ_AHEAD (1 << 12)/* asynchronous read-ahead */ | 55 | #define XBF_READ_AHEAD (1 << 12)/* asynchronous read-ahead */ |
57 | #define XBF_LOG_BUFFER (1 << 13)/* this is a buffer used for the log */ | 56 | #define XBF_LOG_BUFFER (1 << 13)/* this is a buffer used for the log */ |
@@ -96,7 +95,6 @@ typedef unsigned int xfs_buf_flags_t; | |||
96 | { XBF_DONE, "DONE" }, \ | 95 | { XBF_DONE, "DONE" }, \ |
97 | { XBF_DELWRI, "DELWRI" }, \ | 96 | { XBF_DELWRI, "DELWRI" }, \ |
98 | { XBF_STALE, "STALE" }, \ | 97 | { XBF_STALE, "STALE" }, \ |
99 | { XBF_FS_MANAGED, "FS_MANAGED" }, \ | ||
100 | { XBF_ORDERED, "ORDERED" }, \ | 98 | { XBF_ORDERED, "ORDERED" }, \ |
101 | { XBF_READ_AHEAD, "READ_AHEAD" }, \ | 99 | { XBF_READ_AHEAD, "READ_AHEAD" }, \ |
102 | { XBF_LOCK, "LOCK" }, /* should never be set */\ | 100 | { XBF_LOCK, "LOCK" }, /* should never be set */\ |
@@ -123,14 +121,11 @@ typedef struct xfs_buftarg { | |||
123 | dev_t bt_dev; | 121 | dev_t bt_dev; |
124 | struct block_device *bt_bdev; | 122 | struct block_device *bt_bdev; |
125 | struct address_space *bt_mapping; | 123 | struct address_space *bt_mapping; |
124 | struct xfs_mount *bt_mount; | ||
126 | unsigned int bt_bsize; | 125 | unsigned int bt_bsize; |
127 | unsigned int bt_sshift; | 126 | unsigned int bt_sshift; |
128 | size_t bt_smask; | 127 | size_t bt_smask; |
129 | 128 | ||
130 | /* per device buffer hash table */ | ||
131 | uint bt_hashshift; | ||
132 | xfs_bufhash_t *bt_hash; | ||
133 | |||
134 | /* per device delwri queue */ | 129 | /* per device delwri queue */ |
135 | struct task_struct *bt_task; | 130 | struct task_struct *bt_task; |
136 | struct list_head bt_list; | 131 | struct list_head bt_list; |
@@ -158,34 +153,41 @@ typedef int (*xfs_buf_bdstrat_t)(struct xfs_buf *); | |||
158 | #define XB_PAGES 2 | 153 | #define XB_PAGES 2 |
159 | 154 | ||
160 | typedef struct xfs_buf { | 155 | typedef struct xfs_buf { |
156 | /* | ||
157 | * first cacheline holds all the fields needed for an uncontended cache | ||
158 | * hit to be fully processed. The semaphore straddles the cacheline | ||
159 | * boundary, but the counter and lock sits on the first cacheline, | ||
160 | * which is the only bit that is touched if we hit the semaphore | ||
161 | * fast-path on locking. | ||
162 | */ | ||
163 | struct rb_node b_rbnode; /* rbtree node */ | ||
164 | xfs_off_t b_file_offset; /* offset in file */ | ||
165 | size_t b_buffer_length;/* size of buffer in bytes */ | ||
166 | atomic_t b_hold; /* reference count */ | ||
167 | xfs_buf_flags_t b_flags; /* status flags */ | ||
161 | struct semaphore b_sema; /* semaphore for lockables */ | 168 | struct semaphore b_sema; /* semaphore for lockables */ |
162 | unsigned long b_queuetime; /* time buffer was queued */ | 169 | |
163 | atomic_t b_pin_count; /* pin count */ | ||
164 | wait_queue_head_t b_waiters; /* unpin waiters */ | 170 | wait_queue_head_t b_waiters; /* unpin waiters */ |
165 | struct list_head b_list; | 171 | struct list_head b_list; |
166 | xfs_buf_flags_t b_flags; /* status flags */ | 172 | struct xfs_perag *b_pag; /* contains rbtree root */ |
167 | struct list_head b_hash_list; /* hash table list */ | ||
168 | xfs_bufhash_t *b_hash; /* hash table list start */ | ||
169 | xfs_buftarg_t *b_target; /* buffer target (device) */ | 173 | xfs_buftarg_t *b_target; /* buffer target (device) */ |
170 | atomic_t b_hold; /* reference count */ | ||
171 | xfs_daddr_t b_bn; /* block number for I/O */ | 174 | xfs_daddr_t b_bn; /* block number for I/O */ |
172 | xfs_off_t b_file_offset; /* offset in file */ | ||
173 | size_t b_buffer_length;/* size of buffer in bytes */ | ||
174 | size_t b_count_desired;/* desired transfer size */ | 175 | size_t b_count_desired;/* desired transfer size */ |
175 | void *b_addr; /* virtual address of buffer */ | 176 | void *b_addr; /* virtual address of buffer */ |
176 | struct work_struct b_iodone_work; | 177 | struct work_struct b_iodone_work; |
177 | atomic_t b_io_remaining; /* #outstanding I/O requests */ | ||
178 | xfs_buf_iodone_t b_iodone; /* I/O completion function */ | 178 | xfs_buf_iodone_t b_iodone; /* I/O completion function */ |
179 | xfs_buf_relse_t b_relse; /* releasing function */ | 179 | xfs_buf_relse_t b_relse; /* releasing function */ |
180 | struct completion b_iowait; /* queue for I/O waiters */ | 180 | struct completion b_iowait; /* queue for I/O waiters */ |
181 | void *b_fspriv; | 181 | void *b_fspriv; |
182 | void *b_fspriv2; | 182 | void *b_fspriv2; |
183 | struct xfs_mount *b_mount; | ||
184 | unsigned short b_error; /* error code on I/O */ | ||
185 | unsigned int b_page_count; /* size of page array */ | ||
186 | unsigned int b_offset; /* page offset in first page */ | ||
187 | struct page **b_pages; /* array of page pointers */ | 183 | struct page **b_pages; /* array of page pointers */ |
188 | struct page *b_page_array[XB_PAGES]; /* inline pages */ | 184 | struct page *b_page_array[XB_PAGES]; /* inline pages */ |
185 | unsigned long b_queuetime; /* time buffer was queued */ | ||
186 | atomic_t b_pin_count; /* pin count */ | ||
187 | atomic_t b_io_remaining; /* #outstanding I/O requests */ | ||
188 | unsigned int b_page_count; /* size of page array */ | ||
189 | unsigned int b_offset; /* page offset in first page */ | ||
190 | unsigned short b_error; /* error code on I/O */ | ||
189 | #ifdef XFS_BUF_LOCK_TRACKING | 191 | #ifdef XFS_BUF_LOCK_TRACKING |
190 | int b_last_holder; | 192 | int b_last_holder; |
191 | #endif | 193 | #endif |
@@ -204,11 +206,13 @@ extern xfs_buf_t *xfs_buf_read(xfs_buftarg_t *, xfs_off_t, size_t, | |||
204 | xfs_buf_flags_t); | 206 | xfs_buf_flags_t); |
205 | 207 | ||
206 | extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *); | 208 | extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *); |
207 | extern xfs_buf_t *xfs_buf_get_noaddr(size_t, xfs_buftarg_t *); | 209 | extern xfs_buf_t *xfs_buf_get_uncached(struct xfs_buftarg *, size_t, int); |
208 | extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t); | 210 | extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t); |
209 | extern void xfs_buf_hold(xfs_buf_t *); | 211 | extern void xfs_buf_hold(xfs_buf_t *); |
210 | extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t, | 212 | extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t); |
211 | xfs_buf_flags_t); | 213 | struct xfs_buf *xfs_buf_read_uncached(struct xfs_mount *mp, |
214 | struct xfs_buftarg *target, | ||
215 | xfs_daddr_t daddr, size_t length, int flags); | ||
212 | 216 | ||
213 | /* Releasing Buffers */ | 217 | /* Releasing Buffers */ |
214 | extern void xfs_buf_free(xfs_buf_t *); | 218 | extern void xfs_buf_free(xfs_buf_t *); |
@@ -233,6 +237,8 @@ extern int xfs_buf_iorequest(xfs_buf_t *); | |||
233 | extern int xfs_buf_iowait(xfs_buf_t *); | 237 | extern int xfs_buf_iowait(xfs_buf_t *); |
234 | extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *, | 238 | extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *, |
235 | xfs_buf_rw_t); | 239 | xfs_buf_rw_t); |
240 | #define xfs_buf_zero(bp, off, len) \ | ||
241 | xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO) | ||
236 | 242 | ||
237 | static inline int xfs_buf_geterror(xfs_buf_t *bp) | 243 | static inline int xfs_buf_geterror(xfs_buf_t *bp) |
238 | { | 244 | { |
@@ -267,8 +273,6 @@ extern void xfs_buf_terminate(void); | |||
267 | XFS_BUF_DONE(bp); \ | 273 | XFS_BUF_DONE(bp); \ |
268 | } while (0) | 274 | } while (0) |
269 | 275 | ||
270 | #define XFS_BUF_UNMANAGE(bp) ((bp)->b_flags &= ~XBF_FS_MANAGED) | ||
271 | |||
272 | #define XFS_BUF_DELAYWRITE(bp) ((bp)->b_flags |= XBF_DELWRI) | 276 | #define XFS_BUF_DELAYWRITE(bp) ((bp)->b_flags |= XBF_DELWRI) |
273 | #define XFS_BUF_UNDELAYWRITE(bp) xfs_buf_delwri_dequeue(bp) | 277 | #define XFS_BUF_UNDELAYWRITE(bp) xfs_buf_delwri_dequeue(bp) |
274 | #define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI) | 278 | #define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI) |
@@ -347,25 +351,11 @@ static inline void xfs_buf_relse(xfs_buf_t *bp) | |||
347 | xfs_buf_rele(bp); | 351 | xfs_buf_rele(bp); |
348 | } | 352 | } |
349 | 353 | ||
350 | #define xfs_biodone(bp) xfs_buf_ioend(bp, 0) | ||
351 | |||
352 | #define xfs_biomove(bp, off, len, data, rw) \ | ||
353 | xfs_buf_iomove((bp), (off), (len), (data), \ | ||
354 | ((rw) == XBF_WRITE) ? XBRW_WRITE : XBRW_READ) | ||
355 | |||
356 | #define xfs_biozero(bp, off, len) \ | ||
357 | xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO) | ||
358 | |||
359 | #define xfs_iowait(bp) xfs_buf_iowait(bp) | ||
360 | |||
361 | #define xfs_baread(target, rablkno, ralen) \ | ||
362 | xfs_buf_readahead((target), (rablkno), (ralen), XBF_DONT_BLOCK) | ||
363 | |||
364 | |||
365 | /* | 354 | /* |
366 | * Handling of buftargs. | 355 | * Handling of buftargs. |
367 | */ | 356 | */ |
368 | extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int, const char *); | 357 | extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *, |
358 | struct block_device *, int, const char *); | ||
369 | extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *); | 359 | extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *); |
370 | extern void xfs_wait_buftarg(xfs_buftarg_t *); | 360 | extern void xfs_wait_buftarg(xfs_buftarg_t *); |
371 | extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int); | 361 | extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int); |
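
Reviewer note: the prototypes above rename xfs_buf_get_noaddr() to xfs_buf_get_uncached() and add xfs_buf_read_uncached(), which allocates an uncached buffer, issues the read and waits for completion. A hedged caller-side sketch; the function name, mp, daddr and nbblks are placeholders, and passing 0 for flags is an assumption rather than something taken from this diff:

/* Sketch: read one uncached metadata block and release it again. */
static int
demo_read_uncached_block(
        struct xfs_mount        *mp,
        xfs_daddr_t             daddr,
        int                     nbblks)
{
        struct xfs_buf          *bp;

        bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp, daddr,
                                   BBTOB(nbblks), 0);
        if (!bp)
                return EIO;             /* allocation or I/O failure */

        /* ... inspect bp->b_addr here ... */

        xfs_buf_relse(bp);
        return 0;
}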
diff --git a/fs/xfs/linux-2.6/xfs_cred.h b/fs/xfs/linux-2.6/xfs_cred.h
deleted file mode 100644
index 55bddf3b6091..000000000000
--- a/fs/xfs/linux-2.6/xfs_cred.h
+++ /dev/null
@@ -1,28 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | #ifndef __XFS_CRED_H__ | ||
19 | #define __XFS_CRED_H__ | ||
20 | |||
21 | #include <linux/capability.h> | ||
22 | |||
23 | /* | ||
24 | * Credentials | ||
25 | */ | ||
26 | typedef const struct cred cred_t; | ||
27 | |||
28 | #endif /* __XFS_CRED_H__ */ | ||
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index 1f279b012f94..ed88ed16811c 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -32,10 +32,9 @@ xfs_tosspages( | |||
32 | xfs_off_t last, | 32 | xfs_off_t last, |
33 | int fiopt) | 33 | int fiopt) |
34 | { | 34 | { |
35 | struct address_space *mapping = VFS_I(ip)->i_mapping; | 35 | /* can't toss partial tail pages, so mask them out */ |
36 | 36 | last &= ~(PAGE_SIZE - 1); | |
37 | if (mapping->nrpages) | 37 | truncate_inode_pages_range(VFS_I(ip)->i_mapping, first, last - 1); |
38 | truncate_inode_pages(mapping, first); | ||
39 | } | 38 | } |
40 | 39 | ||
41 | int | 40 | int |
@@ -50,12 +49,11 @@ xfs_flushinval_pages( | |||
50 | 49 | ||
51 | trace_xfs_pagecache_inval(ip, first, last); | 50 | trace_xfs_pagecache_inval(ip, first, last); |
52 | 51 | ||
53 | if (mapping->nrpages) { | 52 | xfs_iflags_clear(ip, XFS_ITRUNCATED); |
54 | xfs_iflags_clear(ip, XFS_ITRUNCATED); | 53 | ret = filemap_write_and_wait_range(mapping, first, |
55 | ret = filemap_write_and_wait(mapping); | 54 | last == -1 ? LLONG_MAX : last); |
56 | if (!ret) | 55 | if (!ret) |
57 | truncate_inode_pages(mapping, first); | 56 | truncate_inode_pages_range(mapping, first, last); |
58 | } | ||
59 | return -ret; | 57 | return -ret; |
60 | } | 58 | } |
61 | 59 | ||
@@ -71,10 +69,9 @@ xfs_flush_pages( | |||
71 | int ret = 0; | 69 | int ret = 0; |
72 | int ret2; | 70 | int ret2; |
73 | 71 | ||
74 | if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { | 72 | xfs_iflags_clear(ip, XFS_ITRUNCATED); |
75 | xfs_iflags_clear(ip, XFS_ITRUNCATED); | 73 | ret = -filemap_fdatawrite_range(mapping, first, |
76 | ret = -filemap_fdatawrite(mapping); | 74 | last == -1 ? LLONG_MAX : last); |
77 | } | ||
78 | if (flags & XBF_ASYNC) | 75 | if (flags & XBF_ASYNC) |
79 | return ret; | 76 | return ret; |
80 | ret2 = xfs_wait_on_pages(ip, first, last); | 77 | ret2 = xfs_wait_on_pages(ip, first, last); |
@@ -91,7 +88,9 @@ xfs_wait_on_pages( | |||
91 | { | 88 | { |
92 | struct address_space *mapping = VFS_I(ip)->i_mapping; | 89 | struct address_space *mapping = VFS_I(ip)->i_mapping; |
93 | 90 | ||
94 | if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) | 91 | if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) { |
95 | return -filemap_fdatawait(mapping); | 92 | return -filemap_fdatawait_range(mapping, first, |
93 | last == -1 ? ip->i_size - 1 : last); | ||
94 | } | ||
96 | return 0; | 95 | return 0; |
97 | } | 96 | } |
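
Reviewer note: the hunks above move xfs_tosspages(), xfs_flushinval_pages() and xfs_wait_on_pages() from whole-mapping operations to the ranged page-cache APIs. Stripped of the XFS wrappers, the write-back-then-invalidate pattern they now use looks roughly like this; the helper name is made up and the range handling mirrors the diff rather than quoting it:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Illustrative helper: flush a byte range to disk, then drop it from the page cache. */
static int demo_flushinval_range(struct inode *inode, loff_t first, loff_t last)
{
        struct address_space    *mapping = inode->i_mapping;
        int                     error;

        error = filemap_write_and_wait_range(mapping, first,
                                             last == -1 ? LLONG_MAX : last);
        if (!error)
                truncate_inode_pages_range(mapping, first, last);
        return error;
}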
diff --git a/fs/xfs/linux-2.6/xfs_globals.c b/fs/xfs/linux-2.6/xfs_globals.c
index 2ae8b1ccb02e..76e81cff70b9 100644
--- a/fs/xfs/linux-2.6/xfs_globals.c
+++ b/fs/xfs/linux-2.6/xfs_globals.c
@@ -16,7 +16,6 @@ | |||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
17 | */ | 17 | */ |
18 | #include "xfs.h" | 18 | #include "xfs.h" |
19 | #include "xfs_cred.h" | ||
20 | #include "xfs_sysctl.h" | 19 | #include "xfs_sysctl.h" |
21 | 20 | ||
22 | /* | 21 | /* |
diff --git a/fs/xfs/linux-2.6/xfs_globals.h b/fs/xfs/linux-2.6/xfs_globals.h
deleted file mode 100644
index 69f71caf061c..000000000000
--- a/fs/xfs/linux-2.6/xfs_globals.h
+++ /dev/null
@@ -1,23 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | #ifndef __XFS_GLOBALS_H__ | ||
19 | #define __XFS_GLOBALS_H__ | ||
20 | |||
21 | extern uint64_t xfs_panic_mask; /* set to cause more panics */ | ||
22 | |||
23 | #endif /* __XFS_GLOBALS_H__ */ | ||
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 3b9e626f7cd1..2ea238f6d38e 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -790,7 +790,7 @@ xfs_ioc_fsgetxattr( | |||
790 | xfs_ilock(ip, XFS_ILOCK_SHARED); | 790 | xfs_ilock(ip, XFS_ILOCK_SHARED); |
791 | fa.fsx_xflags = xfs_ip2xflags(ip); | 791 | fa.fsx_xflags = xfs_ip2xflags(ip); |
792 | fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog; | 792 | fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog; |
793 | fa.fsx_projid = ip->i_d.di_projid; | 793 | fa.fsx_projid = xfs_get_projid(ip); |
794 | 794 | ||
795 | if (attr) { | 795 | if (attr) { |
796 | if (ip->i_afp) { | 796 | if (ip->i_afp) { |
@@ -909,10 +909,10 @@ xfs_ioctl_setattr( | |||
909 | return XFS_ERROR(EIO); | 909 | return XFS_ERROR(EIO); |
910 | 910 | ||
911 | /* | 911 | /* |
912 | * Disallow 32bit project ids because on-disk structure | 912 | * Disallow 32bit project ids when projid32bit feature is not enabled. |
913 | * is 16bit only. | ||
914 | */ | 913 | */ |
915 | if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1)) | 914 | if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1) && |
915 | !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb)) | ||
916 | return XFS_ERROR(EINVAL); | 916 | return XFS_ERROR(EINVAL); |
917 | 917 | ||
918 | /* | 918 | /* |
@@ -961,7 +961,7 @@ xfs_ioctl_setattr( | |||
961 | if (mask & FSX_PROJID) { | 961 | if (mask & FSX_PROJID) { |
962 | if (XFS_IS_QUOTA_RUNNING(mp) && | 962 | if (XFS_IS_QUOTA_RUNNING(mp) && |
963 | XFS_IS_PQUOTA_ON(mp) && | 963 | XFS_IS_PQUOTA_ON(mp) && |
964 | ip->i_d.di_projid != fa->fsx_projid) { | 964 | xfs_get_projid(ip) != fa->fsx_projid) { |
965 | ASSERT(tp); | 965 | ASSERT(tp); |
966 | code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp, | 966 | code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp, |
967 | capable(CAP_FOWNER) ? | 967 | capable(CAP_FOWNER) ? |
@@ -1063,12 +1063,12 @@ xfs_ioctl_setattr( | |||
1063 | * Change the ownerships and register quota modifications | 1063 | * Change the ownerships and register quota modifications |
1064 | * in the transaction. | 1064 | * in the transaction. |
1065 | */ | 1065 | */ |
1066 | if (ip->i_d.di_projid != fa->fsx_projid) { | 1066 | if (xfs_get_projid(ip) != fa->fsx_projid) { |
1067 | if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) { | 1067 | if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) { |
1068 | olddquot = xfs_qm_vop_chown(tp, ip, | 1068 | olddquot = xfs_qm_vop_chown(tp, ip, |
1069 | &ip->i_gdquot, gdqp); | 1069 | &ip->i_gdquot, gdqp); |
1070 | } | 1070 | } |
1071 | ip->i_d.di_projid = fa->fsx_projid; | 1071 | xfs_set_projid(ip, fa->fsx_projid); |
1072 | 1072 | ||
1073 | /* | 1073 | /* |
1074 | * We may have to rev the inode as well as | 1074 | * We may have to rev the inode as well as |
@@ -1088,8 +1088,8 @@ xfs_ioctl_setattr( | |||
1088 | xfs_diflags_to_linux(ip); | 1088 | xfs_diflags_to_linux(ip); |
1089 | } | 1089 | } |
1090 | 1090 | ||
1091 | xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); | ||
1091 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 1092 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
1092 | xfs_ichgtime(ip, XFS_ICHGTIME_CHG); | ||
1093 | 1093 | ||
1094 | XFS_STATS_INC(xs_ig_attrchg); | 1094 | XFS_STATS_INC(xs_ig_attrchg); |
1095 | 1095 | ||
@@ -1301,7 +1301,8 @@ xfs_file_ioctl( | |||
1301 | case XFS_IOC_ALLOCSP64: | 1301 | case XFS_IOC_ALLOCSP64: |
1302 | case XFS_IOC_FREESP64: | 1302 | case XFS_IOC_FREESP64: |
1303 | case XFS_IOC_RESVSP64: | 1303 | case XFS_IOC_RESVSP64: |
1304 | case XFS_IOC_UNRESVSP64: { | 1304 | case XFS_IOC_UNRESVSP64: |
1305 | case XFS_IOC_ZERO_RANGE: { | ||
1305 | xfs_flock64_t bf; | 1306 | xfs_flock64_t bf; |
1306 | 1307 | ||
1307 | if (copy_from_user(&bf, arg, sizeof(bf))) | 1308 | if (copy_from_user(&bf, arg, sizeof(bf))) |
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index 6c83f7f62dc9..b3486dfa5520 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -164,7 +164,8 @@ xfs_ioctl32_bstat_copyin( | |||
164 | get_user(bstat->bs_extsize, &bstat32->bs_extsize) || | 164 | get_user(bstat->bs_extsize, &bstat32->bs_extsize) || |
165 | get_user(bstat->bs_extents, &bstat32->bs_extents) || | 165 | get_user(bstat->bs_extents, &bstat32->bs_extents) || |
166 | get_user(bstat->bs_gen, &bstat32->bs_gen) || | 166 | get_user(bstat->bs_gen, &bstat32->bs_gen) || |
167 | get_user(bstat->bs_projid, &bstat32->bs_projid) || | 167 | get_user(bstat->bs_projid_lo, &bstat32->bs_projid_lo) || |
168 | get_user(bstat->bs_projid_hi, &bstat32->bs_projid_hi) || | ||
168 | get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask) || | 169 | get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask) || |
169 | get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) || | 170 | get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) || |
170 | get_user(bstat->bs_aextents, &bstat32->bs_aextents)) | 171 | get_user(bstat->bs_aextents, &bstat32->bs_aextents)) |
@@ -218,6 +219,7 @@ xfs_bulkstat_one_fmt_compat( | |||
218 | put_user(buffer->bs_extents, &p32->bs_extents) || | 219 | put_user(buffer->bs_extents, &p32->bs_extents) || |
219 | put_user(buffer->bs_gen, &p32->bs_gen) || | 220 | put_user(buffer->bs_gen, &p32->bs_gen) || |
220 | put_user(buffer->bs_projid, &p32->bs_projid) || | 221 | put_user(buffer->bs_projid, &p32->bs_projid) || |
222 | put_user(buffer->bs_projid_hi, &p32->bs_projid_hi) || | ||
221 | put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) || | 223 | put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) || |
222 | put_user(buffer->bs_dmstate, &p32->bs_dmstate) || | 224 | put_user(buffer->bs_dmstate, &p32->bs_dmstate) || |
223 | put_user(buffer->bs_aextents, &p32->bs_aextents)) | 225 | put_user(buffer->bs_aextents, &p32->bs_aextents)) |
@@ -574,6 +576,7 @@ xfs_file_compat_ioctl( | |||
574 | case XFS_IOC_FSGEOMETRY_V1: | 576 | case XFS_IOC_FSGEOMETRY_V1: |
575 | case XFS_IOC_FSGROWFSDATA: | 577 | case XFS_IOC_FSGROWFSDATA: |
576 | case XFS_IOC_FSGROWFSRT: | 578 | case XFS_IOC_FSGROWFSRT: |
579 | case XFS_IOC_ZERO_RANGE: | ||
577 | return xfs_file_ioctl(filp, cmd, p); | 580 | return xfs_file_ioctl(filp, cmd, p); |
578 | #else | 581 | #else |
579 | case XFS_IOC_ALLOCSP_32: | 582 | case XFS_IOC_ALLOCSP_32: |
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.h b/fs/xfs/linux-2.6/xfs_ioctl32.h
index 1024c4f8ba0d..08b605792a99 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.h
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.h
@@ -65,8 +65,10 @@ typedef struct compat_xfs_bstat { | |||
65 | __s32 bs_extsize; /* extent size */ | 65 | __s32 bs_extsize; /* extent size */ |
66 | __s32 bs_extents; /* number of extents */ | 66 | __s32 bs_extents; /* number of extents */ |
67 | __u32 bs_gen; /* generation count */ | 67 | __u32 bs_gen; /* generation count */ |
68 | __u16 bs_projid; /* project id */ | 68 | __u16 bs_projid_lo; /* lower part of project id */ |
69 | unsigned char bs_pad[14]; /* pad space, unused */ | 69 | #define bs_projid bs_projid_lo /* (previously just bs_projid) */ |
70 | __u16 bs_projid_hi; /* high part of project id */ | ||
71 | unsigned char bs_pad[12]; /* pad space, unused */ | ||
70 | __u32 bs_dmevmask; /* DMIG event mask */ | 72 | __u32 bs_dmevmask; /* DMIG event mask */ |
71 | __u16 bs_dmstate; /* DMIG state info */ | 73 | __u16 bs_dmstate; /* DMIG state info */ |
72 | __u16 bs_aextents; /* attribute number of extents */ | 74 | __u16 bs_aextents; /* attribute number of extents */ |
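
Reviewer note: the compat bulkstat structure above splits the old 16-bit bs_projid into bs_projid_lo and bs_projid_hi (taking two bytes from the pad area) so that 32-bit project ids fit without changing the structure size, matching the xfs_get_projid()/xfs_set_projid() accessors now used in xfs_ioctl.c. A hedged sketch of how the two halves presumably combine; the helper names are made up:

#include <linux/types.h>

/* Illustrative stand-ins for the real xfs_get_projid()/xfs_set_projid() accessors. */
static inline __u32 demo_projid(__u16 projid_hi, __u16 projid_lo)
{
        return ((__u32)projid_hi << 16) | projid_lo;
}

static inline void demo_set_projid(__u16 *projid_hi, __u16 *projid_lo, __u32 projid)
{
        *projid_hi = (__u16)(projid >> 16);
        *projid_lo = (__u16)(projid & 0xffff);
}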
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index b1fc2a6bfe83..ec858e09d546 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -95,41 +95,6 @@ xfs_mark_inode_dirty( | |||
95 | } | 95 | } |
96 | 96 | ||
97 | /* | 97 | /* |
98 | * Change the requested timestamp in the given inode. | ||
99 | * We don't lock across timestamp updates, and we don't log them but | ||
100 | * we do record the fact that there is dirty information in core. | ||
101 | */ | ||
102 | void | ||
103 | xfs_ichgtime( | ||
104 | xfs_inode_t *ip, | ||
105 | int flags) | ||
106 | { | ||
107 | struct inode *inode = VFS_I(ip); | ||
108 | timespec_t tv; | ||
109 | int sync_it = 0; | ||
110 | |||
111 | tv = current_fs_time(inode->i_sb); | ||
112 | |||
113 | if ((flags & XFS_ICHGTIME_MOD) && | ||
114 | !timespec_equal(&inode->i_mtime, &tv)) { | ||
115 | inode->i_mtime = tv; | ||
116 | sync_it = 1; | ||
117 | } | ||
118 | if ((flags & XFS_ICHGTIME_CHG) && | ||
119 | !timespec_equal(&inode->i_ctime, &tv)) { | ||
120 | inode->i_ctime = tv; | ||
121 | sync_it = 1; | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * Update complete - now make sure everyone knows that the inode | ||
126 | * is dirty. | ||
127 | */ | ||
128 | if (sync_it) | ||
129 | xfs_mark_inode_dirty_sync(ip); | ||
130 | } | ||
131 | |||
132 | /* | ||
133 | * Hook in SELinux. This is not quite correct yet, what we really need | 98 | * Hook in SELinux. This is not quite correct yet, what we really need |
134 | * here (as we do for default ACLs) is a mechanism by which creation of | 99 | * here (as we do for default ACLs) is a mechanism by which creation of |
135 | * these attrs can be journalled at inode creation time (along with the | 100 | * these attrs can be journalled at inode creation time (along with the |
@@ -224,7 +189,7 @@ xfs_vn_mknod( | |||
224 | } | 189 | } |
225 | 190 | ||
226 | xfs_dentry_to_name(&name, dentry); | 191 | xfs_dentry_to_name(&name, dentry); |
227 | error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip, NULL); | 192 | error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip); |
228 | if (unlikely(error)) | 193 | if (unlikely(error)) |
229 | goto out_free_acl; | 194 | goto out_free_acl; |
230 | 195 | ||
@@ -397,7 +362,7 @@ xfs_vn_symlink( | |||
397 | (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO); | 362 | (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO); |
398 | xfs_dentry_to_name(&name, dentry); | 363 | xfs_dentry_to_name(&name, dentry); |
399 | 364 | ||
400 | error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip, NULL); | 365 | error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip); |
401 | if (unlikely(error)) | 366 | if (unlikely(error)) |
402 | goto out; | 367 | goto out; |
403 | 368 | ||
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 2fa0bd9ebc7f..214ddd71ff79 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -71,6 +71,7 @@ | |||
71 | #include <linux/random.h> | 71 | #include <linux/random.h> |
72 | #include <linux/ctype.h> | 72 | #include <linux/ctype.h> |
73 | #include <linux/writeback.h> | 73 | #include <linux/writeback.h> |
74 | #include <linux/capability.h> | ||
74 | 75 | ||
75 | #include <asm/page.h> | 76 | #include <asm/page.h> |
76 | #include <asm/div64.h> | 77 | #include <asm/div64.h> |
@@ -79,14 +80,12 @@ | |||
79 | #include <asm/byteorder.h> | 80 | #include <asm/byteorder.h> |
80 | #include <asm/unaligned.h> | 81 | #include <asm/unaligned.h> |
81 | 82 | ||
82 | #include <xfs_cred.h> | ||
83 | #include <xfs_vnode.h> | 83 | #include <xfs_vnode.h> |
84 | #include <xfs_stats.h> | 84 | #include <xfs_stats.h> |
85 | #include <xfs_sysctl.h> | 85 | #include <xfs_sysctl.h> |
86 | #include <xfs_iops.h> | 86 | #include <xfs_iops.h> |
87 | #include <xfs_aops.h> | 87 | #include <xfs_aops.h> |
88 | #include <xfs_super.h> | 88 | #include <xfs_super.h> |
89 | #include <xfs_globals.h> | ||
90 | #include <xfs_buf.h> | 89 | #include <xfs_buf.h> |
91 | 90 | ||
92 | /* | 91 | /* |
@@ -144,7 +143,7 @@ | |||
144 | #define SYNCHRONIZE() barrier() | 143 | #define SYNCHRONIZE() barrier() |
145 | #define __return_address __builtin_return_address(0) | 144 | #define __return_address __builtin_return_address(0) |
146 | 145 | ||
147 | #define dfltprid 0 | 146 | #define XFS_PROJID_DEFAULT 0 |
148 | #define MAXPATHLEN 1024 | 147 | #define MAXPATHLEN 1024 |
149 | 148 | ||
150 | #define MIN(a,b) (min(a,b)) | 149 | #define MIN(a,b) (min(a,b)) |
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 08fd3102128c..ab31ce5aeaf9 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -44,7 +44,6 @@ | |||
44 | #include "xfs_buf_item.h" | 44 | #include "xfs_buf_item.h" |
45 | #include "xfs_utils.h" | 45 | #include "xfs_utils.h" |
46 | #include "xfs_vnodeops.h" | 46 | #include "xfs_vnodeops.h" |
47 | #include "xfs_version.h" | ||
48 | #include "xfs_log_priv.h" | 47 | #include "xfs_log_priv.h" |
49 | #include "xfs_trans_priv.h" | 48 | #include "xfs_trans_priv.h" |
50 | #include "xfs_filestream.h" | 49 | #include "xfs_filestream.h" |
@@ -645,7 +644,7 @@ xfs_barrier_test( | |||
645 | XFS_BUF_ORDERED(sbp); | 644 | XFS_BUF_ORDERED(sbp); |
646 | 645 | ||
647 | xfsbdstrat(mp, sbp); | 646 | xfsbdstrat(mp, sbp); |
648 | error = xfs_iowait(sbp); | 647 | error = xfs_buf_iowait(sbp); |
649 | 648 | ||
650 | /* | 649 | /* |
651 | * Clear all the flags we set and possible error state in the | 650 | * Clear all the flags we set and possible error state in the |
@@ -757,18 +756,20 @@ xfs_open_devices( | |||
757 | * Setup xfs_mount buffer target pointers | 756 | * Setup xfs_mount buffer target pointers |
758 | */ | 757 | */ |
759 | error = ENOMEM; | 758 | error = ENOMEM; |
760 | mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0, mp->m_fsname); | 759 | mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, 0, mp->m_fsname); |
761 | if (!mp->m_ddev_targp) | 760 | if (!mp->m_ddev_targp) |
762 | goto out_close_rtdev; | 761 | goto out_close_rtdev; |
763 | 762 | ||
764 | if (rtdev) { | 763 | if (rtdev) { |
765 | mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1, mp->m_fsname); | 764 | mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, 1, |
765 | mp->m_fsname); | ||
766 | if (!mp->m_rtdev_targp) | 766 | if (!mp->m_rtdev_targp) |
767 | goto out_free_ddev_targ; | 767 | goto out_free_ddev_targ; |
768 | } | 768 | } |
769 | 769 | ||
770 | if (logdev && logdev != ddev) { | 770 | if (logdev && logdev != ddev) { |
771 | mp->m_logdev_targp = xfs_alloc_buftarg(logdev, 1, mp->m_fsname); | 771 | mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, 1, |
772 | mp->m_fsname); | ||
772 | if (!mp->m_logdev_targp) | 773 | if (!mp->m_logdev_targp) |
773 | goto out_free_rtdev_targ; | 774 | goto out_free_rtdev_targ; |
774 | } else { | 775 | } else { |
@@ -971,12 +972,7 @@ xfs_fs_inode_init_once( | |||
971 | 972 | ||
972 | /* | 973 | /* |
973 | * Dirty the XFS inode when mark_inode_dirty_sync() is called so that | 974 | * Dirty the XFS inode when mark_inode_dirty_sync() is called so that |
974 | * we catch unlogged VFS level updates to the inode. Care must be taken | 975 | * we catch unlogged VFS level updates to the inode. |
975 | * here - the transaction code calls mark_inode_dirty_sync() to mark the | ||
976 | * VFS inode dirty in a transaction and clears the i_update_core field; | ||
977 | * it must clear the field after calling mark_inode_dirty_sync() to | ||
978 | * correctly indicate that the dirty state has been propagated into the | ||
979 | * inode log item. | ||
980 | * | 976 | * |
981 | * We need the barrier() to maintain correct ordering between unlogged | 977 | * We need the barrier() to maintain correct ordering between unlogged |
982 | * updates and the transaction commit code that clears the i_update_core | 978 | * updates and the transaction commit code that clears the i_update_core |
@@ -1520,8 +1516,9 @@ xfs_fs_fill_super( | |||
1520 | if (error) | 1516 | if (error) |
1521 | goto out_free_fsname; | 1517 | goto out_free_fsname; |
1522 | 1518 | ||
1523 | if (xfs_icsb_init_counters(mp)) | 1519 | error = xfs_icsb_init_counters(mp); |
1524 | mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB; | 1520 | if (error) |
1521 | goto out_close_devices; | ||
1525 | 1522 | ||
1526 | error = xfs_readsb(mp, flags); | 1523 | error = xfs_readsb(mp, flags); |
1527 | if (error) | 1524 | if (error) |
@@ -1582,6 +1579,7 @@ xfs_fs_fill_super( | |||
1582 | xfs_freesb(mp); | 1579 | xfs_freesb(mp); |
1583 | out_destroy_counters: | 1580 | out_destroy_counters: |
1584 | xfs_icsb_destroy_counters(mp); | 1581 | xfs_icsb_destroy_counters(mp); |
1582 | out_close_devices: | ||
1585 | xfs_close_devices(mp); | 1583 | xfs_close_devices(mp); |
1586 | out_free_fsname: | 1584 | out_free_fsname: |
1587 | xfs_free_fsname(mp); | 1585 | xfs_free_fsname(mp); |
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index 1ef4a4d2d997..50a3266c999e 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -62,6 +62,7 @@ extern void xfs_qm_exit(void); | |||
62 | # define XFS_DBG_STRING "no debug" | 62 | # define XFS_DBG_STRING "no debug" |
63 | #endif | 63 | #endif |
64 | 64 | ||
65 | #define XFS_VERSION_STRING "SGI XFS" | ||
65 | #define XFS_BUILD_OPTIONS XFS_ACL_STRING \ | 66 | #define XFS_BUILD_OPTIONS XFS_ACL_STRING \ |
66 | XFS_SECURITY_STRING \ | 67 | XFS_SECURITY_STRING \ |
67 | XFS_REALTIME_STRING \ | 68 | XFS_REALTIME_STRING \ |
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 81976ffed7d6..37d33254981d 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -39,42 +39,39 @@ | |||
39 | #include <linux/kthread.h> | 39 | #include <linux/kthread.h> |
40 | #include <linux/freezer.h> | 40 | #include <linux/freezer.h> |
41 | 41 | ||
42 | /* | ||
43 | * The inode lookup is done in batches to keep the amount of lock traffic and | ||
44 | * radix tree lookups to a minimum. The batch size is a trade off between | ||
45 | * lookup reduction and stack usage. This is in the reclaim path, so we can't | ||
46 | * be too greedy. | ||
47 | */ | ||
48 | #define XFS_LOOKUP_BATCH 32 | ||
42 | 49 | ||
43 | STATIC xfs_inode_t * | 50 | STATIC int |
44 | xfs_inode_ag_lookup( | 51 | xfs_inode_ag_walk_grab( |
45 | struct xfs_mount *mp, | 52 | struct xfs_inode *ip) |
46 | struct xfs_perag *pag, | ||
47 | uint32_t *first_index, | ||
48 | int tag) | ||
49 | { | 53 | { |
50 | int nr_found; | 54 | struct inode *inode = VFS_I(ip); |
51 | struct xfs_inode *ip; | ||
52 | 55 | ||
53 | /* | 56 | /* nothing to sync during shutdown */ |
54 | * use a gang lookup to find the next inode in the tree | 57 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) |
55 | * as the tree is sparse and a gang lookup walks to find | 58 | return EFSCORRUPTED; |
56 | * the number of objects requested. | 59 | |
57 | */ | 60 | /* avoid new or reclaimable inodes. Leave for reclaim code to flush */ |
58 | if (tag == XFS_ICI_NO_TAG) { | 61 | if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM)) |
59 | nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, | 62 | return ENOENT; |
60 | (void **)&ip, *first_index, 1); | 63 | |
61 | } else { | 64 | /* If we can't grab the inode, it must on it's way to reclaim. */ |
62 | nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root, | 65 | /* If we can't grab the inode, it must be on its way to reclaim. */ |
63 | (void **)&ip, *first_index, 1, tag); | 66 | return ENOENT; |
67 | |||
68 | if (is_bad_inode(inode)) { | ||
69 | IRELE(ip); | ||
70 | return ENOENT; | ||
64 | } | 71 | } |
65 | if (!nr_found) | ||
66 | return NULL; | ||
67 | 72 | ||
68 | /* | 73 | /* inode is valid */ |
69 | * Update the index for the next lookup. Catch overflows | 74 | return 0; |
70 | * into the next AG range which can occur if we have inodes | ||
71 | * in the last block of the AG and we are currently | ||
72 | * pointing to the last inode. | ||
73 | */ | ||
74 | *first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); | ||
75 | if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) | ||
76 | return NULL; | ||
77 | return ip; | ||
78 | } | 75 | } |
79 | 76 | ||
80 | STATIC int | 77 | STATIC int |
@@ -83,49 +80,75 @@ xfs_inode_ag_walk( | |||
83 | struct xfs_perag *pag, | 80 | struct xfs_perag *pag, |
84 | int (*execute)(struct xfs_inode *ip, | 81 | int (*execute)(struct xfs_inode *ip, |
85 | struct xfs_perag *pag, int flags), | 82 | struct xfs_perag *pag, int flags), |
86 | int flags, | 83 | int flags) |
87 | int tag, | ||
88 | int exclusive, | ||
89 | int *nr_to_scan) | ||
90 | { | 84 | { |
91 | uint32_t first_index; | 85 | uint32_t first_index; |
92 | int last_error = 0; | 86 | int last_error = 0; |
93 | int skipped; | 87 | int skipped; |
88 | int done; | ||
89 | int nr_found; | ||
94 | 90 | ||
95 | restart: | 91 | restart: |
92 | done = 0; | ||
96 | skipped = 0; | 93 | skipped = 0; |
97 | first_index = 0; | 94 | first_index = 0; |
95 | nr_found = 0; | ||
98 | do { | 96 | do { |
97 | struct xfs_inode *batch[XFS_LOOKUP_BATCH]; | ||
99 | int error = 0; | 98 | int error = 0; |
100 | xfs_inode_t *ip; | 99 | int i; |
101 | 100 | ||
102 | if (exclusive) | 101 | read_lock(&pag->pag_ici_lock); |
103 | write_lock(&pag->pag_ici_lock); | 102 | nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, |
104 | else | 103 | (void **)batch, first_index, |
105 | read_lock(&pag->pag_ici_lock); | 104 | XFS_LOOKUP_BATCH); |
106 | ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag); | 105 | if (!nr_found) { |
107 | if (!ip) { | 106 | read_unlock(&pag->pag_ici_lock); |
108 | if (exclusive) | ||
109 | write_unlock(&pag->pag_ici_lock); | ||
110 | else | ||
111 | read_unlock(&pag->pag_ici_lock); | ||
112 | break; | 107 | break; |
113 | } | 108 | } |
114 | 109 | ||
115 | /* execute releases pag->pag_ici_lock */ | 110 | /* |
116 | error = execute(ip, pag, flags); | 111 | * Grab the inodes before we drop the lock. If we found |
117 | if (error == EAGAIN) { | 112 | * nothing, nr == 0 and the loop will be skipped. |
118 | skipped++; | 113 | */ |
119 | continue; | 114 | for (i = 0; i < nr_found; i++) { |
115 | struct xfs_inode *ip = batch[i]; | ||
116 | |||
117 | if (done || xfs_inode_ag_walk_grab(ip)) | ||
118 | batch[i] = NULL; | ||
119 | |||
120 | /* | ||
121 | * Update the index for the next lookup. Catch overflows | ||
122 | * into the next AG range which can occur if we have inodes | ||
123 | * in the last block of the AG and we are currently | ||
124 | * pointing to the last inode. | ||
125 | */ | ||
126 | first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); | ||
127 | if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) | ||
128 | done = 1; | ||
129 | } | ||
130 | |||
131 | /* unlock now we've grabbed the inodes. */ | ||
132 | read_unlock(&pag->pag_ici_lock); | ||
133 | |||
134 | for (i = 0; i < nr_found; i++) { | ||
135 | if (!batch[i]) | ||
136 | continue; | ||
137 | error = execute(batch[i], pag, flags); | ||
138 | IRELE(batch[i]); | ||
139 | if (error == EAGAIN) { | ||
140 | skipped++; | ||
141 | continue; | ||
142 | } | ||
143 | if (error && last_error != EFSCORRUPTED) | ||
144 | last_error = error; | ||
120 | } | 145 | } |
121 | if (error) | ||
122 | last_error = error; | ||
123 | 146 | ||
124 | /* bail out if the filesystem is corrupted. */ | 147 | /* bail out if the filesystem is corrupted. */ |
125 | if (error == EFSCORRUPTED) | 148 | if (error == EFSCORRUPTED) |
126 | break; | 149 | break; |
127 | 150 | ||
128 | } while ((*nr_to_scan)--); | 151 | } while (nr_found && !done); |
129 | 152 | ||
130 | if (skipped) { | 153 | if (skipped) { |
131 | delay(1); | 154 | delay(1); |
@@ -134,110 +157,32 @@ restart: | |||
134 | return last_error; | 157 | return last_error; |
135 | } | 158 | } |
136 | 159 | ||
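The batched walk above gathers up to XFS_LOOKUP_BATCH inodes per radix_tree_gang_lookup() call, grabs references while pag_ici_lock is held, and only runs the execute callback after the lock has been dropped. Below is a minimal userspace sketch of that gather-then-process pattern with the overflow-checked cursor; the sorted array standing in for the per-AG radix tree and all of the names are illustrative, not the kernel API.

        #include <stdio.h>
        #include <stdint.h>

        #define LOOKUP_BATCH 32

        /* a sorted array of inode numbers stands in for the per-AG radix tree */
        static const uint32_t tree[] = { 3, 7, 8, 15, 31, 64, 65, 900, 4096 };
        static const int tree_len = sizeof(tree) / sizeof(tree[0]);

        /* gather up to 'max' entries with index >= first; returns how many were found */
        static int gang_lookup(uint32_t first, uint32_t *batch, int max)
        {
                int nr = 0;

                for (int i = 0; i < tree_len && nr < max; i++)
                        if (tree[i] >= first)
                                batch[nr++] = tree[i];
                return nr;
        }

        int main(void)
        {
                uint32_t batch[LOOKUP_BATCH];
                uint32_t first_index = 0;
                int done = 0;
                int nr_found;

                do {
                        /* "locked" phase: gather a batch and advance the cursor */
                        nr_found = gang_lookup(first_index, batch, LOOKUP_BATCH);
                        if (!nr_found)
                                break;

                        for (int i = 0; i < nr_found; i++) {
                                /* point past the entry; a wrap to a smaller value
                                 * means we ran off the end of the index space */
                                first_index = batch[i] + 1;
                                if (first_index < batch[i])
                                        done = 1;
                        }

                        /* "unlocked" phase: process the whole batch */
                        for (int i = 0; i < nr_found; i++)
                                printf("processing inode %u\n", (unsigned)batch[i]);
                } while (nr_found && !done);

                return 0;
        }

The important property mirrored here is that nothing sleeps or does real work while the tree lock is held; the batch array carries the work out of the critical section.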
137 | /* | ||
138 | * Select the next per-ag structure to iterate during the walk. The reclaim | ||
139 | * walk is optimised only to walk AGs with reclaimable inodes in them. | ||
140 | */ | ||
141 | static struct xfs_perag * | ||
142 | xfs_inode_ag_iter_next_pag( | ||
143 | struct xfs_mount *mp, | ||
144 | xfs_agnumber_t *first, | ||
145 | int tag) | ||
146 | { | ||
147 | struct xfs_perag *pag = NULL; | ||
148 | |||
149 | if (tag == XFS_ICI_RECLAIM_TAG) { | ||
150 | int found; | ||
151 | int ref; | ||
152 | |||
153 | spin_lock(&mp->m_perag_lock); | ||
154 | found = radix_tree_gang_lookup_tag(&mp->m_perag_tree, | ||
155 | (void **)&pag, *first, 1, tag); | ||
156 | if (found <= 0) { | ||
157 | spin_unlock(&mp->m_perag_lock); | ||
158 | return NULL; | ||
159 | } | ||
160 | *first = pag->pag_agno + 1; | ||
161 | /* open coded pag reference increment */ | ||
162 | ref = atomic_inc_return(&pag->pag_ref); | ||
163 | spin_unlock(&mp->m_perag_lock); | ||
164 | trace_xfs_perag_get_reclaim(mp, pag->pag_agno, ref, _RET_IP_); | ||
165 | } else { | ||
166 | pag = xfs_perag_get(mp, *first); | ||
167 | (*first)++; | ||
168 | } | ||
169 | return pag; | ||
170 | } | ||
171 | |||
172 | int | 160 | int |
173 | xfs_inode_ag_iterator( | 161 | xfs_inode_ag_iterator( |
174 | struct xfs_mount *mp, | 162 | struct xfs_mount *mp, |
175 | int (*execute)(struct xfs_inode *ip, | 163 | int (*execute)(struct xfs_inode *ip, |
176 | struct xfs_perag *pag, int flags), | 164 | struct xfs_perag *pag, int flags), |
177 | int flags, | 165 | int flags) |
178 | int tag, | ||
179 | int exclusive, | ||
180 | int *nr_to_scan) | ||
181 | { | 166 | { |
182 | struct xfs_perag *pag; | 167 | struct xfs_perag *pag; |
183 | int error = 0; | 168 | int error = 0; |
184 | int last_error = 0; | 169 | int last_error = 0; |
185 | xfs_agnumber_t ag; | 170 | xfs_agnumber_t ag; |
186 | int nr; | ||
187 | 171 | ||
188 | nr = nr_to_scan ? *nr_to_scan : INT_MAX; | ||
189 | ag = 0; | 172 | ag = 0; |
190 | while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag, tag))) { | 173 | while ((pag = xfs_perag_get(mp, ag))) { |
191 | error = xfs_inode_ag_walk(mp, pag, execute, flags, tag, | 174 | ag = pag->pag_agno + 1; |
192 | exclusive, &nr); | 175 | error = xfs_inode_ag_walk(mp, pag, execute, flags); |
193 | xfs_perag_put(pag); | 176 | xfs_perag_put(pag); |
194 | if (error) { | 177 | if (error) { |
195 | last_error = error; | 178 | last_error = error; |
196 | if (error == EFSCORRUPTED) | 179 | if (error == EFSCORRUPTED) |
197 | break; | 180 | break; |
198 | } | 181 | } |
199 | if (nr <= 0) | ||
200 | break; | ||
201 | } | 182 | } |
202 | if (nr_to_scan) | ||
203 | *nr_to_scan = nr; | ||
204 | return XFS_ERROR(last_error); | 183 | return XFS_ERROR(last_error); |
205 | } | 184 | } |
206 | 185 | ||
207 | /* must be called with pag_ici_lock held and releases it */ | ||
208 | int | ||
209 | xfs_sync_inode_valid( | ||
210 | struct xfs_inode *ip, | ||
211 | struct xfs_perag *pag) | ||
212 | { | ||
213 | struct inode *inode = VFS_I(ip); | ||
214 | int error = EFSCORRUPTED; | ||
215 | |||
216 | /* nothing to sync during shutdown */ | ||
217 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) | ||
218 | goto out_unlock; | ||
219 | |||
220 | /* avoid new or reclaimable inodes. Leave for reclaim code to flush */ | ||
221 | error = ENOENT; | ||
222 | if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM)) | ||
223 | goto out_unlock; | ||
224 | |||
225 | /* If we can't grab the inode, it must on it's way to reclaim. */ | ||
226 | if (!igrab(inode)) | ||
227 | goto out_unlock; | ||
228 | |||
229 | if (is_bad_inode(inode)) { | ||
230 | IRELE(ip); | ||
231 | goto out_unlock; | ||
232 | } | ||
233 | |||
234 | /* inode is valid */ | ||
235 | error = 0; | ||
236 | out_unlock: | ||
237 | read_unlock(&pag->pag_ici_lock); | ||
238 | return error; | ||
239 | } | ||
240 | |||
241 | STATIC int | 186 | STATIC int |
242 | xfs_sync_inode_data( | 187 | xfs_sync_inode_data( |
243 | struct xfs_inode *ip, | 188 | struct xfs_inode *ip, |
@@ -248,10 +193,6 @@ xfs_sync_inode_data( | |||
248 | struct address_space *mapping = inode->i_mapping; | 193 | struct address_space *mapping = inode->i_mapping; |
249 | int error = 0; | 194 | int error = 0; |
250 | 195 | ||
251 | error = xfs_sync_inode_valid(ip, pag); | ||
252 | if (error) | ||
253 | return error; | ||
254 | |||
255 | if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) | 196 | if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) |
256 | goto out_wait; | 197 | goto out_wait; |
257 | 198 | ||
@@ -268,7 +209,6 @@ xfs_sync_inode_data( | |||
268 | out_wait: | 209 | out_wait: |
269 | if (flags & SYNC_WAIT) | 210 | if (flags & SYNC_WAIT) |
270 | xfs_ioend_wait(ip); | 211 | xfs_ioend_wait(ip); |
271 | IRELE(ip); | ||
272 | return error; | 212 | return error; |
273 | } | 213 | } |
274 | 214 | ||
@@ -280,10 +220,6 @@ xfs_sync_inode_attr( | |||
280 | { | 220 | { |
281 | int error = 0; | 221 | int error = 0; |
282 | 222 | ||
283 | error = xfs_sync_inode_valid(ip, pag); | ||
284 | if (error) | ||
285 | return error; | ||
286 | |||
287 | xfs_ilock(ip, XFS_ILOCK_SHARED); | 223 | xfs_ilock(ip, XFS_ILOCK_SHARED); |
288 | if (xfs_inode_clean(ip)) | 224 | if (xfs_inode_clean(ip)) |
289 | goto out_unlock; | 225 | goto out_unlock; |
@@ -302,7 +238,6 @@ xfs_sync_inode_attr( | |||
302 | 238 | ||
303 | out_unlock: | 239 | out_unlock: |
304 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 240 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
305 | IRELE(ip); | ||
306 | return error; | 241 | return error; |
307 | } | 242 | } |
308 | 243 | ||
@@ -318,8 +253,7 @@ xfs_sync_data( | |||
318 | 253 | ||
319 | ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0); | 254 | ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0); |
320 | 255 | ||
321 | error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags, | 256 | error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags); |
322 | XFS_ICI_NO_TAG, 0, NULL); | ||
323 | if (error) | 257 | if (error) |
324 | return XFS_ERROR(error); | 258 | return XFS_ERROR(error); |
325 | 259 | ||
@@ -337,8 +271,7 @@ xfs_sync_attr( | |||
337 | { | 271 | { |
338 | ASSERT((flags & ~SYNC_WAIT) == 0); | 272 | ASSERT((flags & ~SYNC_WAIT) == 0); |
339 | 273 | ||
340 | return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags, | 274 | return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags); |
341 | XFS_ICI_NO_TAG, 0, NULL); | ||
342 | } | 275 | } |
343 | 276 | ||
344 | STATIC int | 277 | STATIC int |
@@ -698,6 +631,43 @@ __xfs_inode_clear_reclaim_tag( | |||
698 | } | 631 | } |
699 | 632 | ||
700 | /* | 633 | /* |
634 | * Grab the inode for reclaim exclusively. | ||
635 | * Return 0 if we grabbed it, non-zero otherwise. | ||
636 | */ | ||
637 | STATIC int | ||
638 | xfs_reclaim_inode_grab( | ||
639 | struct xfs_inode *ip, | ||
640 | int flags) | ||
641 | { | ||
642 | |||
643 | /* | ||
644 | * do some unlocked checks first to avoid unnecessary lock traffic. | ||
645 | * The first is a flush lock check, the second is an already-in-reclaim | ||
646 | * check. Only do these checks if we are not going to block on locks. | ||
647 | */ | ||
648 | if ((flags & SYNC_TRYLOCK) && | ||
649 | (!ip->i_flush.done || __xfs_iflags_test(ip, XFS_IRECLAIM))) { | ||
650 | return 1; | ||
651 | } | ||
652 | |||
653 | /* | ||
654 | * The radix tree lock here protects a thread in xfs_iget from racing | ||
655 | * with us starting reclaim on the inode. Once we have the | ||
656 | * XFS_IRECLAIM flag set it will not touch us. | ||
657 | */ | ||
658 | spin_lock(&ip->i_flags_lock); | ||
659 | ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE)); | ||
660 | if (__xfs_iflags_test(ip, XFS_IRECLAIM)) { | ||
661 | /* ignore as it is already under reclaim */ | ||
662 | spin_unlock(&ip->i_flags_lock); | ||
663 | return 1; | ||
664 | } | ||
665 | __xfs_iflags_set(ip, XFS_IRECLAIM); | ||
666 | spin_unlock(&ip->i_flags_lock); | ||
667 | return 0; | ||
668 | } | ||
669 | |||
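xfs_reclaim_inode_grab() above first does cheap unlocked rejections and only then confirms and claims the inode under i_flags_lock, so two reclaim walkers can never both win the same inode. The following is a small userspace model of that check-unlocked-then-test-and-set-under-lock pattern; the struct and flag names are invented for illustration and are not XFS types.

        /* build: cc -pthread reclaim_grab.c */
        #include <pthread.h>
        #include <stdio.h>

        #define F_RECLAIMABLE   0x1
        #define F_RECLAIM       0x2

        struct toy_inode {
                unsigned int    flags;
                pthread_mutex_t flags_lock;
        };

        /* return 0 if we won the right to reclaim the inode, 1 otherwise */
        static int reclaim_grab(struct toy_inode *ip, int trylock)
        {
                /* intentionally racy fast path: skip inodes that are obviously
                 * already claimed without taking the lock */
                if (trylock && (ip->flags & F_RECLAIM))
                        return 1;

                pthread_mutex_lock(&ip->flags_lock);
                if (ip->flags & F_RECLAIM) {
                        /* lost the race to another walker */
                        pthread_mutex_unlock(&ip->flags_lock);
                        return 1;
                }
                ip->flags |= F_RECLAIM;         /* claim it exclusively */
                pthread_mutex_unlock(&ip->flags_lock);
                return 0;
        }

        int main(void)
        {
                struct toy_inode ino = { F_RECLAIMABLE, PTHREAD_MUTEX_INITIALIZER };

                printf("first grab:  %d\n", reclaim_grab(&ino, 1));  /* 0: we own it */
                printf("second grab: %d\n", reclaim_grab(&ino, 1));  /* 1: already claimed */
                return 0;
        }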
670 | /* | ||
701 | * Inodes in different states need to be treated differently, and the return | 671 | * Inodes in different states need to be treated differently, and the return |
702 | * value of xfs_iflush is not sufficient to get this right. The following table | 672 | * value of xfs_iflush is not sufficient to get this right. The following table |
703 | * lists the inode states and the reclaim actions necessary for non-blocking | 673 | * lists the inode states and the reclaim actions necessary for non-blocking |
@@ -755,23 +725,6 @@ xfs_reclaim_inode( | |||
755 | { | 725 | { |
756 | int error = 0; | 726 | int error = 0; |
757 | 727 | ||
758 | /* | ||
759 | * The radix tree lock here protects a thread in xfs_iget from racing | ||
760 | * with us starting reclaim on the inode. Once we have the | ||
761 | * XFS_IRECLAIM flag set it will not touch us. | ||
762 | */ | ||
763 | spin_lock(&ip->i_flags_lock); | ||
764 | ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE)); | ||
765 | if (__xfs_iflags_test(ip, XFS_IRECLAIM)) { | ||
766 | /* ignore as it is already under reclaim */ | ||
767 | spin_unlock(&ip->i_flags_lock); | ||
768 | write_unlock(&pag->pag_ici_lock); | ||
769 | return 0; | ||
770 | } | ||
771 | __xfs_iflags_set(ip, XFS_IRECLAIM); | ||
772 | spin_unlock(&ip->i_flags_lock); | ||
773 | write_unlock(&pag->pag_ici_lock); | ||
774 | |||
775 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 728 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
776 | if (!xfs_iflock_nowait(ip)) { | 729 | if (!xfs_iflock_nowait(ip)) { |
777 | if (!(sync_mode & SYNC_WAIT)) | 730 | if (!(sync_mode & SYNC_WAIT)) |
@@ -868,13 +821,126 @@ reclaim: | |||
868 | 821 | ||
869 | } | 822 | } |
870 | 823 | ||
824 | /* | ||
825 | * Walk the AGs and reclaim the inodes in them. Even if the filesystem is | ||
826 | * corrupted, we still want to try to reclaim all the inodes. If we don't, | ||
827 | * then a shutdown during the filesystem unmount reclaim walk will leak all the | ||
828 | * unreclaimed inodes. | ||
829 | */ | ||
830 | int | ||
831 | xfs_reclaim_inodes_ag( | ||
832 | struct xfs_mount *mp, | ||
833 | int flags, | ||
834 | int *nr_to_scan) | ||
835 | { | ||
836 | struct xfs_perag *pag; | ||
837 | int error = 0; | ||
838 | int last_error = 0; | ||
839 | xfs_agnumber_t ag; | ||
840 | int trylock = flags & SYNC_TRYLOCK; | ||
841 | int skipped; | ||
842 | |||
843 | restart: | ||
844 | ag = 0; | ||
845 | skipped = 0; | ||
846 | while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { | ||
847 | unsigned long first_index = 0; | ||
848 | int done = 0; | ||
849 | int nr_found = 0; | ||
850 | |||
851 | ag = pag->pag_agno + 1; | ||
852 | |||
853 | if (trylock) { | ||
854 | if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) { | ||
855 | skipped++; | ||
856 | continue; | ||
857 | } | ||
858 | first_index = pag->pag_ici_reclaim_cursor; | ||
859 | } else | ||
860 | mutex_lock(&pag->pag_ici_reclaim_lock); | ||
861 | |||
862 | do { | ||
863 | struct xfs_inode *batch[XFS_LOOKUP_BATCH]; | ||
864 | int i; | ||
865 | |||
866 | write_lock(&pag->pag_ici_lock); | ||
867 | nr_found = radix_tree_gang_lookup_tag( | ||
868 | &pag->pag_ici_root, | ||
869 | (void **)batch, first_index, | ||
870 | XFS_LOOKUP_BATCH, | ||
871 | XFS_ICI_RECLAIM_TAG); | ||
872 | if (!nr_found) { | ||
873 | write_unlock(&pag->pag_ici_lock); | ||
874 | break; | ||
875 | } | ||
876 | |||
877 | /* | ||
878 | * Grab the inodes before we drop the lock. If we found | ||
879 | * nothing, nr == 0 and the loop will be skipped. | ||
880 | */ | ||
881 | for (i = 0; i < nr_found; i++) { | ||
882 | struct xfs_inode *ip = batch[i]; | ||
883 | |||
884 | if (done || xfs_reclaim_inode_grab(ip, flags)) | ||
885 | batch[i] = NULL; | ||
886 | |||
887 | /* | ||
888 | * Update the index for the next lookup. Catch | ||
889 | * overflows into the next AG range which can | ||
890 | * occur if we have inodes in the last block of | ||
891 | * the AG and we are currently pointing to the | ||
892 | * last inode. | ||
893 | */ | ||
894 | first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); | ||
895 | if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) | ||
896 | done = 1; | ||
897 | } | ||
898 | |||
899 | /* unlock now we've grabbed the inodes. */ | ||
900 | write_unlock(&pag->pag_ici_lock); | ||
901 | |||
902 | for (i = 0; i < nr_found; i++) { | ||
903 | if (!batch[i]) | ||
904 | continue; | ||
905 | error = xfs_reclaim_inode(batch[i], pag, flags); | ||
906 | if (error && last_error != EFSCORRUPTED) | ||
907 | last_error = error; | ||
908 | } | ||
909 | |||
910 | *nr_to_scan -= XFS_LOOKUP_BATCH; | ||
911 | |||
912 | } while (nr_found && !done && *nr_to_scan > 0); | ||
913 | |||
914 | if (trylock && !done) | ||
915 | pag->pag_ici_reclaim_cursor = first_index; | ||
916 | else | ||
917 | pag->pag_ici_reclaim_cursor = 0; | ||
918 | mutex_unlock(&pag->pag_ici_reclaim_lock); | ||
919 | xfs_perag_put(pag); | ||
920 | } | ||
921 | |||
922 | /* | ||
923 | * If we skipped any AG, and we still have scan count remaining, do | ||
924 | * another pass this time using blocking reclaim semantics (i.e. | ||
925 | * waiting on the reclaim locks and ignoring the reclaim cursors). This | ||
926 | * ensures that when we get more reclaimers than AGs we block rather | ||
927 | * than spin trying to execute reclaim. | ||
928 | */ | ||
929 | if (trylock && skipped && *nr_to_scan > 0) { | ||
930 | trylock = 0; | ||
931 | goto restart; | ||
932 | } | ||
933 | return XFS_ERROR(last_error); | ||
934 | } | ||
935 | |||
871 | int | 936 | int |
872 | xfs_reclaim_inodes( | 937 | xfs_reclaim_inodes( |
873 | xfs_mount_t *mp, | 938 | xfs_mount_t *mp, |
874 | int mode) | 939 | int mode) |
875 | { | 940 | { |
876 | return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode, | 941 | int nr_to_scan = INT_MAX; |
877 | XFS_ICI_RECLAIM_TAG, 1, NULL); | 942 | |
943 | return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan); | ||
878 | } | 944 | } |
879 | 945 | ||
880 | /* | 946 | /* |
@@ -896,17 +962,16 @@ xfs_reclaim_inode_shrink( | |||
896 | if (!(gfp_mask & __GFP_FS)) | 962 | if (!(gfp_mask & __GFP_FS)) |
897 | return -1; | 963 | return -1; |
898 | 964 | ||
899 | xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0, | 965 | xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK, &nr_to_scan); |
900 | XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan); | 966 | /* terminate if we don't exhaust the scan */ |
901 | /* if we don't exhaust the scan, don't bother coming back */ | ||
902 | if (nr_to_scan > 0) | 967 | if (nr_to_scan > 0) |
903 | return -1; | 968 | return -1; |
904 | } | 969 | } |
905 | 970 | ||
906 | reclaimable = 0; | 971 | reclaimable = 0; |
907 | ag = 0; | 972 | ag = 0; |
908 | while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag, | 973 | while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { |
909 | XFS_ICI_RECLAIM_TAG))) { | 974 | ag = pag->pag_agno + 1; |
910 | reclaimable += pag->pag_ici_reclaimable; | 975 | reclaimable += pag->pag_ici_reclaimable; |
911 | xfs_perag_put(pag); | 976 | xfs_perag_put(pag); |
912 | } | 977 | } |
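To summarise the reclaim walk added above: each AG's reclaim is serialised by pag_ici_reclaim_lock, a trylock pass skips AGs that another thread is already reclaiming (remembering a per-AG cursor so the next scan resumes where this one stopped), and if any AG was skipped while scan budget remains, the walk restarts in blocking mode. The sketch below models only that trylock-then-blocking restart over a handful of AGs; the busy flags and helper names are made up for illustration and the real locking is of course more involved.

        #include <stdbool.h>
        #include <stdio.h>

        #define NR_AGS 4

        /* one flag per AG stands in for pag_ici_reclaim_lock being held elsewhere */
        static bool ag_busy[NR_AGS] = { false, true, false, true };

        /* pretend to reclaim one AG; returns true if we got its lock */
        static bool reclaim_ag(int ag, bool trylock)
        {
                if (trylock && ag_busy[ag])
                        return false;   /* someone else is reclaiming it, skip */

                /* blocking mode: wait our turn (modelled by just clearing the flag) */
                ag_busy[ag] = false;
                printf("reclaimed AG %d (%s)\n", ag, trylock ? "trylock" : "blocking");
                return true;
        }

        static void reclaim_all(int nr_to_scan)
        {
                bool trylock = true;
                int skipped;

        restart:
                skipped = 0;
                for (int ag = 0; ag < NR_AGS && nr_to_scan > 0; ag++) {
                        if (!reclaim_ag(ag, trylock)) {
                                skipped++;
                                continue;
                        }
                        nr_to_scan--;   /* budget consumed per AG in this toy model */
                }

                /* anything skipped and budget left: retry in blocking mode */
                if (trylock && skipped && nr_to_scan > 0) {
                        trylock = false;
                        goto restart;
                }
        }

        int main(void)
        {
                reclaim_all(8);
                return 0;
        }

In the kernel the blocking pass also ignores the saved cursors, so a reclaimer that had to wait still makes forward progress across the whole AG.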
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index fe78726196f8..32ba6628290c 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h | |||
@@ -47,10 +47,10 @@ void __xfs_inode_set_reclaim_tag(struct xfs_perag *pag, struct xfs_inode *ip); | |||
47 | void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag, | 47 | void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag, |
48 | struct xfs_inode *ip); | 48 | struct xfs_inode *ip); |
49 | 49 | ||
50 | int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag); | 50 | int xfs_sync_inode_grab(struct xfs_inode *ip); |
51 | int xfs_inode_ag_iterator(struct xfs_mount *mp, | 51 | int xfs_inode_ag_iterator(struct xfs_mount *mp, |
52 | int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags), | 52 | int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags), |
53 | int flags, int tag, int write_lock, int *nr_to_scan); | 53 | int flags); |
54 | 54 | ||
55 | void xfs_inode_shrinker_register(struct xfs_mount *mp); | 55 | void xfs_inode_shrinker_register(struct xfs_mount *mp); |
56 | void xfs_inode_shrinker_unregister(struct xfs_mount *mp); | 56 | void xfs_inode_shrinker_unregister(struct xfs_mount *mp); |
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h index 8fe311a456e2..acef2e98c594 100644 --- a/fs/xfs/linux-2.6/xfs_trace.h +++ b/fs/xfs/linux-2.6/xfs_trace.h | |||
@@ -124,7 +124,7 @@ DEFINE_EVENT(xfs_perag_class, name, \ | |||
124 | unsigned long caller_ip), \ | 124 | unsigned long caller_ip), \ |
125 | TP_ARGS(mp, agno, refcount, caller_ip)) | 125 | TP_ARGS(mp, agno, refcount, caller_ip)) |
126 | DEFINE_PERAG_REF_EVENT(xfs_perag_get); | 126 | DEFINE_PERAG_REF_EVENT(xfs_perag_get); |
127 | DEFINE_PERAG_REF_EVENT(xfs_perag_get_reclaim); | 127 | DEFINE_PERAG_REF_EVENT(xfs_perag_get_tag); |
128 | DEFINE_PERAG_REF_EVENT(xfs_perag_put); | 128 | DEFINE_PERAG_REF_EVENT(xfs_perag_put); |
129 | DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim); | 129 | DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim); |
130 | DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim); | 130 | DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim); |
@@ -330,7 +330,7 @@ DEFINE_BUF_EVENT(xfs_buf_iowait_done); | |||
330 | DEFINE_BUF_EVENT(xfs_buf_delwri_queue); | 330 | DEFINE_BUF_EVENT(xfs_buf_delwri_queue); |
331 | DEFINE_BUF_EVENT(xfs_buf_delwri_dequeue); | 331 | DEFINE_BUF_EVENT(xfs_buf_delwri_dequeue); |
332 | DEFINE_BUF_EVENT(xfs_buf_delwri_split); | 332 | DEFINE_BUF_EVENT(xfs_buf_delwri_split); |
333 | DEFINE_BUF_EVENT(xfs_buf_get_noaddr); | 333 | DEFINE_BUF_EVENT(xfs_buf_get_uncached); |
334 | DEFINE_BUF_EVENT(xfs_bdstrat_shut); | 334 | DEFINE_BUF_EVENT(xfs_bdstrat_shut); |
335 | DEFINE_BUF_EVENT(xfs_buf_item_relse); | 335 | DEFINE_BUF_EVENT(xfs_buf_item_relse); |
336 | DEFINE_BUF_EVENT(xfs_buf_item_iodone); | 336 | DEFINE_BUF_EVENT(xfs_buf_item_iodone); |
diff --git a/fs/xfs/linux-2.6/xfs_version.h b/fs/xfs/linux-2.6/xfs_version.h deleted file mode 100644 index f8d279d7563a..000000000000 --- a/fs/xfs/linux-2.6/xfs_version.h +++ /dev/null | |||
@@ -1,29 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2001-2002,2005 Silicon Graphics, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | #ifndef __XFS_VERSION_H__ | ||
19 | #define __XFS_VERSION_H__ | ||
20 | |||
21 | /* | ||
22 | * Dummy file that can contain a timestamp to put into the | ||
23 | * XFS init string, to help users keep track of what they're | ||
24 | * running | ||
25 | */ | ||
26 | |||
27 | #define XFS_VERSION_STRING "SGI XFS" | ||
28 | |||
29 | #endif /* __XFS_VERSION_H__ */ | ||
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index e1a2f6800e01..faf8e1a83a12 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c | |||
@@ -463,87 +463,68 @@ xfs_qm_dqtobp( | |||
463 | uint flags) | 463 | uint flags) |
464 | { | 464 | { |
465 | xfs_bmbt_irec_t map; | 465 | xfs_bmbt_irec_t map; |
466 | int nmaps, error; | 466 | int nmaps = 1, error; |
467 | xfs_buf_t *bp; | 467 | xfs_buf_t *bp; |
468 | xfs_inode_t *quotip; | 468 | xfs_inode_t *quotip = XFS_DQ_TO_QIP(dqp); |
469 | xfs_mount_t *mp; | 469 | xfs_mount_t *mp = dqp->q_mount; |
470 | xfs_disk_dquot_t *ddq; | 470 | xfs_disk_dquot_t *ddq; |
471 | xfs_dqid_t id; | 471 | xfs_dqid_t id = be32_to_cpu(dqp->q_core.d_id); |
472 | boolean_t newdquot; | ||
473 | xfs_trans_t *tp = (tpp ? *tpp : NULL); | 472 | xfs_trans_t *tp = (tpp ? *tpp : NULL); |
474 | 473 | ||
475 | mp = dqp->q_mount; | 474 | dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk; |
476 | id = be32_to_cpu(dqp->q_core.d_id); | ||
477 | nmaps = 1; | ||
478 | newdquot = B_FALSE; | ||
479 | 475 | ||
480 | /* | 476 | xfs_ilock(quotip, XFS_ILOCK_SHARED); |
481 | * If we don't know where the dquot lives, find out. | 477 | if (XFS_IS_THIS_QUOTA_OFF(dqp)) { |
482 | */ | ||
483 | if (dqp->q_blkno == (xfs_daddr_t) 0) { | ||
484 | /* We use the id as an index */ | ||
485 | dqp->q_fileoffset = (xfs_fileoff_t)id / | ||
486 | mp->m_quotainfo->qi_dqperchunk; | ||
487 | nmaps = 1; | ||
488 | quotip = XFS_DQ_TO_QIP(dqp); | ||
489 | xfs_ilock(quotip, XFS_ILOCK_SHARED); | ||
490 | /* | 478 | /* |
491 | * Return if this type of quotas is turned off while we didn't | 479 | * Return if this type of quotas is turned off while we |
492 | * have an inode lock | 480 | * didn't have the quota inode lock. |
493 | */ | 481 | */ |
494 | if (XFS_IS_THIS_QUOTA_OFF(dqp)) { | 482 | xfs_iunlock(quotip, XFS_ILOCK_SHARED); |
495 | xfs_iunlock(quotip, XFS_ILOCK_SHARED); | 483 | return ESRCH; |
496 | return (ESRCH); | 484 | } |
497 | } | 485 | |
486 | /* | ||
487 | * Find the block map; no allocations yet | ||
488 | */ | ||
489 | error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset, | ||
490 | XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, | ||
491 | NULL, 0, &map, &nmaps, NULL); | ||
492 | |||
493 | xfs_iunlock(quotip, XFS_ILOCK_SHARED); | ||
494 | if (error) | ||
495 | return error; | ||
496 | |||
497 | ASSERT(nmaps == 1); | ||
498 | ASSERT(map.br_blockcount == 1); | ||
499 | |||
500 | /* | ||
501 | * Offset of dquot in the (fixed sized) dquot chunk. | ||
502 | */ | ||
503 | dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) * | ||
504 | sizeof(xfs_dqblk_t); | ||
505 | |||
506 | ASSERT(map.br_startblock != DELAYSTARTBLOCK); | ||
507 | if (map.br_startblock == HOLESTARTBLOCK) { | ||
498 | /* | 508 | /* |
499 | * Find the block map; no allocations yet | 509 | * We don't allocate unless we're asked to |
500 | */ | 510 | */ |
501 | error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset, | 511 | if (!(flags & XFS_QMOPT_DQALLOC)) |
502 | XFS_DQUOT_CLUSTER_SIZE_FSB, | 512 | return ENOENT; |
503 | XFS_BMAPI_METADATA, | ||
504 | NULL, 0, &map, &nmaps, NULL); | ||
505 | 513 | ||
506 | xfs_iunlock(quotip, XFS_ILOCK_SHARED); | 514 | ASSERT(tp); |
515 | error = xfs_qm_dqalloc(tpp, mp, dqp, quotip, | ||
516 | dqp->q_fileoffset, &bp); | ||
507 | if (error) | 517 | if (error) |
508 | return (error); | 518 | return error; |
509 | ASSERT(nmaps == 1); | 519 | tp = *tpp; |
510 | ASSERT(map.br_blockcount == 1); | 520 | } else { |
521 | trace_xfs_dqtobp_read(dqp); | ||
511 | 522 | ||
512 | /* | 523 | /* |
513 | * offset of dquot in the (fixed sized) dquot chunk. | 524 | * store the blkno etc so that we don't have to do the |
525 | * mapping all the time | ||
514 | */ | 526 | */ |
515 | dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) * | 527 | dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock); |
516 | sizeof(xfs_dqblk_t); | ||
517 | if (map.br_startblock == HOLESTARTBLOCK) { | ||
518 | /* | ||
519 | * We don't allocate unless we're asked to | ||
520 | */ | ||
521 | if (!(flags & XFS_QMOPT_DQALLOC)) | ||
522 | return (ENOENT); | ||
523 | |||
524 | ASSERT(tp); | ||
525 | if ((error = xfs_qm_dqalloc(tpp, mp, dqp, quotip, | ||
526 | dqp->q_fileoffset, &bp))) | ||
527 | return (error); | ||
528 | tp = *tpp; | ||
529 | newdquot = B_TRUE; | ||
530 | } else { | ||
531 | /* | ||
532 | * store the blkno etc so that we don't have to do the | ||
533 | * mapping all the time | ||
534 | */ | ||
535 | dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock); | ||
536 | } | ||
537 | } | ||
538 | ASSERT(dqp->q_blkno != DELAYSTARTBLOCK); | ||
539 | ASSERT(dqp->q_blkno != HOLESTARTBLOCK); | ||
540 | |||
541 | /* | ||
542 | * Read in the buffer, unless we've just done the allocation | ||
543 | * (in which case we already have the buf). | ||
544 | */ | ||
545 | if (!newdquot) { | ||
546 | trace_xfs_dqtobp_read(dqp); | ||
547 | 528 | ||
548 | error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, | 529 | error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, |
549 | dqp->q_blkno, | 530 | dqp->q_blkno, |
@@ -552,13 +533,14 @@ xfs_qm_dqtobp( | |||
552 | if (error || !bp) | 533 | if (error || !bp) |
553 | return XFS_ERROR(error); | 534 | return XFS_ERROR(error); |
554 | } | 535 | } |
536 | |||
555 | ASSERT(XFS_BUF_ISBUSY(bp)); | 537 | ASSERT(XFS_BUF_ISBUSY(bp)); |
556 | ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); | 538 | ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); |
557 | 539 | ||
558 | /* | 540 | /* |
559 | * calculate the location of the dquot inside the buffer. | 541 | * calculate the location of the dquot inside the buffer. |
560 | */ | 542 | */ |
561 | ddq = (xfs_disk_dquot_t *)((char *)XFS_BUF_PTR(bp) + dqp->q_bufoffset); | 543 | ddq = (struct xfs_disk_dquot *)(XFS_BUF_PTR(bp) + dqp->q_bufoffset); |
562 | 544 | ||
563 | /* | 545 | /* |
564 | * A simple sanity check in case we got a corrupted dquot... | 546 | * A simple sanity check in case we got a corrupted dquot... |
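With the restructuring above, xfs_qm_dqtobp() derives the dquot's location purely from its id: the quota-file offset is id / qi_dqperchunk (which chunk of the quota file holds the dquot) and q_bufoffset is (id % qi_dqperchunk) * sizeof(xfs_dqblk_t) (where it sits inside that chunk's buffer). A trivial arithmetic sketch follows; the dquots-per-chunk and record-size values are assumptions for illustration only, since the real numbers come from the filesystem geometry.

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t dqperchunk = 30;       /* assumed dquots per chunk, depends on block size */
                uint32_t dqblk_size = 136;      /* assumed on-disk dquot record size, illustrative */
                uint32_t id = 1007;             /* quota id (uid/gid/projid) */

                uint64_t fileoffset = id / dqperchunk;  /* chunk index within the quota file */
                uint64_t bufoffset  = (uint64_t)(id % dqperchunk) * dqblk_size; /* byte offset in the chunk */

                printf("dquot %u lives in chunk %llu at byte offset %llu\n",
                       (unsigned)id, (unsigned long long)fileoffset,
                       (unsigned long long)bufoffset);
                return 0;
        }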
@@ -1176,18 +1158,18 @@ xfs_qm_dqflush( | |||
1176 | xfs_dquot_t *dqp, | 1158 | xfs_dquot_t *dqp, |
1177 | uint flags) | 1159 | uint flags) |
1178 | { | 1160 | { |
1179 | xfs_mount_t *mp; | 1161 | struct xfs_mount *mp = dqp->q_mount; |
1180 | xfs_buf_t *bp; | 1162 | struct xfs_buf *bp; |
1181 | xfs_disk_dquot_t *ddqp; | 1163 | struct xfs_disk_dquot *ddqp; |
1182 | int error; | 1164 | int error; |
1183 | 1165 | ||
1184 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); | 1166 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); |
1185 | ASSERT(!completion_done(&dqp->q_flush)); | 1167 | ASSERT(!completion_done(&dqp->q_flush)); |
1168 | |||
1186 | trace_xfs_dqflush(dqp); | 1169 | trace_xfs_dqflush(dqp); |
1187 | 1170 | ||
1188 | /* | 1171 | /* |
1189 | * If not dirty, or it's pinned and we are not supposed to | 1172 | * If not dirty, or it's pinned and we are not supposed to block, nada. |
1190 | * block, nada. | ||
1191 | */ | 1173 | */ |
1192 | if (!XFS_DQ_IS_DIRTY(dqp) || | 1174 | if (!XFS_DQ_IS_DIRTY(dqp) || |
1193 | (!(flags & SYNC_WAIT) && atomic_read(&dqp->q_pincount) > 0)) { | 1175 | (!(flags & SYNC_WAIT) && atomic_read(&dqp->q_pincount) > 0)) { |
@@ -1201,40 +1183,46 @@ xfs_qm_dqflush( | |||
1201 | * down forcibly. If that's the case we must not write this dquot | 1183 | * down forcibly. If that's the case we must not write this dquot |
1202 | * to disk, because the log record didn't make it to disk! | 1184 | * to disk, because the log record didn't make it to disk! |
1203 | */ | 1185 | */ |
1204 | if (XFS_FORCED_SHUTDOWN(dqp->q_mount)) { | 1186 | if (XFS_FORCED_SHUTDOWN(mp)) { |
1205 | dqp->dq_flags &= ~(XFS_DQ_DIRTY); | 1187 | dqp->dq_flags &= ~XFS_DQ_DIRTY; |
1206 | xfs_dqfunlock(dqp); | 1188 | xfs_dqfunlock(dqp); |
1207 | return XFS_ERROR(EIO); | 1189 | return XFS_ERROR(EIO); |
1208 | } | 1190 | } |
1209 | 1191 | ||
1210 | /* | 1192 | /* |
1211 | * Get the buffer containing the on-disk dquot | 1193 | * Get the buffer containing the on-disk dquot |
1212 | * We don't need a transaction envelope because we know that the | ||
1213 | * the ondisk-dquot has already been allocated for. | ||
1214 | */ | 1194 | */ |
1215 | if ((error = xfs_qm_dqtobp(NULL, dqp, &ddqp, &bp, XFS_QMOPT_DOWARN))) { | 1195 | error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno, |
1196 | mp->m_quotainfo->qi_dqchunklen, 0, &bp); | ||
1197 | if (error) { | ||
1216 | ASSERT(error != ENOENT); | 1198 | ASSERT(error != ENOENT); |
1217 | /* | ||
1218 | * Quotas could have gotten turned off (ESRCH) | ||
1219 | */ | ||
1220 | xfs_dqfunlock(dqp); | 1199 | xfs_dqfunlock(dqp); |
1221 | return (error); | 1200 | return error; |
1222 | } | 1201 | } |
1223 | 1202 | ||
1224 | if (xfs_qm_dqcheck(&dqp->q_core, be32_to_cpu(ddqp->d_id), | 1203 | /* |
1225 | 0, XFS_QMOPT_DOWARN, "dqflush (incore copy)")) { | 1204 | * Calculate the location of the dquot inside the buffer. |
1226 | xfs_force_shutdown(dqp->q_mount, SHUTDOWN_CORRUPT_INCORE); | 1205 | */ |
1206 | ddqp = (struct xfs_disk_dquot *)(XFS_BUF_PTR(bp) + dqp->q_bufoffset); | ||
1207 | |||
1208 | /* | ||
1209 | * A simple sanity check in case we got a corrupted dquot.. | ||
1210 | */ | ||
1211 | if (xfs_qm_dqcheck(&dqp->q_core, be32_to_cpu(ddqp->d_id), 0, | ||
1212 | XFS_QMOPT_DOWARN, "dqflush (incore copy)")) { | ||
1213 | xfs_buf_relse(bp); | ||
1214 | xfs_dqfunlock(dqp); | ||
1215 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); | ||
1227 | return XFS_ERROR(EIO); | 1216 | return XFS_ERROR(EIO); |
1228 | } | 1217 | } |
1229 | 1218 | ||
1230 | /* This is the only portion of data that needs to persist */ | 1219 | /* This is the only portion of data that needs to persist */ |
1231 | memcpy(ddqp, &(dqp->q_core), sizeof(xfs_disk_dquot_t)); | 1220 | memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t)); |
1232 | 1221 | ||
1233 | /* | 1222 | /* |
1234 | * Clear the dirty field and remember the flush lsn for later use. | 1223 | * Clear the dirty field and remember the flush lsn for later use. |
1235 | */ | 1224 | */ |
1236 | dqp->dq_flags &= ~(XFS_DQ_DIRTY); | 1225 | dqp->dq_flags &= ~XFS_DQ_DIRTY; |
1237 | mp = dqp->q_mount; | ||
1238 | 1226 | ||
1239 | xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn, | 1227 | xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn, |
1240 | &dqp->q_logitem.qli_item.li_lsn); | 1228 | &dqp->q_logitem.qli_item.li_lsn); |
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 9a92407109a1..f8e854b4fde8 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c | |||
@@ -55,8 +55,6 @@ uint ndquot; | |||
55 | kmem_zone_t *qm_dqzone; | 55 | kmem_zone_t *qm_dqzone; |
56 | kmem_zone_t *qm_dqtrxzone; | 56 | kmem_zone_t *qm_dqtrxzone; |
57 | 57 | ||
58 | static cred_t xfs_zerocr; | ||
59 | |||
60 | STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int); | 58 | STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int); |
61 | STATIC void xfs_qm_list_destroy(xfs_dqlist_t *); | 59 | STATIC void xfs_qm_list_destroy(xfs_dqlist_t *); |
62 | 60 | ||
@@ -837,7 +835,7 @@ xfs_qm_dqattach_locked( | |||
837 | xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP, | 835 | xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP, |
838 | flags & XFS_QMOPT_DQALLOC, | 836 | flags & XFS_QMOPT_DQALLOC, |
839 | ip->i_udquot, &ip->i_gdquot) : | 837 | ip->i_udquot, &ip->i_gdquot) : |
840 | xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ, | 838 | xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ, |
841 | flags & XFS_QMOPT_DQALLOC, | 839 | flags & XFS_QMOPT_DQALLOC, |
842 | ip->i_udquot, &ip->i_gdquot); | 840 | ip->i_udquot, &ip->i_gdquot); |
843 | /* | 841 | /* |
@@ -1199,87 +1197,6 @@ xfs_qm_list_destroy( | |||
1199 | mutex_destroy(&(list->qh_lock)); | 1197 | mutex_destroy(&(list->qh_lock)); |
1200 | } | 1198 | } |
1201 | 1199 | ||
1202 | |||
1203 | /* | ||
1204 | * Stripped down version of dqattach. This doesn't attach, or even look at the | ||
1205 | * dquots attached to the inode. The rationale is that there won't be any | ||
1206 | * attached at the time this is called from quotacheck. | ||
1207 | */ | ||
1208 | STATIC int | ||
1209 | xfs_qm_dqget_noattach( | ||
1210 | xfs_inode_t *ip, | ||
1211 | xfs_dquot_t **O_udqpp, | ||
1212 | xfs_dquot_t **O_gdqpp) | ||
1213 | { | ||
1214 | int error; | ||
1215 | xfs_mount_t *mp; | ||
1216 | xfs_dquot_t *udqp, *gdqp; | ||
1217 | |||
1218 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | ||
1219 | mp = ip->i_mount; | ||
1220 | udqp = NULL; | ||
1221 | gdqp = NULL; | ||
1222 | |||
1223 | if (XFS_IS_UQUOTA_ON(mp)) { | ||
1224 | ASSERT(ip->i_udquot == NULL); | ||
1225 | /* | ||
1226 | * We want the dquot allocated if it doesn't exist. | ||
1227 | */ | ||
1228 | if ((error = xfs_qm_dqget(mp, ip, ip->i_d.di_uid, XFS_DQ_USER, | ||
1229 | XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, | ||
1230 | &udqp))) { | ||
1231 | /* | ||
1232 | * Shouldn't be able to turn off quotas here. | ||
1233 | */ | ||
1234 | ASSERT(error != ESRCH); | ||
1235 | ASSERT(error != ENOENT); | ||
1236 | return error; | ||
1237 | } | ||
1238 | ASSERT(udqp); | ||
1239 | } | ||
1240 | |||
1241 | if (XFS_IS_OQUOTA_ON(mp)) { | ||
1242 | ASSERT(ip->i_gdquot == NULL); | ||
1243 | if (udqp) | ||
1244 | xfs_dqunlock(udqp); | ||
1245 | error = XFS_IS_GQUOTA_ON(mp) ? | ||
1246 | xfs_qm_dqget(mp, ip, | ||
1247 | ip->i_d.di_gid, XFS_DQ_GROUP, | ||
1248 | XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN, | ||
1249 | &gdqp) : | ||
1250 | xfs_qm_dqget(mp, ip, | ||
1251 | ip->i_d.di_projid, XFS_DQ_PROJ, | ||
1252 | XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN, | ||
1253 | &gdqp); | ||
1254 | if (error) { | ||
1255 | if (udqp) | ||
1256 | xfs_qm_dqrele(udqp); | ||
1257 | ASSERT(error != ESRCH); | ||
1258 | ASSERT(error != ENOENT); | ||
1259 | return error; | ||
1260 | } | ||
1261 | ASSERT(gdqp); | ||
1262 | |||
1263 | /* Reacquire the locks in the right order */ | ||
1264 | if (udqp) { | ||
1265 | if (! xfs_qm_dqlock_nowait(udqp)) { | ||
1266 | xfs_dqunlock(gdqp); | ||
1267 | xfs_dqlock(udqp); | ||
1268 | xfs_dqlock(gdqp); | ||
1269 | } | ||
1270 | } | ||
1271 | } | ||
1272 | |||
1273 | *O_udqpp = udqp; | ||
1274 | *O_gdqpp = gdqp; | ||
1275 | |||
1276 | #ifdef QUOTADEBUG | ||
1277 | if (udqp) ASSERT(XFS_DQ_IS_LOCKED(udqp)); | ||
1278 | if (gdqp) ASSERT(XFS_DQ_IS_LOCKED(gdqp)); | ||
1279 | #endif | ||
1280 | return 0; | ||
1281 | } | ||
1282 | |||
1283 | /* | 1200 | /* |
1284 | * Create an inode and return with a reference already taken, but unlocked | 1201 | * Create an inode and return with a reference already taken, but unlocked |
1285 | * This is how we create quota inodes | 1202 | * This is how we create quota inodes |
@@ -1305,8 +1222,8 @@ xfs_qm_qino_alloc( | |||
1305 | return error; | 1222 | return error; |
1306 | } | 1223 | } |
1307 | 1224 | ||
1308 | if ((error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, | 1225 | error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed); |
1309 | &xfs_zerocr, 0, 1, ip, &committed))) { | 1226 | if (error) { |
1310 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | | 1227 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | |
1311 | XFS_TRANS_ABORT); | 1228 | XFS_TRANS_ABORT); |
1312 | return error; | 1229 | return error; |
@@ -1516,7 +1433,7 @@ xfs_qm_dqiterate( | |||
1516 | rablkcnt = map[i+1].br_blockcount; | 1433 | rablkcnt = map[i+1].br_blockcount; |
1517 | rablkno = map[i+1].br_startblock; | 1434 | rablkno = map[i+1].br_startblock; |
1518 | while (rablkcnt--) { | 1435 | while (rablkcnt--) { |
1519 | xfs_baread(mp->m_ddev_targp, | 1436 | xfs_buf_readahead(mp->m_ddev_targp, |
1520 | XFS_FSB_TO_DADDR(mp, rablkno), | 1437 | XFS_FSB_TO_DADDR(mp, rablkno), |
1521 | mp->m_quotainfo->qi_dqchunklen); | 1438 | mp->m_quotainfo->qi_dqchunklen); |
1522 | rablkno++; | 1439 | rablkno++; |
@@ -1546,18 +1463,34 @@ xfs_qm_dqiterate( | |||
1546 | 1463 | ||
1547 | /* | 1464 | /* |
1548 | * Called by dqusage_adjust in doing a quotacheck. | 1465 | * Called by dqusage_adjust in doing a quotacheck. |
1549 | * Given the inode, and a dquot (either USR or GRP, doesn't matter), | 1466 | * |
1550 | * this updates its incore copy as well as the buffer copy. This is | 1467 | * Given the inode and a dquot id, this updates both the incore dquot as well |
1551 | * so that once the quotacheck is done, we can just log all the buffers, | 1468 | * as the buffer copy. This is so that once the quotacheck is done, we can |
1552 | * as opposed to logging numerous updates to individual dquots. | 1469 | * just log all the buffers, as opposed to logging numerous updates to |
1470 | * individual dquots. | ||
1553 | */ | 1471 | */ |
1554 | STATIC void | 1472 | STATIC int |
1555 | xfs_qm_quotacheck_dqadjust( | 1473 | xfs_qm_quotacheck_dqadjust( |
1556 | xfs_dquot_t *dqp, | 1474 | struct xfs_inode *ip, |
1475 | xfs_dqid_t id, | ||
1476 | uint type, | ||
1557 | xfs_qcnt_t nblks, | 1477 | xfs_qcnt_t nblks, |
1558 | xfs_qcnt_t rtblks) | 1478 | xfs_qcnt_t rtblks) |
1559 | { | 1479 | { |
1560 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); | 1480 | struct xfs_mount *mp = ip->i_mount; |
1481 | struct xfs_dquot *dqp; | ||
1482 | int error; | ||
1483 | |||
1484 | error = xfs_qm_dqget(mp, ip, id, type, | ||
1485 | XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp); | ||
1486 | if (error) { | ||
1487 | /* | ||
1488 | * Shouldn't be able to turn off quotas here. | ||
1489 | */ | ||
1490 | ASSERT(error != ESRCH); | ||
1491 | ASSERT(error != ENOENT); | ||
1492 | return error; | ||
1493 | } | ||
1561 | 1494 | ||
1562 | trace_xfs_dqadjust(dqp); | 1495 | trace_xfs_dqadjust(dqp); |
1563 | 1496 | ||
@@ -1582,11 +1515,13 @@ xfs_qm_quotacheck_dqadjust( | |||
1582 | * There are no timers for the default values set in the root dquot. | 1515 | * There are no timers for the default values set in the root dquot. |
1583 | */ | 1516 | */ |
1584 | if (dqp->q_core.d_id) { | 1517 | if (dqp->q_core.d_id) { |
1585 | xfs_qm_adjust_dqlimits(dqp->q_mount, &dqp->q_core); | 1518 | xfs_qm_adjust_dqlimits(mp, &dqp->q_core); |
1586 | xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core); | 1519 | xfs_qm_adjust_dqtimers(mp, &dqp->q_core); |
1587 | } | 1520 | } |
1588 | 1521 | ||
1589 | dqp->dq_flags |= XFS_DQ_DIRTY; | 1522 | dqp->dq_flags |= XFS_DQ_DIRTY; |
1523 | xfs_qm_dqput(dqp); | ||
1524 | return 0; | ||
1590 | } | 1525 | } |
1591 | 1526 | ||
1592 | STATIC int | 1527 | STATIC int |
@@ -1629,8 +1564,7 @@ xfs_qm_dqusage_adjust( | |||
1629 | int *res) /* result code value */ | 1564 | int *res) /* result code value */ |
1630 | { | 1565 | { |
1631 | xfs_inode_t *ip; | 1566 | xfs_inode_t *ip; |
1632 | xfs_dquot_t *udqp, *gdqp; | 1567 | xfs_qcnt_t nblks, rtblks = 0; |
1633 | xfs_qcnt_t nblks, rtblks; | ||
1634 | int error; | 1568 | int error; |
1635 | 1569 | ||
1636 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); | 1570 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); |
@@ -1650,51 +1584,24 @@ xfs_qm_dqusage_adjust( | |||
1650 | * the case in all other instances. It's OK that we do this because | 1584 | * the case in all other instances. It's OK that we do this because |
1651 | * quotacheck is done only at mount time. | 1585 | * quotacheck is done only at mount time. |
1652 | */ | 1586 | */ |
1653 | if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip))) { | 1587 | error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip); |
1588 | if (error) { | ||
1654 | *res = BULKSTAT_RV_NOTHING; | 1589 | *res = BULKSTAT_RV_NOTHING; |
1655 | return error; | 1590 | return error; |
1656 | } | 1591 | } |
1657 | 1592 | ||
1658 | /* | 1593 | ASSERT(ip->i_delayed_blks == 0); |
1659 | * Obtain the locked dquots. In case of an error (eg. allocation | ||
1660 | * fails for ENOSPC), we return the negative of the error number | ||
1661 | * to bulkstat, so that it can get propagated to quotacheck() and | ||
1662 | * making us disable quotas for the file system. | ||
1663 | */ | ||
1664 | if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) { | ||
1665 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | ||
1666 | IRELE(ip); | ||
1667 | *res = BULKSTAT_RV_GIVEUP; | ||
1668 | return error; | ||
1669 | } | ||
1670 | 1594 | ||
1671 | rtblks = 0; | 1595 | if (XFS_IS_REALTIME_INODE(ip)) { |
1672 | if (! XFS_IS_REALTIME_INODE(ip)) { | ||
1673 | nblks = (xfs_qcnt_t)ip->i_d.di_nblocks; | ||
1674 | } else { | ||
1675 | /* | 1596 | /* |
1676 | * Walk thru the extent list and count the realtime blocks. | 1597 | * Walk thru the extent list and count the realtime blocks. |
1677 | */ | 1598 | */ |
1678 | if ((error = xfs_qm_get_rtblks(ip, &rtblks))) { | 1599 | error = xfs_qm_get_rtblks(ip, &rtblks); |
1679 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 1600 | if (error) |
1680 | IRELE(ip); | 1601 | goto error0; |
1681 | if (udqp) | ||
1682 | xfs_qm_dqput(udqp); | ||
1683 | if (gdqp) | ||
1684 | xfs_qm_dqput(gdqp); | ||
1685 | *res = BULKSTAT_RV_GIVEUP; | ||
1686 | return error; | ||
1687 | } | ||
1688 | nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks; | ||
1689 | } | 1602 | } |
1690 | ASSERT(ip->i_delayed_blks == 0); | ||
1691 | 1603 | ||
1692 | /* | 1604 | nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks; |
1693 | * We can't release the inode while holding its dquot locks. | ||
1694 | * The inode can go into inactive and might try to acquire the dquotlocks. | ||
1695 | * So, just unlock here and do a vn_rele at the end. | ||
1696 | */ | ||
1697 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | ||
1698 | 1605 | ||
1699 | /* | 1606 | /* |
1700 | * Add the (disk blocks and inode) resources occupied by this | 1607 | * Add the (disk blocks and inode) resources occupied by this |
@@ -1709,26 +1616,36 @@ xfs_qm_dqusage_adjust( | |||
1709 | * and quotaoffs don't race. (Quotachecks happen at mount time only). | 1616 | * and quotaoffs don't race. (Quotachecks happen at mount time only). |
1710 | */ | 1617 | */ |
1711 | if (XFS_IS_UQUOTA_ON(mp)) { | 1618 | if (XFS_IS_UQUOTA_ON(mp)) { |
1712 | ASSERT(udqp); | 1619 | error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid, |
1713 | xfs_qm_quotacheck_dqadjust(udqp, nblks, rtblks); | 1620 | XFS_DQ_USER, nblks, rtblks); |
1714 | xfs_qm_dqput(udqp); | 1621 | if (error) |
1622 | goto error0; | ||
1715 | } | 1623 | } |
1716 | if (XFS_IS_OQUOTA_ON(mp)) { | 1624 | |
1717 | ASSERT(gdqp); | 1625 | if (XFS_IS_GQUOTA_ON(mp)) { |
1718 | xfs_qm_quotacheck_dqadjust(gdqp, nblks, rtblks); | 1626 | error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid, |
1719 | xfs_qm_dqput(gdqp); | 1627 | XFS_DQ_GROUP, nblks, rtblks); |
1628 | if (error) | ||
1629 | goto error0; | ||
1720 | } | 1630 | } |
1721 | /* | ||
1722 | * Now release the inode. This will send it to 'inactive', and | ||
1723 | * possibly even free blocks. | ||
1724 | */ | ||
1725 | IRELE(ip); | ||
1726 | 1631 | ||
1727 | /* | 1632 | if (XFS_IS_PQUOTA_ON(mp)) { |
1728 | * Goto next inode. | 1633 | error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip), |
1729 | */ | 1634 | XFS_DQ_PROJ, nblks, rtblks); |
1635 | if (error) | ||
1636 | goto error0; | ||
1637 | } | ||
1638 | |||
1639 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | ||
1640 | IRELE(ip); | ||
1730 | *res = BULKSTAT_RV_DIDONE; | 1641 | *res = BULKSTAT_RV_DIDONE; |
1731 | return 0; | 1642 | return 0; |
1643 | |||
1644 | error0: | ||
1645 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | ||
1646 | IRELE(ip); | ||
1647 | *res = BULKSTAT_RV_GIVEUP; | ||
1648 | return error; | ||
1732 | } | 1649 | } |
1733 | 1650 | ||
1734 | /* | 1651 | /* |
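After the changes above, quotacheck no longer pre-fetches locked dquots through a dedicated helper: xfs_qm_dqusage_adjust() computes nblks once (di_nblocks minus any realtime blocks) and then calls xfs_qm_quotacheck_dqadjust() separately for the user, group and project ids, each call doing its own dqget/adjust/dqput and bailing out on the first error. Below is a toy model of that per-type accumulation; the table, enum and function names are invented for illustration and are not the XFS structures.

        #include <stdio.h>
        #include <stdint.h>

        enum qtype { Q_USER, Q_GROUP, Q_PROJ, Q_TYPES };

        struct toy_dquot {
                uint64_t bcount;        /* data blocks */
                uint64_t rtbcount;      /* realtime blocks */
                uint64_t icount;        /* inodes */
        };

        /* stand-in for dqget + adjust + dqput: one quota type at a time,
         * nothing held across calls */
        static int dqadjust(struct toy_dquot table[Q_TYPES], enum qtype type,
                            uint64_t nblks, uint64_t rtblks)
        {
                table[type].bcount += nblks;
                table[type].rtbcount += rtblks;
                table[type].icount += 1;
                return 0;
        }

        int main(void)
        {
                struct toy_dquot table[Q_TYPES] = { { 0 } };
                uint64_t di_nblocks = 120, rtblks = 20;
                uint64_t nblks = di_nblocks - rtblks;   /* data blocks exclude realtime blocks */

                for (int t = Q_USER; t < Q_TYPES; t++)
                        if (dqadjust(table, (enum qtype)t, nblks, rtblks))
                                break;                  /* bail out on the first error */

                printf("user: bcount=%llu rtbcount=%llu icount=%llu\n",
                       (unsigned long long)table[Q_USER].bcount,
                       (unsigned long long)table[Q_USER].rtbcount,
                       (unsigned long long)table[Q_USER].icount);
                return 0;
        }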
@@ -2224,7 +2141,7 @@ xfs_qm_write_sb_changes( | |||
2224 | 2141 | ||
2225 | 2142 | ||
2226 | /* | 2143 | /* |
2227 | * Given an inode, a uid and gid (from cred_t) make sure that we have | 2144 | * Given an inode, a uid, gid and prid, make sure that we have |
2228 | * allocated relevant dquot(s) on disk, and that we won't exceed inode | 2145 | * allocated relevant dquot(s) on disk, and that we won't exceed inode |
2229 | * quotas by creating this file. | 2146 | * quotas by creating this file. |
2230 | * This also attaches dquot(s) to the given inode after locking it, | 2147 | * This also attaches dquot(s) to the given inode after locking it, |
@@ -2332,7 +2249,7 @@ xfs_qm_vop_dqalloc( | |||
2332 | xfs_dqunlock(gq); | 2249 | xfs_dqunlock(gq); |
2333 | } | 2250 | } |
2334 | } else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) { | 2251 | } else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) { |
2335 | if (ip->i_d.di_projid != prid) { | 2252 | if (xfs_get_projid(ip) != prid) { |
2336 | xfs_iunlock(ip, lockflags); | 2253 | xfs_iunlock(ip, lockflags); |
2337 | if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid, | 2254 | if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid, |
2338 | XFS_DQ_PROJ, | 2255 | XFS_DQ_PROJ, |
@@ -2454,7 +2371,7 @@ xfs_qm_vop_chown_reserve( | |||
2454 | } | 2371 | } |
2455 | if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) { | 2372 | if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) { |
2456 | if (XFS_IS_PQUOTA_ON(ip->i_mount) && | 2373 | if (XFS_IS_PQUOTA_ON(ip->i_mount) && |
2457 | ip->i_d.di_projid != be32_to_cpu(gdqp->q_core.d_id)) | 2374 | xfs_get_projid(ip) != be32_to_cpu(gdqp->q_core.d_id)) |
2458 | prjflags = XFS_QMOPT_ENOSPC; | 2375 | prjflags = XFS_QMOPT_ENOSPC; |
2459 | 2376 | ||
2460 | if (prjflags || | 2377 | if (prjflags || |
@@ -2558,7 +2475,7 @@ xfs_qm_vop_create_dqattach( | |||
2558 | ip->i_gdquot = gdqp; | 2475 | ip->i_gdquot = gdqp; |
2559 | ASSERT(XFS_IS_OQUOTA_ON(mp)); | 2476 | ASSERT(XFS_IS_OQUOTA_ON(mp)); |
2560 | ASSERT((XFS_IS_GQUOTA_ON(mp) ? | 2477 | ASSERT((XFS_IS_GQUOTA_ON(mp) ? |
2561 | ip->i_d.di_gid : ip->i_d.di_projid) == | 2478 | ip->i_d.di_gid : xfs_get_projid(ip)) == |
2562 | be32_to_cpu(gdqp->q_core.d_id)); | 2479 | be32_to_cpu(gdqp->q_core.d_id)); |
2563 | xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); | 2480 | xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); |
2564 | } | 2481 | } |
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c index bea02d786c5d..45b5cb1788ab 100644 --- a/fs/xfs/quota/xfs_qm_bhv.c +++ b/fs/xfs/quota/xfs_qm_bhv.c | |||
@@ -81,7 +81,7 @@ xfs_qm_statvfs( | |||
81 | xfs_mount_t *mp = ip->i_mount; | 81 | xfs_mount_t *mp = ip->i_mount; |
82 | xfs_dquot_t *dqp; | 82 | xfs_dquot_t *dqp; |
83 | 83 | ||
84 | if (!xfs_qm_dqget(mp, NULL, ip->i_d.di_projid, XFS_DQ_PROJ, 0, &dqp)) { | 84 | if (!xfs_qm_dqget(mp, NULL, xfs_get_projid(ip), XFS_DQ_PROJ, 0, &dqp)) { |
85 | xfs_fill_statvfs_from_dquot(statp, &dqp->q_core); | 85 | xfs_fill_statvfs_from_dquot(statp, &dqp->q_core); |
86 | xfs_qm_dqput(dqp); | 86 | xfs_qm_dqput(dqp); |
87 | } | 87 | } |
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index 45e5849df238..bdebc183223e 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c | |||
@@ -276,7 +276,7 @@ xfs_qm_scall_trunc_qfile( | |||
276 | goto out_unlock; | 276 | goto out_unlock; |
277 | } | 277 | } |
278 | 278 | ||
279 | xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 279 | xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); |
280 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); | 280 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); |
281 | 281 | ||
282 | out_unlock: | 282 | out_unlock: |
@@ -875,21 +875,14 @@ xfs_dqrele_inode( | |||
875 | struct xfs_perag *pag, | 875 | struct xfs_perag *pag, |
876 | int flags) | 876 | int flags) |
877 | { | 877 | { |
878 | int error; | ||
879 | |||
880 | /* skip quota inodes */ | 878 | /* skip quota inodes */ |
881 | if (ip == ip->i_mount->m_quotainfo->qi_uquotaip || | 879 | if (ip == ip->i_mount->m_quotainfo->qi_uquotaip || |
882 | ip == ip->i_mount->m_quotainfo->qi_gquotaip) { | 880 | ip == ip->i_mount->m_quotainfo->qi_gquotaip) { |
883 | ASSERT(ip->i_udquot == NULL); | 881 | ASSERT(ip->i_udquot == NULL); |
884 | ASSERT(ip->i_gdquot == NULL); | 882 | ASSERT(ip->i_gdquot == NULL); |
885 | read_unlock(&pag->pag_ici_lock); | ||
886 | return 0; | 883 | return 0; |
887 | } | 884 | } |
888 | 885 | ||
889 | error = xfs_sync_inode_valid(ip, pag); | ||
890 | if (error) | ||
891 | return error; | ||
892 | |||
893 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 886 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
894 | if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) { | 887 | if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) { |
895 | xfs_qm_dqrele(ip->i_udquot); | 888 | xfs_qm_dqrele(ip->i_udquot); |
@@ -900,8 +893,6 @@ xfs_dqrele_inode( | |||
900 | ip->i_gdquot = NULL; | 893 | ip->i_gdquot = NULL; |
901 | } | 894 | } |
902 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 895 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
903 | |||
904 | IRELE(ip); | ||
905 | return 0; | 896 | return 0; |
906 | } | 897 | } |
907 | 898 | ||
@@ -918,8 +909,7 @@ xfs_qm_dqrele_all_inodes( | |||
918 | uint flags) | 909 | uint flags) |
919 | { | 910 | { |
920 | ASSERT(mp->m_quotainfo); | 911 | ASSERT(mp->m_quotainfo); |
921 | xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, | 912 | xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags); |
922 | XFS_ICI_NO_TAG, 0, NULL); | ||
923 | } | 913 | } |
924 | 914 | ||
925 | /*------------------------------------------------------------------------*/ | 915 | /*------------------------------------------------------------------------*/ |
@@ -1175,7 +1165,7 @@ xfs_qm_internalqcheck_adjust( | |||
1175 | } | 1165 | } |
1176 | xfs_qm_internalqcheck_get_dquots(mp, | 1166 | xfs_qm_internalqcheck_get_dquots(mp, |
1177 | (xfs_dqid_t) ip->i_d.di_uid, | 1167 | (xfs_dqid_t) ip->i_d.di_uid, |
1178 | (xfs_dqid_t) ip->i_d.di_projid, | 1168 | (xfs_dqid_t) xfs_get_projid(ip), |
1179 | (xfs_dqid_t) ip->i_d.di_gid, | 1169 | (xfs_dqid_t) ip->i_d.di_gid, |
1180 | &ud, &gd); | 1170 | &ud, &gd); |
1181 | if (XFS_IS_UQUOTA_ON(mp)) { | 1171 | if (XFS_IS_UQUOTA_ON(mp)) { |
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h index 4917d4eed4ed..63c7a1a6c022 100644 --- a/fs/xfs/xfs_ag.h +++ b/fs/xfs/xfs_ag.h | |||
@@ -230,6 +230,15 @@ typedef struct xfs_perag { | |||
230 | rwlock_t pag_ici_lock; /* incore inode lock */ | 230 | rwlock_t pag_ici_lock; /* incore inode lock */ |
231 | struct radix_tree_root pag_ici_root; /* incore inode cache root */ | 231 | struct radix_tree_root pag_ici_root; /* incore inode cache root */ |
232 | int pag_ici_reclaimable; /* reclaimable inodes */ | 232 | int pag_ici_reclaimable; /* reclaimable inodes */ |
233 | struct mutex pag_ici_reclaim_lock; /* serialisation point */ | ||
234 | unsigned long pag_ici_reclaim_cursor; /* reclaim restart point */ | ||
235 | |||
236 | /* buffer cache index */ | ||
237 | spinlock_t pag_buf_lock; /* lock for pag_buf_tree */ | ||
238 | struct rb_root pag_buf_tree; /* ordered tree of active buffers */ | ||
239 | |||
240 | /* for rcu-safe freeing */ | ||
241 | struct rcu_head rcu_head; | ||
233 | #endif | 242 | #endif |
234 | int pagb_count; /* pagb slots in use */ | 243 | int pagb_count; /* pagb slots in use */ |
235 | } xfs_perag_t; | 244 | } xfs_perag_t; |
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index af168faccc7a..112abc439ca5 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c | |||
@@ -675,7 +675,7 @@ xfs_alloc_ag_vextent_near( | |||
675 | xfs_agblock_t gtbnoa; /* aligned ... */ | 675 | xfs_agblock_t gtbnoa; /* aligned ... */ |
676 | xfs_extlen_t gtdiff; /* difference to right side entry */ | 676 | xfs_extlen_t gtdiff; /* difference to right side entry */ |
677 | xfs_extlen_t gtlen; /* length of right side entry */ | 677 | xfs_extlen_t gtlen; /* length of right side entry */ |
678 | xfs_extlen_t gtlena; /* aligned ... */ | 678 | xfs_extlen_t gtlena = 0; /* aligned ... */ |
679 | xfs_agblock_t gtnew; /* useful start bno of right side */ | 679 | xfs_agblock_t gtnew; /* useful start bno of right side */ |
680 | int error; /* error code */ | 680 | int error; /* error code */ |
681 | int i; /* result code, temporary */ | 681 | int i; /* result code, temporary */ |
@@ -684,7 +684,7 @@ xfs_alloc_ag_vextent_near( | |||
684 | xfs_agblock_t ltbnoa; /* aligned ... */ | 684 | xfs_agblock_t ltbnoa; /* aligned ... */ |
685 | xfs_extlen_t ltdiff; /* difference to left side entry */ | 685 | xfs_extlen_t ltdiff; /* difference to left side entry */ |
686 | xfs_extlen_t ltlen; /* length of left side entry */ | 686 | xfs_extlen_t ltlen; /* length of left side entry */ |
687 | xfs_extlen_t ltlena; /* aligned ... */ | 687 | xfs_extlen_t ltlena = 0; /* aligned ... */ |
688 | xfs_agblock_t ltnew; /* useful start bno of left side */ | 688 | xfs_agblock_t ltnew; /* useful start bno of left side */ |
689 | xfs_extlen_t rlen; /* length of returned extent */ | 689 | xfs_extlen_t rlen; /* length of returned extent */ |
690 | #if defined(DEBUG) && defined(__KERNEL__) | 690 | #if defined(DEBUG) && defined(__KERNEL__) |
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 97f7328967fd..3916925e2584 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c | |||
@@ -280,38 +280,6 @@ xfs_allocbt_key_diff( | |||
280 | return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock; | 280 | return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock; |
281 | } | 281 | } |
282 | 282 | ||
283 | STATIC int | ||
284 | xfs_allocbt_kill_root( | ||
285 | struct xfs_btree_cur *cur, | ||
286 | struct xfs_buf *bp, | ||
287 | int level, | ||
288 | union xfs_btree_ptr *newroot) | ||
289 | { | ||
290 | int error; | ||
291 | |||
292 | XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); | ||
293 | XFS_BTREE_STATS_INC(cur, killroot); | ||
294 | |||
295 | /* | ||
296 | * Update the root pointer, decreasing the level by 1 and then | ||
297 | * free the old root. | ||
298 | */ | ||
299 | xfs_allocbt_set_root(cur, newroot, -1); | ||
300 | error = xfs_allocbt_free_block(cur, bp); | ||
301 | if (error) { | ||
302 | XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); | ||
303 | return error; | ||
304 | } | ||
305 | |||
306 | XFS_BTREE_STATS_INC(cur, free); | ||
307 | |||
308 | xfs_btree_setbuf(cur, level, NULL); | ||
309 | cur->bc_nlevels--; | ||
310 | |||
311 | XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); | ||
312 | return 0; | ||
313 | } | ||
314 | |||
315 | #ifdef DEBUG | 283 | #ifdef DEBUG |
316 | STATIC int | 284 | STATIC int |
317 | xfs_allocbt_keys_inorder( | 285 | xfs_allocbt_keys_inorder( |
@@ -423,7 +391,6 @@ static const struct xfs_btree_ops xfs_allocbt_ops = { | |||
423 | 391 | ||
424 | .dup_cursor = xfs_allocbt_dup_cursor, | 392 | .dup_cursor = xfs_allocbt_dup_cursor, |
425 | .set_root = xfs_allocbt_set_root, | 393 | .set_root = xfs_allocbt_set_root, |
426 | .kill_root = xfs_allocbt_kill_root, | ||
427 | .alloc_block = xfs_allocbt_alloc_block, | 394 | .alloc_block = xfs_allocbt_alloc_block, |
428 | .free_block = xfs_allocbt_free_block, | 395 | .free_block = xfs_allocbt_free_block, |
429 | .update_lastrec = xfs_allocbt_update_lastrec, | 396 | .update_lastrec = xfs_allocbt_update_lastrec, |
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index c2568242a901..c86375378810 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c | |||
@@ -355,16 +355,15 @@ xfs_attr_set_int( | |||
355 | if (mp->m_flags & XFS_MOUNT_WSYNC) { | 355 | if (mp->m_flags & XFS_MOUNT_WSYNC) { |
356 | xfs_trans_set_sync(args.trans); | 356 | xfs_trans_set_sync(args.trans); |
357 | } | 357 | } |
358 | |||
359 | if (!error && (flags & ATTR_KERNOTIME) == 0) { | ||
360 | xfs_trans_ichgtime(args.trans, dp, | ||
361 | XFS_ICHGTIME_CHG); | ||
362 | } | ||
358 | err2 = xfs_trans_commit(args.trans, | 363 | err2 = xfs_trans_commit(args.trans, |
359 | XFS_TRANS_RELEASE_LOG_RES); | 364 | XFS_TRANS_RELEASE_LOG_RES); |
360 | xfs_iunlock(dp, XFS_ILOCK_EXCL); | 365 | xfs_iunlock(dp, XFS_ILOCK_EXCL); |
361 | 366 | ||
362 | /* | ||
363 | * Hit the inode change time. | ||
364 | */ | ||
365 | if (!error && (flags & ATTR_KERNOTIME) == 0) { | ||
366 | xfs_ichgtime(dp, XFS_ICHGTIME_CHG); | ||
367 | } | ||
368 | return(error == 0 ? err2 : error); | 367 | return(error == 0 ? err2 : error); |
369 | } | 368 | } |
370 | 369 | ||
@@ -420,6 +419,9 @@ xfs_attr_set_int( | |||
420 | xfs_trans_set_sync(args.trans); | 419 | xfs_trans_set_sync(args.trans); |
421 | } | 420 | } |
422 | 421 | ||
422 | if ((flags & ATTR_KERNOTIME) == 0) | ||
423 | xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG); | ||
424 | |||
423 | /* | 425 | /* |
424 | * Commit the last in the sequence of transactions. | 426 | * Commit the last in the sequence of transactions. |
425 | */ | 427 | */ |
@@ -427,13 +429,6 @@ xfs_attr_set_int( | |||
427 | error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES); | 429 | error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES); |
428 | xfs_iunlock(dp, XFS_ILOCK_EXCL); | 430 | xfs_iunlock(dp, XFS_ILOCK_EXCL); |
429 | 431 | ||
430 | /* | ||
431 | * Hit the inode change time. | ||
432 | */ | ||
433 | if (!error && (flags & ATTR_KERNOTIME) == 0) { | ||
434 | xfs_ichgtime(dp, XFS_ICHGTIME_CHG); | ||
435 | } | ||
436 | |||
437 | return(error); | 432 | return(error); |
438 | 433 | ||
439 | out: | 434 | out: |
@@ -567,6 +562,9 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags) | |||
567 | xfs_trans_set_sync(args.trans); | 562 | xfs_trans_set_sync(args.trans); |
568 | } | 563 | } |
569 | 564 | ||
565 | if ((flags & ATTR_KERNOTIME) == 0) | ||
566 | xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG); | ||
567 | |||
570 | /* | 568 | /* |
571 | * Commit the last in the sequence of transactions. | 569 | * Commit the last in the sequence of transactions. |
572 | */ | 570 | */ |
@@ -574,13 +572,6 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags) | |||
574 | error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES); | 572 | error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES); |
575 | xfs_iunlock(dp, XFS_ILOCK_EXCL); | 573 | xfs_iunlock(dp, XFS_ILOCK_EXCL); |
576 | 574 | ||
577 | /* | ||
578 | * Hit the inode change time. | ||
579 | */ | ||
580 | if (!error && (flags & ATTR_KERNOTIME) == 0) { | ||
581 | xfs_ichgtime(dp, XFS_ICHGTIME_CHG); | ||
582 | } | ||
583 | |||
584 | return(error); | 575 | return(error); |
585 | 576 | ||
586 | out: | 577 | out: |
@@ -1995,7 +1986,7 @@ xfs_attr_rmtval_get(xfs_da_args_t *args) | |||
1995 | 1986 | ||
1996 | tmp = (valuelen < XFS_BUF_SIZE(bp)) | 1987 | tmp = (valuelen < XFS_BUF_SIZE(bp)) |
1997 | ? valuelen : XFS_BUF_SIZE(bp); | 1988 | ? valuelen : XFS_BUF_SIZE(bp); |
1998 | xfs_biomove(bp, 0, tmp, dst, XBF_READ); | 1989 | xfs_buf_iomove(bp, 0, tmp, dst, XBRW_READ); |
1999 | xfs_buf_relse(bp); | 1990 | xfs_buf_relse(bp); |
2000 | dst += tmp; | 1991 | dst += tmp; |
2001 | valuelen -= tmp; | 1992 | valuelen -= tmp; |
@@ -2125,9 +2116,9 @@ xfs_attr_rmtval_set(xfs_da_args_t *args) | |||
2125 | 2116 | ||
2126 | tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen : | 2117 | tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen : |
2127 | XFS_BUF_SIZE(bp); | 2118 | XFS_BUF_SIZE(bp); |
2128 | xfs_biomove(bp, 0, tmp, src, XBF_WRITE); | 2119 | xfs_buf_iomove(bp, 0, tmp, src, XBRW_WRITE); |
2129 | if (tmp < XFS_BUF_SIZE(bp)) | 2120 | if (tmp < XFS_BUF_SIZE(bp)) |
2130 | xfs_biozero(bp, tmp, XFS_BUF_SIZE(bp) - tmp); | 2121 | xfs_buf_zero(bp, tmp, XFS_BUF_SIZE(bp) - tmp); |
2131 | if ((error = xfs_bwrite(mp, bp))) {/* GROT: NOTE: synchronous write */ | 2122 | if ((error = xfs_bwrite(mp, bp))) {/* GROT: NOTE: synchronous write */ |
2132 | return (error); | 2123 | return (error); |
2133 | } | 2124 | } |
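
The ATTR_KERNOTIME hunks above all make the same change: the inode change time is now logged inside the transaction with xfs_trans_ichgtime() before xfs_trans_commit(), instead of being applied with xfs_ichgtime() after the commit and unlock. A condensed sketch of the resulting pattern, a simplification that uses only names appearing in the hunks above (args, dp, flags, mp, error):

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(args.trans);

	/* the timestamp update is part of the transaction, so it is logged with it */
	if ((flags & ATTR_KERNOTIME) == 0)
		xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG);

	error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES);
	xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return error;
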
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index f90dadd5a968..8abd12e32e13 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c | |||
@@ -614,7 +614,7 @@ xfs_bmap_add_extent( | |||
614 | nblks += cur->bc_private.b.allocated; | 614 | nblks += cur->bc_private.b.allocated; |
615 | ASSERT(nblks <= da_old); | 615 | ASSERT(nblks <= da_old); |
616 | if (nblks < da_old) | 616 | if (nblks < da_old) |
617 | xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, | 617 | xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS, |
618 | (int64_t)(da_old - nblks), rsvd); | 618 | (int64_t)(da_old - nblks), rsvd); |
619 | } | 619 | } |
620 | /* | 620 | /* |
@@ -1079,7 +1079,8 @@ xfs_bmap_add_extent_delay_real( | |||
1079 | diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) - | 1079 | diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) - |
1080 | (cur ? cur->bc_private.b.allocated : 0)); | 1080 | (cur ? cur->bc_private.b.allocated : 0)); |
1081 | if (diff > 0 && | 1081 | if (diff > 0 && |
1082 | xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd)) { | 1082 | xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS, |
1083 | -((int64_t)diff), rsvd)) { | ||
1083 | /* | 1084 | /* |
1084 | * Ick gross gag me with a spoon. | 1085 | * Ick gross gag me with a spoon. |
1085 | */ | 1086 | */ |
@@ -1089,16 +1090,18 @@ xfs_bmap_add_extent_delay_real( | |||
1089 | temp--; | 1090 | temp--; |
1090 | diff--; | 1091 | diff--; |
1091 | if (!diff || | 1092 | if (!diff || |
1092 | !xfs_mod_incore_sb(ip->i_mount, | 1093 | !xfs_icsb_modify_counters(ip->i_mount, |
1093 | XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd)) | 1094 | XFS_SBS_FDBLOCKS, |
1095 | -((int64_t)diff), rsvd)) | ||
1094 | break; | 1096 | break; |
1095 | } | 1097 | } |
1096 | if (temp2) { | 1098 | if (temp2) { |
1097 | temp2--; | 1099 | temp2--; |
1098 | diff--; | 1100 | diff--; |
1099 | if (!diff || | 1101 | if (!diff || |
1100 | !xfs_mod_incore_sb(ip->i_mount, | 1102 | !xfs_icsb_modify_counters(ip->i_mount, |
1101 | XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd)) | 1103 | XFS_SBS_FDBLOCKS, |
1104 | -((int64_t)diff), rsvd)) | ||
1102 | break; | 1105 | break; |
1103 | } | 1106 | } |
1104 | } | 1107 | } |
@@ -1766,7 +1769,7 @@ xfs_bmap_add_extent_hole_delay( | |||
1766 | } | 1769 | } |
1767 | if (oldlen != newlen) { | 1770 | if (oldlen != newlen) { |
1768 | ASSERT(oldlen > newlen); | 1771 | ASSERT(oldlen > newlen); |
1769 | xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, | 1772 | xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS, |
1770 | (int64_t)(oldlen - newlen), rsvd); | 1773 | (int64_t)(oldlen - newlen), rsvd); |
1771 | /* | 1774 | /* |
1772 | * Nothing to do for disk quota accounting here. | 1775 | * Nothing to do for disk quota accounting here. |
@@ -3111,9 +3114,10 @@ xfs_bmap_del_extent( | |||
3111 | * Nothing to do for disk quota accounting here. | 3114 | * Nothing to do for disk quota accounting here. |
3112 | */ | 3115 | */ |
3113 | ASSERT(da_old >= da_new); | 3116 | ASSERT(da_old >= da_new); |
3114 | if (da_old > da_new) | 3117 | if (da_old > da_new) { |
3115 | xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int64_t)(da_old - da_new), | 3118 | xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, |
3116 | rsvd); | 3119 | (int64_t)(da_old - da_new), rsvd); |
3120 | } | ||
3117 | done: | 3121 | done: |
3118 | *logflagsp = flags; | 3122 | *logflagsp = flags; |
3119 | return error; | 3123 | return error; |
@@ -4526,13 +4530,13 @@ xfs_bmapi( | |||
4526 | -((int64_t)extsz), (flags & | 4530 | -((int64_t)extsz), (flags & |
4527 | XFS_BMAPI_RSVBLOCKS)); | 4531 | XFS_BMAPI_RSVBLOCKS)); |
4528 | } else { | 4532 | } else { |
4529 | error = xfs_mod_incore_sb(mp, | 4533 | error = xfs_icsb_modify_counters(mp, |
4530 | XFS_SBS_FDBLOCKS, | 4534 | XFS_SBS_FDBLOCKS, |
4531 | -((int64_t)alen), (flags & | 4535 | -((int64_t)alen), (flags & |
4532 | XFS_BMAPI_RSVBLOCKS)); | 4536 | XFS_BMAPI_RSVBLOCKS)); |
4533 | } | 4537 | } |
4534 | if (!error) { | 4538 | if (!error) { |
4535 | error = xfs_mod_incore_sb(mp, | 4539 | error = xfs_icsb_modify_counters(mp, |
4536 | XFS_SBS_FDBLOCKS, | 4540 | XFS_SBS_FDBLOCKS, |
4537 | -((int64_t)indlen), (flags & | 4541 | -((int64_t)indlen), (flags & |
4538 | XFS_BMAPI_RSVBLOCKS)); | 4542 | XFS_BMAPI_RSVBLOCKS)); |
@@ -4542,7 +4546,7 @@ xfs_bmapi( | |||
4542 | (int64_t)extsz, (flags & | 4546 | (int64_t)extsz, (flags & |
4543 | XFS_BMAPI_RSVBLOCKS)); | 4547 | XFS_BMAPI_RSVBLOCKS)); |
4544 | else if (error) | 4548 | else if (error) |
4545 | xfs_mod_incore_sb(mp, | 4549 | xfs_icsb_modify_counters(mp, |
4546 | XFS_SBS_FDBLOCKS, | 4550 | XFS_SBS_FDBLOCKS, |
4547 | (int64_t)alen, (flags & | 4551 | (int64_t)alen, (flags & |
4548 | XFS_BMAPI_RSVBLOCKS)); | 4552 | XFS_BMAPI_RSVBLOCKS)); |
@@ -4744,8 +4748,12 @@ xfs_bmapi( | |||
4744 | * Check if writing previously allocated but | 4748 | * Check if writing previously allocated but |
4745 | * unwritten extents. | 4749 | * unwritten extents. |
4746 | */ | 4750 | */ |
4747 | if (wr && mval->br_state == XFS_EXT_UNWRITTEN && | 4751 | if (wr && |
4748 | ((flags & (XFS_BMAPI_PREALLOC|XFS_BMAPI_DELAY)) == 0)) { | 4752 | ((mval->br_state == XFS_EXT_UNWRITTEN && |
4753 | ((flags & (XFS_BMAPI_PREALLOC|XFS_BMAPI_DELAY)) == 0)) || | ||
4754 | (mval->br_state == XFS_EXT_NORM && | ||
4755 | ((flags & (XFS_BMAPI_PREALLOC|XFS_BMAPI_CONVERT)) == | ||
4756 | (XFS_BMAPI_PREALLOC|XFS_BMAPI_CONVERT))))) { | ||
4749 | /* | 4757 | /* |
4750 | * Modify (by adding) the state flag, if writing. | 4758 | * Modify (by adding) the state flag, if writing. |
4751 | */ | 4759 | */ |
@@ -4757,7 +4765,9 @@ xfs_bmapi( | |||
4757 | *firstblock; | 4765 | *firstblock; |
4758 | cur->bc_private.b.flist = flist; | 4766 | cur->bc_private.b.flist = flist; |
4759 | } | 4767 | } |
4760 | mval->br_state = XFS_EXT_NORM; | 4768 | mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN) |
4769 | ? XFS_EXT_NORM | ||
4770 | : XFS_EXT_UNWRITTEN; | ||
4761 | error = xfs_bmap_add_extent(ip, lastx, &cur, mval, | 4771 | error = xfs_bmap_add_extent(ip, lastx, &cur, mval, |
4762 | firstblock, flist, &tmp_logflags, | 4772 | firstblock, flist, &tmp_logflags, |
4763 | whichfork, (flags & XFS_BMAPI_RSVBLOCKS)); | 4773 | whichfork, (flags & XFS_BMAPI_RSVBLOCKS)); |
@@ -5200,7 +5210,7 @@ xfs_bunmapi( | |||
5200 | ip, -((long)del.br_blockcount), 0, | 5210 | ip, -((long)del.br_blockcount), 0, |
5201 | XFS_QMOPT_RES_RTBLKS); | 5211 | XFS_QMOPT_RES_RTBLKS); |
5202 | } else { | 5212 | } else { |
5203 | xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, | 5213 | xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, |
5204 | (int64_t)del.br_blockcount, rsvd); | 5214 | (int64_t)del.br_blockcount, rsvd); |
5205 | (void)xfs_trans_reserve_quota_nblks(NULL, | 5215 | (void)xfs_trans_reserve_quota_nblks(NULL, |
5206 | ip, -((long)del.br_blockcount), 0, | 5216 | ip, -((long)del.br_blockcount), 0, |
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h index b13569a6179b..71ec9b6ecdfc 100644 --- a/fs/xfs/xfs_bmap.h +++ b/fs/xfs/xfs_bmap.h | |||
@@ -74,9 +74,12 @@ typedef struct xfs_bmap_free | |||
74 | #define XFS_BMAPI_IGSTATE 0x080 /* Ignore state - */ | 74 | #define XFS_BMAPI_IGSTATE 0x080 /* Ignore state - */ |
75 | /* combine contig. space */ | 75 | /* combine contig. space */ |
76 | #define XFS_BMAPI_CONTIG 0x100 /* must allocate only one extent */ | 76 | #define XFS_BMAPI_CONTIG 0x100 /* must allocate only one extent */ |
77 | #define XFS_BMAPI_CONVERT 0x200 /* unwritten extent conversion - */ | 77 | /* |
78 | /* need write cache flushing and no */ | 78 | * unwritten extent conversion - this needs write cache flushing and no additional |
79 | /* additional allocation alignments */ | 79 | * allocation alignments. When specified with XFS_BMAPI_PREALLOC it converts |
80 | * from written to unwritten, otherwise it converts from unwritten to written. | ||
81 | */ | ||
82 | #define XFS_BMAPI_CONVERT 0x200 | ||
80 | 83 | ||
81 | #define XFS_BMAPI_FLAGS \ | 84 | #define XFS_BMAPI_FLAGS \ |
82 | { XFS_BMAPI_WRITE, "WRITE" }, \ | 85 | { XFS_BMAPI_WRITE, "WRITE" }, \ |
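
The flag semantics spelled out in this new comment are what the reworked check in xfs_bmapi() (see the xfs_bmap.c hunks above) implements. A condensed sketch of that decision, with the surrounding write-path plumbing dropped and names taken from those hunks:

	/*
	 * Unwritten -> written: a plain write over an unwritten extent,
	 * with neither XFS_BMAPI_PREALLOC nor XFS_BMAPI_DELAY set.
	 * Written -> unwritten: the caller passed PREALLOC and CONVERT together.
	 */
	if (mval->br_state == XFS_EXT_UNWRITTEN &&
	    !(flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_DELAY)))
		mval->br_state = XFS_EXT_NORM;
	else if (mval->br_state == XFS_EXT_NORM &&
		 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) ==
			  (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
		mval->br_state = XFS_EXT_UNWRITTEN;
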
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 829af92f0fba..04f9cca8da7e 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c | |||
@@ -217,7 +217,7 @@ xfs_btree_del_cursor( | |||
217 | */ | 217 | */ |
218 | for (i = 0; i < cur->bc_nlevels; i++) { | 218 | for (i = 0; i < cur->bc_nlevels; i++) { |
219 | if (cur->bc_bufs[i]) | 219 | if (cur->bc_bufs[i]) |
220 | xfs_btree_setbuf(cur, i, NULL); | 220 | xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]); |
221 | else if (!error) | 221 | else if (!error) |
222 | break; | 222 | break; |
223 | } | 223 | } |
@@ -656,7 +656,7 @@ xfs_btree_reada_bufl( | |||
656 | 656 | ||
657 | ASSERT(fsbno != NULLFSBLOCK); | 657 | ASSERT(fsbno != NULLFSBLOCK); |
658 | d = XFS_FSB_TO_DADDR(mp, fsbno); | 658 | d = XFS_FSB_TO_DADDR(mp, fsbno); |
659 | xfs_baread(mp->m_ddev_targp, d, mp->m_bsize * count); | 659 | xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count); |
660 | } | 660 | } |
661 | 661 | ||
662 | /* | 662 | /* |
@@ -676,7 +676,7 @@ xfs_btree_reada_bufs( | |||
676 | ASSERT(agno != NULLAGNUMBER); | 676 | ASSERT(agno != NULLAGNUMBER); |
677 | ASSERT(agbno != NULLAGBLOCK); | 677 | ASSERT(agbno != NULLAGBLOCK); |
678 | d = XFS_AGB_TO_DADDR(mp, agno, agbno); | 678 | d = XFS_AGB_TO_DADDR(mp, agno, agbno); |
679 | xfs_baread(mp->m_ddev_targp, d, mp->m_bsize * count); | 679 | xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count); |
680 | } | 680 | } |
681 | 681 | ||
682 | STATIC int | 682 | STATIC int |
@@ -763,22 +763,19 @@ xfs_btree_readahead( | |||
763 | * Set the buffer for level "lev" in the cursor to bp, releasing | 763 | * Set the buffer for level "lev" in the cursor to bp, releasing |
764 | * any previous buffer. | 764 | * any previous buffer. |
765 | */ | 765 | */ |
766 | void | 766 | STATIC void |
767 | xfs_btree_setbuf( | 767 | xfs_btree_setbuf( |
768 | xfs_btree_cur_t *cur, /* btree cursor */ | 768 | xfs_btree_cur_t *cur, /* btree cursor */ |
769 | int lev, /* level in btree */ | 769 | int lev, /* level in btree */ |
770 | xfs_buf_t *bp) /* new buffer to set */ | 770 | xfs_buf_t *bp) /* new buffer to set */ |
771 | { | 771 | { |
772 | struct xfs_btree_block *b; /* btree block */ | 772 | struct xfs_btree_block *b; /* btree block */ |
773 | xfs_buf_t *obp; /* old buffer pointer */ | ||
774 | 773 | ||
775 | obp = cur->bc_bufs[lev]; | 774 | if (cur->bc_bufs[lev]) |
776 | if (obp) | 775 | xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[lev]); |
777 | xfs_trans_brelse(cur->bc_tp, obp); | ||
778 | cur->bc_bufs[lev] = bp; | 776 | cur->bc_bufs[lev] = bp; |
779 | cur->bc_ra[lev] = 0; | 777 | cur->bc_ra[lev] = 0; |
780 | if (!bp) | 778 | |
781 | return; | ||
782 | b = XFS_BUF_TO_BLOCK(bp); | 779 | b = XFS_BUF_TO_BLOCK(bp); |
783 | if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { | 780 | if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { |
784 | if (be64_to_cpu(b->bb_u.l.bb_leftsib) == NULLDFSBNO) | 781 | if (be64_to_cpu(b->bb_u.l.bb_leftsib) == NULLDFSBNO) |
@@ -3011,6 +3008,43 @@ out0: | |||
3011 | return 0; | 3008 | return 0; |
3012 | } | 3009 | } |
3013 | 3010 | ||
3011 | /* | ||
3012 | * Kill the current root node, and replace it with its only child node. | ||
3013 | */ | ||
3014 | STATIC int | ||
3015 | xfs_btree_kill_root( | ||
3016 | struct xfs_btree_cur *cur, | ||
3017 | struct xfs_buf *bp, | ||
3018 | int level, | ||
3019 | union xfs_btree_ptr *newroot) | ||
3020 | { | ||
3021 | int error; | ||
3022 | |||
3023 | XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); | ||
3024 | XFS_BTREE_STATS_INC(cur, killroot); | ||
3025 | |||
3026 | /* | ||
3027 | * Update the root pointer, decreasing the level by 1 and then | ||
3028 | * free the old root. | ||
3029 | */ | ||
3030 | cur->bc_ops->set_root(cur, newroot, -1); | ||
3031 | |||
3032 | error = cur->bc_ops->free_block(cur, bp); | ||
3033 | if (error) { | ||
3034 | XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); | ||
3035 | return error; | ||
3036 | } | ||
3037 | |||
3038 | XFS_BTREE_STATS_INC(cur, free); | ||
3039 | |||
3040 | cur->bc_bufs[level] = NULL; | ||
3041 | cur->bc_ra[level] = 0; | ||
3042 | cur->bc_nlevels--; | ||
3043 | |||
3044 | XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); | ||
3045 | return 0; | ||
3046 | } | ||
3047 | |||
3014 | STATIC int | 3048 | STATIC int |
3015 | xfs_btree_dec_cursor( | 3049 | xfs_btree_dec_cursor( |
3016 | struct xfs_btree_cur *cur, | 3050 | struct xfs_btree_cur *cur, |
@@ -3195,7 +3229,7 @@ xfs_btree_delrec( | |||
3195 | * Make it the new root of the btree. | 3229 | * Make it the new root of the btree. |
3196 | */ | 3230 | */ |
3197 | pp = xfs_btree_ptr_addr(cur, 1, block); | 3231 | pp = xfs_btree_ptr_addr(cur, 1, block); |
3198 | error = cur->bc_ops->kill_root(cur, bp, level, pp); | 3232 | error = xfs_btree_kill_root(cur, bp, level, pp); |
3199 | if (error) | 3233 | if (error) |
3200 | goto error0; | 3234 | goto error0; |
3201 | } else if (level > 0) { | 3235 | } else if (level > 0) { |
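
With xfs_btree_kill_root() now living in the generic btree code and dispatching through cur->bc_ops, the per-btree copies (removed from xfs_alloc_btree.c above and from xfs_ialloc_btree.c below) are redundant: a btree type only has to supply set_root and free_block. An illustrative ops table for a hypothetical btree type, with field names taken from the xfs_btree_ops hunks in this diff:

static const struct xfs_btree_ops xfs_examplebt_ops = {
	.dup_cursor	= xfs_examplebt_dup_cursor,
	.set_root	= xfs_examplebt_set_root,	/* used by xfs_btree_kill_root() */
	.alloc_block	= xfs_examplebt_alloc_block,
	.free_block	= xfs_examplebt_free_block,	/* used by xfs_btree_kill_root() */
	/* ... remaining callbacks as before; note there is no .kill_root ... */
};
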
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 7fa07062bdda..82fafc66bd1f 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h | |||
@@ -152,9 +152,7 @@ struct xfs_btree_ops { | |||
152 | 152 | ||
153 | /* update btree root pointer */ | 153 | /* update btree root pointer */ |
154 | void (*set_root)(struct xfs_btree_cur *cur, | 154 | void (*set_root)(struct xfs_btree_cur *cur, |
155 | union xfs_btree_ptr *nptr, int level_change); | 155 | union xfs_btree_ptr *nptr, int level_change); |
156 | int (*kill_root)(struct xfs_btree_cur *cur, struct xfs_buf *bp, | ||
157 | int level, union xfs_btree_ptr *newroot); | ||
158 | 156 | ||
159 | /* block allocation / freeing */ | 157 | /* block allocation / freeing */ |
160 | int (*alloc_block)(struct xfs_btree_cur *cur, | 158 | int (*alloc_block)(struct xfs_btree_cur *cur, |
@@ -399,16 +397,6 @@ xfs_btree_reada_bufs( | |||
399 | xfs_agblock_t agbno, /* allocation group block number */ | 397 | xfs_agblock_t agbno, /* allocation group block number */ |
400 | xfs_extlen_t count); /* count of filesystem blocks */ | 398 | xfs_extlen_t count); /* count of filesystem blocks */ |
401 | 399 | ||
402 | /* | ||
403 | * Set the buffer for level "lev" in the cursor to bp, releasing | ||
404 | * any previous buffer. | ||
405 | */ | ||
406 | void | ||
407 | xfs_btree_setbuf( | ||
408 | xfs_btree_cur_t *cur, /* btree cursor */ | ||
409 | int lev, /* level in btree */ | ||
410 | struct xfs_buf *bp); /* new buffer to set */ | ||
411 | |||
412 | 400 | ||
413 | /* | 401 | /* |
414 | * Common btree core entry points. | 402 | * Common btree core entry points. |
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 1b09d7a280df..2686d0d54c5b 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c | |||
@@ -692,8 +692,7 @@ xfs_buf_item_init( | |||
692 | * the first. If we do already have one, there is | 692 | * the first. If we do already have one, there is |
693 | * nothing to do here so return. | 693 | * nothing to do here so return. |
694 | */ | 694 | */ |
695 | if (bp->b_mount != mp) | 695 | ASSERT(bp->b_target->bt_mount == mp); |
696 | bp->b_mount = mp; | ||
697 | if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { | 696 | if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { |
698 | lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); | 697 | lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); |
699 | if (lip->li_type == XFS_LI_BUF) { | 698 | if (lip->li_type == XFS_LI_BUF) { |
@@ -974,7 +973,7 @@ xfs_buf_iodone_callbacks( | |||
974 | xfs_buf_do_callbacks(bp, lip); | 973 | xfs_buf_do_callbacks(bp, lip); |
975 | XFS_BUF_SET_FSPRIVATE(bp, NULL); | 974 | XFS_BUF_SET_FSPRIVATE(bp, NULL); |
976 | XFS_BUF_CLR_IODONE_FUNC(bp); | 975 | XFS_BUF_CLR_IODONE_FUNC(bp); |
977 | xfs_biodone(bp); | 976 | xfs_buf_ioend(bp, 0); |
978 | return; | 977 | return; |
979 | } | 978 | } |
980 | 979 | ||
@@ -1033,7 +1032,7 @@ xfs_buf_iodone_callbacks( | |||
1033 | xfs_buf_do_callbacks(bp, lip); | 1032 | xfs_buf_do_callbacks(bp, lip); |
1034 | XFS_BUF_SET_FSPRIVATE(bp, NULL); | 1033 | XFS_BUF_SET_FSPRIVATE(bp, NULL); |
1035 | XFS_BUF_CLR_IODONE_FUNC(bp); | 1034 | XFS_BUF_CLR_IODONE_FUNC(bp); |
1036 | xfs_biodone(bp); | 1035 | xfs_buf_ioend(bp, 0); |
1037 | } | 1036 | } |
1038 | 1037 | ||
1039 | /* | 1038 | /* |
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index 30fa0e206fba..1c00bedb3175 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c | |||
@@ -2042,7 +2042,7 @@ xfs_da_do_buf( | |||
2042 | mappedbno, nmapped, 0, &bp); | 2042 | mappedbno, nmapped, 0, &bp); |
2043 | break; | 2043 | break; |
2044 | case 3: | 2044 | case 3: |
2045 | xfs_baread(mp->m_ddev_targp, mappedbno, nmapped); | 2045 | xfs_buf_readahead(mp->m_ddev_targp, mappedbno, nmapped); |
2046 | error = 0; | 2046 | error = 0; |
2047 | bp = NULL; | 2047 | bp = NULL; |
2048 | break; | 2048 | break; |
diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h index e5b153b2e6a3..dffba9ba0db6 100644 --- a/fs/xfs/xfs_dinode.h +++ b/fs/xfs/xfs_dinode.h | |||
@@ -49,8 +49,9 @@ typedef struct xfs_dinode { | |||
49 | __be32 di_uid; /* owner's user id */ | 49 | __be32 di_uid; /* owner's user id */ |
50 | __be32 di_gid; /* owner's group id */ | 50 | __be32 di_gid; /* owner's group id */ |
51 | __be32 di_nlink; /* number of links to file */ | 51 | __be32 di_nlink; /* number of links to file */ |
52 | __be16 di_projid; /* owner's project id */ | 52 | __be16 di_projid_lo; /* lower part of owner's project id */ |
53 | __u8 di_pad[8]; /* unused, zeroed space */ | 53 | __be16 di_projid_hi; /* higher part of owner's project id */ |
54 | __u8 di_pad[6]; /* unused, zeroed space */ | ||
54 | __be16 di_flushiter; /* incremented on flush */ | 55 | __be16 di_flushiter; /* incremented on flush */ |
55 | xfs_timestamp_t di_atime; /* time last accessed */ | 56 | xfs_timestamp_t di_atime; /* time last accessed */ |
56 | xfs_timestamp_t di_mtime; /* time last modified */ | 57 | xfs_timestamp_t di_mtime; /* time last modified */ |
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c index 504be8640e91..ae891223be90 100644 --- a/fs/xfs/xfs_dir2_leaf.c +++ b/fs/xfs/xfs_dir2_leaf.c | |||
@@ -961,7 +961,7 @@ xfs_dir2_leaf_getdents( | |||
961 | if (i > ra_current && | 961 | if (i > ra_current && |
962 | map[ra_index].br_blockcount >= | 962 | map[ra_index].br_blockcount >= |
963 | mp->m_dirblkfsbs) { | 963 | mp->m_dirblkfsbs) { |
964 | xfs_baread(mp->m_ddev_targp, | 964 | xfs_buf_readahead(mp->m_ddev_targp, |
965 | XFS_FSB_TO_DADDR(mp, | 965 | XFS_FSB_TO_DADDR(mp, |
966 | map[ra_index].br_startblock + | 966 | map[ra_index].br_startblock + |
967 | ra_offset), | 967 | ra_offset), |
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h index 87c2e9d02288..8f6fc1a96386 100644 --- a/fs/xfs/xfs_fs.h +++ b/fs/xfs/xfs_fs.h | |||
@@ -293,9 +293,11 @@ typedef struct xfs_bstat { | |||
293 | __s32 bs_extsize; /* extent size */ | 293 | __s32 bs_extsize; /* extent size */ |
294 | __s32 bs_extents; /* number of extents */ | 294 | __s32 bs_extents; /* number of extents */ |
295 | __u32 bs_gen; /* generation count */ | 295 | __u32 bs_gen; /* generation count */ |
296 | __u16 bs_projid; /* project id */ | 296 | __u16 bs_projid_lo; /* lower part of project id */ |
297 | #define bs_projid bs_projid_lo /* (previously just bs_projid) */ | ||
297 | __u16 bs_forkoff; /* inode fork offset in bytes */ | 298 | __u16 bs_forkoff; /* inode fork offset in bytes */ |
298 | unsigned char bs_pad[12]; /* pad space, unused */ | 299 | __u16 bs_projid_hi; /* higher part of project id */ |
300 | unsigned char bs_pad[10]; /* pad space, unused */ | ||
299 | __u32 bs_dmevmask; /* DMIG event mask */ | 301 | __u32 bs_dmevmask; /* DMIG event mask */ |
300 | __u16 bs_dmstate; /* DMIG state info */ | 302 | __u16 bs_dmstate; /* DMIG state info */ |
301 | __u16 bs_aextents; /* attribute number of extents */ | 303 | __u16 bs_aextents; /* attribute number of extents */ |
@@ -448,6 +450,7 @@ typedef struct xfs_handle { | |||
448 | /* XFS_IOC_SETBIOSIZE ---- deprecated 46 */ | 450 | /* XFS_IOC_SETBIOSIZE ---- deprecated 46 */ |
449 | /* XFS_IOC_GETBIOSIZE ---- deprecated 47 */ | 451 | /* XFS_IOC_GETBIOSIZE ---- deprecated 47 */ |
450 | #define XFS_IOC_GETBMAPX _IOWR('X', 56, struct getbmap) | 452 | #define XFS_IOC_GETBMAPX _IOWR('X', 56, struct getbmap) |
453 | #define XFS_IOC_ZERO_RANGE _IOW ('X', 57, struct xfs_flock64) | ||
451 | 454 | ||
452 | /* | 455 | /* |
453 | * ioctl commands that replace IRIX syssgi()'s | 456 | * ioctl commands that replace IRIX syssgi()'s |
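
Because bs_projid_hi reuses two bytes of the old pad space, a 32-bit project id now comes back from bulkstat split across two 16-bit fields, while the bs_projid macro keeps existing callers building against the low half. A hedged userspace sketch of reassembling the full id (it assumes the updated xfs_fs.h header is installed; the helper name is made up for illustration):

#include <stdint.h>
#include <xfs/xfs_fs.h>		/* struct xfs_bstat as modified above */

/* recombine the split project id from a bulkstat record */
static inline uint32_t
bstat_get_projid(const struct xfs_bstat *bs)
{
	return ((uint32_t)bs->bs_projid_hi << 16) | bs->bs_projid_lo;
}
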
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 43b1d5699335..a7c116e814af 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c | |||
@@ -144,12 +144,11 @@ xfs_growfs_data_private( | |||
144 | if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb))) | 144 | if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb))) |
145 | return error; | 145 | return error; |
146 | dpct = pct - mp->m_sb.sb_imax_pct; | 146 | dpct = pct - mp->m_sb.sb_imax_pct; |
147 | error = xfs_read_buf(mp, mp->m_ddev_targp, | 147 | bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp, |
148 | XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1), | 148 | XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1), |
149 | XFS_FSS_TO_BB(mp, 1), 0, &bp); | 149 | BBTOB(XFS_FSS_TO_BB(mp, 1)), 0); |
150 | if (error) | 150 | if (!bp) |
151 | return error; | 151 | return EIO; |
152 | ASSERT(bp); | ||
153 | xfs_buf_relse(bp); | 152 | xfs_buf_relse(bp); |
154 | 153 | ||
155 | new = nb; /* use new as a temporary here */ | 154 | new = nb; /* use new as a temporary here */ |
@@ -597,7 +596,8 @@ out: | |||
597 | * the extra reserve blocks from the reserve..... | 596 | * the extra reserve blocks from the reserve..... |
598 | */ | 597 | */ |
599 | int error; | 598 | int error; |
600 | error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, fdblks_delta, 0); | 599 | error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, |
600 | fdblks_delta, 0); | ||
601 | if (error == ENOSPC) | 601 | if (error == ENOSPC) |
602 | goto retry; | 602 | goto retry; |
603 | } | 603 | } |
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 5371d2dc360e..0626a32c3447 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c | |||
@@ -212,7 +212,7 @@ xfs_ialloc_inode_init( | |||
212 | * to log a whole cluster of inodes instead of all the | 212 | * to log a whole cluster of inodes instead of all the |
213 | * individual transactions causing a lot of log traffic. | 213 | * individual transactions causing a lot of log traffic. |
214 | */ | 214 | */ |
215 | xfs_biozero(fbuf, 0, ninodes << mp->m_sb.sb_inodelog); | 215 | xfs_buf_zero(fbuf, 0, ninodes << mp->m_sb.sb_inodelog); |
216 | for (i = 0; i < ninodes; i++) { | 216 | for (i = 0; i < ninodes; i++) { |
217 | int ioffset = i << mp->m_sb.sb_inodelog; | 217 | int ioffset = i << mp->m_sb.sb_inodelog; |
218 | uint isize = sizeof(struct xfs_dinode); | 218 | uint isize = sizeof(struct xfs_dinode); |
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index d352862cefa0..16921f55c542 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c | |||
@@ -183,38 +183,6 @@ xfs_inobt_key_diff( | |||
183 | cur->bc_rec.i.ir_startino; | 183 | cur->bc_rec.i.ir_startino; |
184 | } | 184 | } |
185 | 185 | ||
186 | STATIC int | ||
187 | xfs_inobt_kill_root( | ||
188 | struct xfs_btree_cur *cur, | ||
189 | struct xfs_buf *bp, | ||
190 | int level, | ||
191 | union xfs_btree_ptr *newroot) | ||
192 | { | ||
193 | int error; | ||
194 | |||
195 | XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); | ||
196 | XFS_BTREE_STATS_INC(cur, killroot); | ||
197 | |||
198 | /* | ||
199 | * Update the root pointer, decreasing the level by 1 and then | ||
200 | * free the old root. | ||
201 | */ | ||
202 | xfs_inobt_set_root(cur, newroot, -1); | ||
203 | error = xfs_inobt_free_block(cur, bp); | ||
204 | if (error) { | ||
205 | XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); | ||
206 | return error; | ||
207 | } | ||
208 | |||
209 | XFS_BTREE_STATS_INC(cur, free); | ||
210 | |||
211 | cur->bc_bufs[level] = NULL; | ||
212 | cur->bc_nlevels--; | ||
213 | |||
214 | XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); | ||
215 | return 0; | ||
216 | } | ||
217 | |||
218 | #ifdef DEBUG | 186 | #ifdef DEBUG |
219 | STATIC int | 187 | STATIC int |
220 | xfs_inobt_keys_inorder( | 188 | xfs_inobt_keys_inorder( |
@@ -309,7 +277,6 @@ static const struct xfs_btree_ops xfs_inobt_ops = { | |||
309 | 277 | ||
310 | .dup_cursor = xfs_inobt_dup_cursor, | 278 | .dup_cursor = xfs_inobt_dup_cursor, |
311 | .set_root = xfs_inobt_set_root, | 279 | .set_root = xfs_inobt_set_root, |
312 | .kill_root = xfs_inobt_kill_root, | ||
313 | .alloc_block = xfs_inobt_alloc_block, | 280 | .alloc_block = xfs_inobt_alloc_block, |
314 | .free_block = xfs_inobt_free_block, | 281 | .free_block = xfs_inobt_free_block, |
315 | .get_minrecs = xfs_inobt_get_minrecs, | 282 | .get_minrecs = xfs_inobt_get_minrecs, |
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index b1ecc6f97ade..0cdd26932d8e 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c | |||
@@ -365,8 +365,8 @@ xfs_iget( | |||
365 | xfs_perag_t *pag; | 365 | xfs_perag_t *pag; |
366 | xfs_agino_t agino; | 366 | xfs_agino_t agino; |
367 | 367 | ||
368 | /* the radix tree exists only in inode capable AGs */ | 368 | /* reject inode numbers outside existing AGs */ |
369 | if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi) | 369 | if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount) |
370 | return EINVAL; | 370 | return EINVAL; |
371 | 371 | ||
372 | /* get the perag structure and ensure that it's inode capable */ | 372 | /* get the perag structure and ensure that it's inode capable */ |
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 34798f391c49..108c7a085f94 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -660,7 +660,8 @@ xfs_dinode_from_disk( | |||
660 | to->di_uid = be32_to_cpu(from->di_uid); | 660 | to->di_uid = be32_to_cpu(from->di_uid); |
661 | to->di_gid = be32_to_cpu(from->di_gid); | 661 | to->di_gid = be32_to_cpu(from->di_gid); |
662 | to->di_nlink = be32_to_cpu(from->di_nlink); | 662 | to->di_nlink = be32_to_cpu(from->di_nlink); |
663 | to->di_projid = be16_to_cpu(from->di_projid); | 663 | to->di_projid_lo = be16_to_cpu(from->di_projid_lo); |
664 | to->di_projid_hi = be16_to_cpu(from->di_projid_hi); | ||
664 | memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad)); | 665 | memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad)); |
665 | to->di_flushiter = be16_to_cpu(from->di_flushiter); | 666 | to->di_flushiter = be16_to_cpu(from->di_flushiter); |
666 | to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec); | 667 | to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec); |
@@ -695,7 +696,8 @@ xfs_dinode_to_disk( | |||
695 | to->di_uid = cpu_to_be32(from->di_uid); | 696 | to->di_uid = cpu_to_be32(from->di_uid); |
696 | to->di_gid = cpu_to_be32(from->di_gid); | 697 | to->di_gid = cpu_to_be32(from->di_gid); |
697 | to->di_nlink = cpu_to_be32(from->di_nlink); | 698 | to->di_nlink = cpu_to_be32(from->di_nlink); |
698 | to->di_projid = cpu_to_be16(from->di_projid); | 699 | to->di_projid_lo = cpu_to_be16(from->di_projid_lo); |
700 | to->di_projid_hi = cpu_to_be16(from->di_projid_hi); | ||
699 | memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad)); | 701 | memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad)); |
700 | to->di_flushiter = cpu_to_be16(from->di_flushiter); | 702 | to->di_flushiter = cpu_to_be16(from->di_flushiter); |
701 | to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec); | 703 | to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec); |
@@ -874,7 +876,7 @@ xfs_iread( | |||
874 | if (ip->i_d.di_version == 1) { | 876 | if (ip->i_d.di_version == 1) { |
875 | ip->i_d.di_nlink = ip->i_d.di_onlink; | 877 | ip->i_d.di_nlink = ip->i_d.di_onlink; |
876 | ip->i_d.di_onlink = 0; | 878 | ip->i_d.di_onlink = 0; |
877 | ip->i_d.di_projid = 0; | 879 | xfs_set_projid(ip, 0); |
878 | } | 880 | } |
879 | 881 | ||
880 | ip->i_delayed_blks = 0; | 882 | ip->i_delayed_blks = 0; |
@@ -982,8 +984,7 @@ xfs_ialloc( | |||
982 | mode_t mode, | 984 | mode_t mode, |
983 | xfs_nlink_t nlink, | 985 | xfs_nlink_t nlink, |
984 | xfs_dev_t rdev, | 986 | xfs_dev_t rdev, |
985 | cred_t *cr, | 987 | prid_t prid, |
986 | xfs_prid_t prid, | ||
987 | int okalloc, | 988 | int okalloc, |
988 | xfs_buf_t **ialloc_context, | 989 | xfs_buf_t **ialloc_context, |
989 | boolean_t *call_again, | 990 | boolean_t *call_again, |
@@ -1027,7 +1028,7 @@ xfs_ialloc( | |||
1027 | ASSERT(ip->i_d.di_nlink == nlink); | 1028 | ASSERT(ip->i_d.di_nlink == nlink); |
1028 | ip->i_d.di_uid = current_fsuid(); | 1029 | ip->i_d.di_uid = current_fsuid(); |
1029 | ip->i_d.di_gid = current_fsgid(); | 1030 | ip->i_d.di_gid = current_fsgid(); |
1030 | ip->i_d.di_projid = prid; | 1031 | xfs_set_projid(ip, prid); |
1031 | memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); | 1032 | memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); |
1032 | 1033 | ||
1033 | /* | 1034 | /* |
@@ -2725,7 +2726,7 @@ cluster_corrupt_out: | |||
2725 | XFS_BUF_UNDONE(bp); | 2726 | XFS_BUF_UNDONE(bp); |
2726 | XFS_BUF_STALE(bp); | 2727 | XFS_BUF_STALE(bp); |
2727 | XFS_BUF_ERROR(bp,EIO); | 2728 | XFS_BUF_ERROR(bp,EIO); |
2728 | xfs_biodone(bp); | 2729 | xfs_buf_ioend(bp, 0); |
2729 | } else { | 2730 | } else { |
2730 | XFS_BUF_STALE(bp); | 2731 | XFS_BUF_STALE(bp); |
2731 | xfs_buf_relse(bp); | 2732 | xfs_buf_relse(bp); |
@@ -3008,7 +3009,7 @@ xfs_iflush_int( | |||
3008 | memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); | 3009 | memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); |
3009 | memset(&(dip->di_pad[0]), 0, | 3010 | memset(&(dip->di_pad[0]), 0, |
3010 | sizeof(dip->di_pad)); | 3011 | sizeof(dip->di_pad)); |
3011 | ASSERT(ip->i_d.di_projid == 0); | 3012 | ASSERT(xfs_get_projid(ip) == 0); |
3012 | } | 3013 | } |
3013 | } | 3014 | } |
3014 | 3015 | ||
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 0898c5417d12..fac52290de90 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h | |||
@@ -134,8 +134,9 @@ typedef struct xfs_icdinode { | |||
134 | __uint32_t di_uid; /* owner's user id */ | 134 | __uint32_t di_uid; /* owner's user id */ |
135 | __uint32_t di_gid; /* owner's group id */ | 135 | __uint32_t di_gid; /* owner's group id */ |
136 | __uint32_t di_nlink; /* number of links to file */ | 136 | __uint32_t di_nlink; /* number of links to file */ |
137 | __uint16_t di_projid; /* owner's project id */ | 137 | __uint16_t di_projid_lo; /* lower part of owner's project id */ |
138 | __uint8_t di_pad[8]; /* unused, zeroed space */ | 138 | __uint16_t di_projid_hi; /* higher part of owner's project id */ |
139 | __uint8_t di_pad[6]; /* unused, zeroed space */ | ||
139 | __uint16_t di_flushiter; /* incremented on flush */ | 140 | __uint16_t di_flushiter; /* incremented on flush */ |
140 | xfs_ictimestamp_t di_atime; /* time last accessed */ | 141 | xfs_ictimestamp_t di_atime; /* time last accessed */ |
141 | xfs_ictimestamp_t di_mtime; /* time last modified */ | 142 | xfs_ictimestamp_t di_mtime; /* time last modified */ |
@@ -212,7 +213,6 @@ typedef struct xfs_icdinode { | |||
212 | #ifdef __KERNEL__ | 213 | #ifdef __KERNEL__ |
213 | 214 | ||
214 | struct bhv_desc; | 215 | struct bhv_desc; |
215 | struct cred; | ||
216 | struct xfs_buf; | 216 | struct xfs_buf; |
217 | struct xfs_bmap_free; | 217 | struct xfs_bmap_free; |
218 | struct xfs_bmbt_irec; | 218 | struct xfs_bmbt_irec; |
@@ -335,6 +335,25 @@ xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags) | |||
335 | } | 335 | } |
336 | 336 | ||
337 | /* | 337 | /* |
338 | * Project quota id helpers (previously projid was 16bit only | ||
339 | * and using two 16bit values to hold the new 32bit projid was chosen | ||
340 | * to retain compatibility with "old" filesystems). | ||
341 | */ | ||
342 | static inline prid_t | ||
343 | xfs_get_projid(struct xfs_inode *ip) | ||
344 | { | ||
345 | return (prid_t)ip->i_d.di_projid_hi << 16 | ip->i_d.di_projid_lo; | ||
346 | } | ||
347 | |||
348 | static inline void | ||
349 | xfs_set_projid(struct xfs_inode *ip, | ||
350 | prid_t projid) | ||
351 | { | ||
352 | ip->i_d.di_projid_hi = (__uint16_t) (projid >> 16); | ||
353 | ip->i_d.di_projid_lo = (__uint16_t) (projid & 0xffff); | ||
354 | } | ||
355 | |||
356 | /* | ||
338 | * Manage the i_flush queue embedded in the inode. This completion | 357 | * Manage the i_flush queue embedded in the inode. This completion |
339 | * queue synchronizes processes attempting to flush the in-core | 358 | * queue synchronizes processes attempting to flush the in-core |
340 | * inode back to disk. | 359 | * inode back to disk. |
@@ -456,8 +475,8 @@ void xfs_inode_free(struct xfs_inode *ip); | |||
456 | * xfs_inode.c prototypes. | 475 | * xfs_inode.c prototypes. |
457 | */ | 476 | */ |
458 | int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, mode_t, | 477 | int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, mode_t, |
459 | xfs_nlink_t, xfs_dev_t, cred_t *, xfs_prid_t, | 478 | xfs_nlink_t, xfs_dev_t, prid_t, int, |
460 | int, struct xfs_buf **, boolean_t *, xfs_inode_t **); | 479 | struct xfs_buf **, boolean_t *, xfs_inode_t **); |
461 | 480 | ||
462 | uint xfs_ip2xflags(struct xfs_inode *); | 481 | uint xfs_ip2xflags(struct xfs_inode *); |
463 | uint xfs_dic2xflags(struct xfs_dinode *); | 482 | uint xfs_dic2xflags(struct xfs_dinode *); |
@@ -471,7 +490,6 @@ int xfs_iunlink(struct xfs_trans *, xfs_inode_t *); | |||
471 | void xfs_iext_realloc(xfs_inode_t *, int, int); | 490 | void xfs_iext_realloc(xfs_inode_t *, int, int); |
472 | void xfs_iunpin_wait(xfs_inode_t *); | 491 | void xfs_iunpin_wait(xfs_inode_t *); |
473 | int xfs_iflush(xfs_inode_t *, uint); | 492 | int xfs_iflush(xfs_inode_t *, uint); |
474 | void xfs_ichgtime(xfs_inode_t *, int); | ||
475 | void xfs_lock_inodes(xfs_inode_t **, int, uint); | 493 | void xfs_lock_inodes(xfs_inode_t **, int, uint); |
476 | void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint); | 494 | void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint); |
477 | 495 | ||
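
The new xfs_get_projid()/xfs_set_projid() helpers keep the hi/lo split private to this header; callers converted elsewhere in this diff (xfs_ialloc(), xfs_iread(), xfs_iflush_int(), xfs_bulkstat_one_int()) only ever handle a whole prid_t. A small illustrative round trip, with a made-up value and ip being an xfs_inode as in the surrounding code:

	prid_t prid = 0x00012345;		/* any 32-bit project id */

	xfs_set_projid(ip, prid);		/* stores di_projid_hi = 0x0001, di_projid_lo = 0x2345 */
	ASSERT(xfs_get_projid(ip) == prid);	/* recombined without loss */
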
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index fe00777e2796..c7ac020705df 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c | |||
@@ -223,15 +223,6 @@ xfs_inode_item_format( | |||
223 | nvecs = 1; | 223 | nvecs = 1; |
224 | 224 | ||
225 | /* | 225 | /* |
226 | * Make sure the linux inode is dirty. We do this before | ||
227 | * clearing i_update_core as the VFS will call back into | ||
228 | * XFS here and set i_update_core, so we need to dirty the | ||
229 | * inode first so that the ordering of i_update_core and | ||
230 | * unlogged modifications still works as described below. | ||
231 | */ | ||
232 | xfs_mark_inode_dirty_sync(ip); | ||
233 | |||
234 | /* | ||
235 | * Clear i_update_core if the timestamps (or any other | 226 | * Clear i_update_core if the timestamps (or any other |
236 | * non-transactional modification) need flushing/logging | 227 | * non-transactional modification) need flushing/logging |
237 | * and we're about to log them with the rest of the core. | 228 | * and we're about to log them with the rest of the core. |
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index 7e3626e5925c..dc1882adaf54 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c | |||
@@ -92,7 +92,8 @@ xfs_bulkstat_one_int( | |||
92 | * further change. | 92 | * further change. |
93 | */ | 93 | */ |
94 | buf->bs_nlink = dic->di_nlink; | 94 | buf->bs_nlink = dic->di_nlink; |
95 | buf->bs_projid = dic->di_projid; | 95 | buf->bs_projid_lo = dic->di_projid_lo; |
96 | buf->bs_projid_hi = dic->di_projid_hi; | ||
96 | buf->bs_ino = ino; | 97 | buf->bs_ino = ino; |
97 | buf->bs_mode = dic->di_mode; | 98 | buf->bs_mode = dic->di_mode; |
98 | buf->bs_uid = dic->di_uid; | 99 | buf->bs_uid = dic->di_uid; |
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index ba8e36e0b4e7..cee4ab9f8a9e 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
@@ -1118,7 +1118,8 @@ xlog_alloc_log(xfs_mount_t *mp, | |||
1118 | iclog->ic_prev = prev_iclog; | 1118 | iclog->ic_prev = prev_iclog; |
1119 | prev_iclog = iclog; | 1119 | prev_iclog = iclog; |
1120 | 1120 | ||
1121 | bp = xfs_buf_get_noaddr(log->l_iclog_size, mp->m_logdev_targp); | 1121 | bp = xfs_buf_get_uncached(mp->m_logdev_targp, |
1122 | log->l_iclog_size, 0); | ||
1122 | if (!bp) | 1123 | if (!bp) |
1123 | goto out_free_iclog; | 1124 | goto out_free_iclog; |
1124 | if (!XFS_BUF_CPSEMA(bp)) | 1125 | if (!XFS_BUF_CPSEMA(bp)) |
@@ -1296,7 +1297,7 @@ xlog_bdstrat( | |||
1296 | if (iclog->ic_state & XLOG_STATE_IOERROR) { | 1297 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
1297 | XFS_BUF_ERROR(bp, EIO); | 1298 | XFS_BUF_ERROR(bp, EIO); |
1298 | XFS_BUF_STALE(bp); | 1299 | XFS_BUF_STALE(bp); |
1299 | xfs_biodone(bp); | 1300 | xfs_buf_ioend(bp, 0); |
1300 | /* | 1301 | /* |
1301 | * It would seem logical to return EIO here, but we rely on | 1302 | * It would seem logical to return EIO here, but we rely on |
1302 | * the log state machine to propagate I/O errors instead of | 1303 | * the log state machine to propagate I/O errors instead of |
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index 7e206fc1fa36..23d6ceb5e97b 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c | |||
@@ -146,102 +146,6 @@ xlog_cil_init_post_recovery( | |||
146 | } | 146 | } |
147 | 147 | ||
148 | /* | 148 | /* |
149 | * Insert the log item into the CIL and calculate the difference in space | ||
150 | * consumed by the item. Add the space to the checkpoint ticket and calculate | ||
151 | * if the change requires additional log metadata. If it does, take that space | ||
152 | * as well. Remove the amount of space we added to the checkpoint ticket from | ||
153 | * the current transaction ticket so that the accounting works out correctly. | ||
154 | * | ||
155 | * If this is the first time the item is being placed into the CIL in this | ||
156 | * context, pin it so it can't be written to disk until the CIL is flushed to | ||
157 | * the iclog and the iclog written to disk. | ||
158 | */ | ||
159 | static void | ||
160 | xlog_cil_insert( | ||
161 | struct log *log, | ||
162 | struct xlog_ticket *ticket, | ||
163 | struct xfs_log_item *item, | ||
164 | struct xfs_log_vec *lv) | ||
165 | { | ||
166 | struct xfs_cil *cil = log->l_cilp; | ||
167 | struct xfs_log_vec *old = lv->lv_item->li_lv; | ||
168 | struct xfs_cil_ctx *ctx = cil->xc_ctx; | ||
169 | int len; | ||
170 | int diff_iovecs; | ||
171 | int iclog_space; | ||
172 | |||
173 | if (old) { | ||
174 | /* existing lv on log item, space used is a delta */ | ||
175 | ASSERT(!list_empty(&item->li_cil)); | ||
176 | ASSERT(old->lv_buf && old->lv_buf_len && old->lv_niovecs); | ||
177 | |||
178 | len = lv->lv_buf_len - old->lv_buf_len; | ||
179 | diff_iovecs = lv->lv_niovecs - old->lv_niovecs; | ||
180 | kmem_free(old->lv_buf); | ||
181 | kmem_free(old); | ||
182 | } else { | ||
183 | /* new lv, must pin the log item */ | ||
184 | ASSERT(!lv->lv_item->li_lv); | ||
185 | ASSERT(list_empty(&item->li_cil)); | ||
186 | |||
187 | len = lv->lv_buf_len; | ||
188 | diff_iovecs = lv->lv_niovecs; | ||
189 | IOP_PIN(lv->lv_item); | ||
190 | |||
191 | } | ||
192 | len += diff_iovecs * sizeof(xlog_op_header_t); | ||
193 | |||
194 | /* attach new log vector to log item */ | ||
195 | lv->lv_item->li_lv = lv; | ||
196 | |||
197 | spin_lock(&cil->xc_cil_lock); | ||
198 | list_move_tail(&item->li_cil, &cil->xc_cil); | ||
199 | ctx->nvecs += diff_iovecs; | ||
200 | |||
201 | /* | ||
202 | * If this is the first time the item is being committed to the CIL, | ||
203 | * store the sequence number on the log item so we can tell | ||
204 | * in future commits whether this is the first checkpoint the item is | ||
205 | * being committed into. | ||
206 | */ | ||
207 | if (!item->li_seq) | ||
208 | item->li_seq = ctx->sequence; | ||
209 | |||
210 | /* | ||
211 | * Now transfer enough transaction reservation to the context ticket | ||
212 | * for the checkpoint. The context ticket is special - the unit | ||
213 | * reservation has to grow as well as the current reservation as we | ||
214 | * steal from tickets so we can correctly determine the space used | ||
215 | * during the transaction commit. | ||
216 | */ | ||
217 | if (ctx->ticket->t_curr_res == 0) { | ||
218 | /* first commit in checkpoint, steal the header reservation */ | ||
219 | ASSERT(ticket->t_curr_res >= ctx->ticket->t_unit_res + len); | ||
220 | ctx->ticket->t_curr_res = ctx->ticket->t_unit_res; | ||
221 | ticket->t_curr_res -= ctx->ticket->t_unit_res; | ||
222 | } | ||
223 | |||
224 | /* do we need space for more log record headers? */ | ||
225 | iclog_space = log->l_iclog_size - log->l_iclog_hsize; | ||
226 | if (len > 0 && (ctx->space_used / iclog_space != | ||
227 | (ctx->space_used + len) / iclog_space)) { | ||
228 | int hdrs; | ||
229 | |||
230 | hdrs = (len + iclog_space - 1) / iclog_space; | ||
231 | /* need to take into account split region headers, too */ | ||
232 | hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header); | ||
233 | ctx->ticket->t_unit_res += hdrs; | ||
234 | ctx->ticket->t_curr_res += hdrs; | ||
235 | ticket->t_curr_res -= hdrs; | ||
236 | ASSERT(ticket->t_curr_res >= len); | ||
237 | } | ||
238 | ticket->t_curr_res -= len; | ||
239 | ctx->space_used += len; | ||
240 | |||
241 | spin_unlock(&cil->xc_cil_lock); | ||
242 | } | ||
243 | |||
244 | /* | ||
245 | * Format log items into flat buffers | 149 | * Format log items into flat buffers |
246 | * | 150 | * |
247 | * For delayed logging, we need to hold a formatted buffer containing all the | 151 | * For delayed logging, we need to hold a formatted buffer containing all the |
@@ -286,7 +190,7 @@ xlog_cil_format_items( | |||
286 | len += lv->lv_iovecp[index].i_len; | 190 | len += lv->lv_iovecp[index].i_len; |
287 | 191 | ||
288 | lv->lv_buf_len = len; | 192 | lv->lv_buf_len = len; |
289 | lv->lv_buf = kmem_zalloc(lv->lv_buf_len, KM_SLEEP|KM_NOFS); | 193 | lv->lv_buf = kmem_alloc(lv->lv_buf_len, KM_SLEEP|KM_NOFS); |
290 | ptr = lv->lv_buf; | 194 | ptr = lv->lv_buf; |
291 | 195 | ||
292 | for (index = 0; index < lv->lv_niovecs; index++) { | 196 | for (index = 0; index < lv->lv_niovecs; index++) { |
@@ -300,21 +204,136 @@ xlog_cil_format_items( | |||
300 | } | 204 | } |
301 | } | 205 | } |
302 | 206 | ||
207 | /* | ||
208 | * Prepare the log item for insertion into the CIL. Calculate the difference in | ||
209 | * log space and vectors it will consume, and if it is a new item pin it as | ||
210 | * well. | ||
211 | */ | ||
212 | STATIC void | ||
213 | xfs_cil_prepare_item( | ||
214 | struct log *log, | ||
215 | struct xfs_log_vec *lv, | ||
216 | int *len, | ||
217 | int *diff_iovecs) | ||
218 | { | ||
219 | struct xfs_log_vec *old = lv->lv_item->li_lv; | ||
220 | |||
221 | if (old) { | ||
222 | /* existing lv on log item, space used is a delta */ | ||
223 | ASSERT(!list_empty(&lv->lv_item->li_cil)); | ||
224 | ASSERT(old->lv_buf && old->lv_buf_len && old->lv_niovecs); | ||
225 | |||
226 | *len += lv->lv_buf_len - old->lv_buf_len; | ||
227 | *diff_iovecs += lv->lv_niovecs - old->lv_niovecs; | ||
228 | kmem_free(old->lv_buf); | ||
229 | kmem_free(old); | ||
230 | } else { | ||
231 | /* new lv, must pin the log item */ | ||
232 | ASSERT(!lv->lv_item->li_lv); | ||
233 | ASSERT(list_empty(&lv->lv_item->li_cil)); | ||
234 | |||
235 | *len += lv->lv_buf_len; | ||
236 | *diff_iovecs += lv->lv_niovecs; | ||
237 | IOP_PIN(lv->lv_item); | ||
238 | |||
239 | } | ||
240 | |||
241 | /* attach new log vector to log item */ | ||
242 | lv->lv_item->li_lv = lv; | ||
243 | |||
244 | /* | ||
245 | * If this is the first time the item is being committed to the | ||
246 | * CIL, store the sequence number on the log item so we can | ||
247 | * tell in future commits whether this is the first checkpoint | ||
248 | * the item is being committed into. | ||
249 | */ | ||
250 | if (!lv->lv_item->li_seq) | ||
251 | lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence; | ||
252 | } | ||
253 | |||
254 | /* | ||
255 | * Insert the log items into the CIL and calculate the difference in space | ||
256 | * consumed by the item. Add the space to the checkpoint ticket and calculate | ||
257 | * if the change requires additional log metadata. If it does, take that space | ||
258 | * as well. Remove the amount of space we added to the checkpoint ticket from | ||
259 | * the current transaction ticket so that the accounting works out correctly. | ||
260 | */ | ||
303 | static void | 261 | static void |
304 | xlog_cil_insert_items( | 262 | xlog_cil_insert_items( |
305 | struct log *log, | 263 | struct log *log, |
306 | struct xfs_log_vec *log_vector, | 264 | struct xfs_log_vec *log_vector, |
307 | struct xlog_ticket *ticket, | 265 | struct xlog_ticket *ticket) |
308 | xfs_lsn_t *start_lsn) | ||
309 | { | 266 | { |
310 | struct xfs_log_vec *lv; | 267 | struct xfs_cil *cil = log->l_cilp; |
311 | 268 | struct xfs_cil_ctx *ctx = cil->xc_ctx; | |
312 | if (start_lsn) | 269 | struct xfs_log_vec *lv; |
313 | *start_lsn = log->l_cilp->xc_ctx->sequence; | 270 | int len = 0; |
271 | int diff_iovecs = 0; | ||
272 | int iclog_space; | ||
314 | 273 | ||
315 | ASSERT(log_vector); | 274 | ASSERT(log_vector); |
275 | |||
276 | /* | ||
277 | * Do all the accounting aggregation and switching of log vectors | ||
278 | * around in a separate loop to the insertion of items into the CIL. | ||
279 | * Then we can do a separate loop to update the CIL within a single | ||
280 | * lock/unlock pair. This reduces the number of round trips on the CIL | ||
281 | * lock from O(nr_logvectors) to O(1) and greatly reduces the overall | ||
282 | * hold time for the transaction commit. | ||
283 | * | ||
284 | * If this is the first time the item is being placed into the CIL in | ||
285 | * this context, pin it so it can't be written to disk until the CIL is | ||
286 | * flushed to the iclog and the iclog written to disk. | ||
287 | * | ||
288 | * We can do this safely because the context can't checkpoint until we | ||
289 | * are done so it doesn't matter exactly how we update the CIL. | ||
290 | */ | ||
291 | for (lv = log_vector; lv; lv = lv->lv_next) | ||
292 | xfs_cil_prepare_item(log, lv, &len, &diff_iovecs); | ||
293 | |||
294 | /* account for space used by new iovec headers */ | ||
295 | len += diff_iovecs * sizeof(xlog_op_header_t); | ||
296 | |||
297 | spin_lock(&cil->xc_cil_lock); | ||
298 | |||
299 | /* move the items to the tail of the CIL */ | ||
316 | for (lv = log_vector; lv; lv = lv->lv_next) | 300 | for (lv = log_vector; lv; lv = lv->lv_next) |
317 | xlog_cil_insert(log, ticket, lv->lv_item, lv); | 301 | list_move_tail(&lv->lv_item->li_cil, &cil->xc_cil); |
302 | |||
303 | ctx->nvecs += diff_iovecs; | ||
304 | |||
305 | /* | ||
306 | * Now transfer enough transaction reservation to the context ticket | ||
307 | * for the checkpoint. The context ticket is special - the unit | ||
308 | * reservation has to grow as well as the current reservation as we | ||
309 | * steal from tickets so we can correctly determine the space used | ||
310 | * during the transaction commit. | ||
311 | */ | ||
312 | if (ctx->ticket->t_curr_res == 0) { | ||
313 | /* first commit in checkpoint, steal the header reservation */ | ||
314 | ASSERT(ticket->t_curr_res >= ctx->ticket->t_unit_res + len); | ||
315 | ctx->ticket->t_curr_res = ctx->ticket->t_unit_res; | ||
316 | ticket->t_curr_res -= ctx->ticket->t_unit_res; | ||
317 | } | ||
318 | |||
319 | /* do we need space for more log record headers? */ | ||
320 | iclog_space = log->l_iclog_size - log->l_iclog_hsize; | ||
321 | if (len > 0 && (ctx->space_used / iclog_space != | ||
322 | (ctx->space_used + len) / iclog_space)) { | ||
323 | int hdrs; | ||
324 | |||
325 | hdrs = (len + iclog_space - 1) / iclog_space; | ||
326 | /* need to take into account split region headers, too */ | ||
327 | hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header); | ||
328 | ctx->ticket->t_unit_res += hdrs; | ||
329 | ctx->ticket->t_curr_res += hdrs; | ||
330 | ticket->t_curr_res -= hdrs; | ||
331 | ASSERT(ticket->t_curr_res >= len); | ||
332 | } | ||
333 | ticket->t_curr_res -= len; | ||
334 | ctx->space_used += len; | ||
335 | |||
336 | spin_unlock(&cil->xc_cil_lock); | ||
318 | } | 337 | } |
319 | 338 | ||
320 | static void | 339 | static void |
@@ -638,7 +657,10 @@ xfs_log_commit_cil( | |||
638 | 657 | ||
639 | /* lock out background commit */ | 658 | /* lock out background commit */ |
640 | down_read(&log->l_cilp->xc_ctx_lock); | 659 | down_read(&log->l_cilp->xc_ctx_lock); |
641 | xlog_cil_insert_items(log, log_vector, tp->t_ticket, commit_lsn); | 660 | if (commit_lsn) |
661 | *commit_lsn = log->l_cilp->xc_ctx->sequence; | ||
662 | |||
663 | xlog_cil_insert_items(log, log_vector, tp->t_ticket); | ||
642 | 664 | ||
643 | /* check we didn't blow the reservation */ | 665 | /* check we didn't blow the reservation */ |
644 | if (tp->t_ticket->t_curr_res < 0) | 666 | if (tp->t_ticket->t_curr_res < 0) |
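
Taken together, the xfs_log_cil.c hunks replace the per-item xlog_cil_insert() with a two-pass scheme: xfs_cil_prepare_item() swaps the log vectors, pins new items and sums the space deltas with no locks held, and xlog_cil_insert_items() then takes xc_cil_lock once to splice the items and charge the tickets. A condensed sketch of that shape, using only names that appear in the hunks above:

	/* pass 1: lock-free - swap log vectors, pin new items, sum the deltas */
	for (lv = log_vector; lv; lv = lv->lv_next)
		xfs_cil_prepare_item(log, lv, &len, &diff_iovecs);
	len += diff_iovecs * sizeof(xlog_op_header_t);

	/* pass 2: one round trip on the CIL lock for the whole transaction */
	spin_lock(&cil->xc_cil_lock);
	for (lv = log_vector; lv; lv = lv->lv_next)
		list_move_tail(&lv->lv_item->li_cil, &cil->xc_cil);
	ctx->nvecs += diff_iovecs;
	/* ... ticket reservation and space accounting as in the hunk above ... */
	spin_unlock(&cil->xc_cil_lock);
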
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 6f3f5fa37acf..966d3f97458c 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
@@ -107,7 +107,8 @@ xlog_get_bp( | |||
107 | nbblks += log->l_sectBBsize; | 107 | nbblks += log->l_sectBBsize; |
108 | nbblks = round_up(nbblks, log->l_sectBBsize); | 108 | nbblks = round_up(nbblks, log->l_sectBBsize); |
109 | 109 | ||
110 | return xfs_buf_get_noaddr(BBTOB(nbblks), log->l_mp->m_logdev_targp); | 110 | return xfs_buf_get_uncached(log->l_mp->m_logdev_targp, |
111 | BBTOB(nbblks), 0); | ||
111 | } | 112 | } |
112 | 113 | ||
113 | STATIC void | 114 | STATIC void |
@@ -167,7 +168,7 @@ xlog_bread_noalign( | |||
167 | XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp); | 168 | XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp); |
168 | 169 | ||
169 | xfsbdstrat(log->l_mp, bp); | 170 | xfsbdstrat(log->l_mp, bp); |
170 | error = xfs_iowait(bp); | 171 | error = xfs_buf_iowait(bp); |
171 | if (error) | 172 | if (error) |
172 | xfs_ioerror_alert("xlog_bread", log->l_mp, | 173 | xfs_ioerror_alert("xlog_bread", log->l_mp, |
173 | bp, XFS_BUF_ADDR(bp)); | 174 | bp, XFS_BUF_ADDR(bp)); |
@@ -321,12 +322,13 @@ xlog_recover_iodone( | |||
321 | * this during recovery. One strike! | 322 | * this during recovery. One strike! |
322 | */ | 323 | */ |
323 | xfs_ioerror_alert("xlog_recover_iodone", | 324 | xfs_ioerror_alert("xlog_recover_iodone", |
324 | bp->b_mount, bp, XFS_BUF_ADDR(bp)); | 325 | bp->b_target->bt_mount, bp, |
325 | xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR); | 326 | XFS_BUF_ADDR(bp)); |
327 | xfs_force_shutdown(bp->b_target->bt_mount, | ||
328 | SHUTDOWN_META_IO_ERROR); | ||
326 | } | 329 | } |
327 | bp->b_mount = NULL; | ||
328 | XFS_BUF_CLR_IODONE_FUNC(bp); | 330 | XFS_BUF_CLR_IODONE_FUNC(bp); |
329 | xfs_biodone(bp); | 331 | xfs_buf_ioend(bp, 0); |
330 | } | 332 | } |
331 | 333 | ||
332 | /* | 334 | /* |
@@ -2275,8 +2277,7 @@ xlog_recover_do_buffer_trans( | |||
2275 | XFS_BUF_STALE(bp); | 2277 | XFS_BUF_STALE(bp); |
2276 | error = xfs_bwrite(mp, bp); | 2278 | error = xfs_bwrite(mp, bp); |
2277 | } else { | 2279 | } else { |
2278 | ASSERT(bp->b_mount == NULL || bp->b_mount == mp); | 2280 | ASSERT(bp->b_target->bt_mount == mp); |
2279 | bp->b_mount = mp; | ||
2280 | XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); | 2281 | XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); |
2281 | xfs_bdwrite(mp, bp); | 2282 | xfs_bdwrite(mp, bp); |
2282 | } | 2283 | } |
@@ -2540,8 +2541,7 @@ xlog_recover_do_inode_trans( | |||
2540 | } | 2541 | } |
2541 | 2542 | ||
2542 | write_inode_buffer: | 2543 | write_inode_buffer: |
2543 | ASSERT(bp->b_mount == NULL || bp->b_mount == mp); | 2544 | ASSERT(bp->b_target->bt_mount == mp); |
2544 | bp->b_mount = mp; | ||
2545 | XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); | 2545 | XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); |
2546 | xfs_bdwrite(mp, bp); | 2546 | xfs_bdwrite(mp, bp); |
2547 | error: | 2547 | error: |
@@ -2678,8 +2678,7 @@ xlog_recover_do_dquot_trans( | |||
2678 | memcpy(ddq, recddq, item->ri_buf[1].i_len); | 2678 | memcpy(ddq, recddq, item->ri_buf[1].i_len); |
2679 | 2679 | ||
2680 | ASSERT(dq_f->qlf_size == 2); | 2680 | ASSERT(dq_f->qlf_size == 2); |
2681 | ASSERT(bp->b_mount == NULL || bp->b_mount == mp); | 2681 | ASSERT(bp->b_target->bt_mount == mp); |
2682 | bp->b_mount = mp; | ||
2683 | XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); | 2682 | XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); |
2684 | xfs_bdwrite(mp, bp); | 2683 | xfs_bdwrite(mp, bp); |
2685 | 2684 | ||
@@ -3817,7 +3816,7 @@ xlog_do_recover( | |||
3817 | XFS_BUF_READ(bp); | 3816 | XFS_BUF_READ(bp); |
3818 | XFS_BUF_UNASYNC(bp); | 3817 | XFS_BUF_UNASYNC(bp); |
3819 | xfsbdstrat(log->l_mp, bp); | 3818 | xfsbdstrat(log->l_mp, bp); |
3820 | error = xfs_iowait(bp); | 3819 | error = xfs_buf_iowait(bp); |
3821 | if (error) { | 3820 | if (error) { |
3822 | xfs_ioerror_alert("xlog_do_recover", | 3821 | xfs_ioerror_alert("xlog_do_recover", |
3823 | log->l_mp, bp, XFS_BUF_ADDR(bp)); | 3822 | log->l_mp, bp, XFS_BUF_ADDR(bp)); |
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index aeb9d72ebf6e..b1498ab5a399 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c | |||
@@ -52,16 +52,11 @@ STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t, | |||
52 | int); | 52 | int); |
53 | STATIC void xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t, | 53 | STATIC void xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t, |
54 | int); | 54 | int); |
55 | STATIC int xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t, | ||
56 | int64_t, int); | ||
57 | STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t); | 55 | STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t); |
58 | |||
59 | #else | 56 | #else |
60 | 57 | ||
61 | #define xfs_icsb_balance_counter(mp, a, b) do { } while (0) | 58 | #define xfs_icsb_balance_counter(mp, a, b) do { } while (0) |
62 | #define xfs_icsb_balance_counter_locked(mp, a, b) do { } while (0) | 59 | #define xfs_icsb_balance_counter_locked(mp, a, b) do { } while (0) |
63 | #define xfs_icsb_modify_counters(mp, a, b, c) do { } while (0) | ||
64 | |||
65 | #endif | 60 | #endif |
66 | 61 | ||
67 | static const struct { | 62 | static const struct { |
@@ -199,6 +194,8 @@ xfs_uuid_unmount( | |||
199 | 194 | ||
200 | /* | 195 | /* |
201 | * Reference counting access wrappers to the perag structures. | 196 | * Reference counting access wrappers to the perag structures. |
197 | * Because we never free per-ag structures, the only thing we | ||
198 | * have to protect against changes is the tree structure itself. | ||
202 | */ | 199 | */ |
203 | struct xfs_perag * | 200 | struct xfs_perag * |
204 | xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno) | 201 | xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno) |
@@ -206,19 +203,43 @@ xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno) | |||
206 | struct xfs_perag *pag; | 203 | struct xfs_perag *pag; |
207 | int ref = 0; | 204 | int ref = 0; |
208 | 205 | ||
209 | spin_lock(&mp->m_perag_lock); | 206 | rcu_read_lock(); |
210 | pag = radix_tree_lookup(&mp->m_perag_tree, agno); | 207 | pag = radix_tree_lookup(&mp->m_perag_tree, agno); |
211 | if (pag) { | 208 | if (pag) { |
212 | ASSERT(atomic_read(&pag->pag_ref) >= 0); | 209 | ASSERT(atomic_read(&pag->pag_ref) >= 0); |
213 | /* catch leaks in the positive direction during testing */ | ||
214 | ASSERT(atomic_read(&pag->pag_ref) < 1000); | ||
215 | ref = atomic_inc_return(&pag->pag_ref); | 210 | ref = atomic_inc_return(&pag->pag_ref); |
216 | } | 211 | } |
217 | spin_unlock(&mp->m_perag_lock); | 212 | rcu_read_unlock(); |
218 | trace_xfs_perag_get(mp, agno, ref, _RET_IP_); | 213 | trace_xfs_perag_get(mp, agno, ref, _RET_IP_); |
219 | return pag; | 214 | return pag; |
220 | } | 215 | } |
221 | 216 | ||
217 | /* | ||
218 | * search from @first to find the next perag with the given tag set. | ||
219 | */ | ||
220 | struct xfs_perag * | ||
221 | xfs_perag_get_tag( | ||
222 | struct xfs_mount *mp, | ||
223 | xfs_agnumber_t first, | ||
224 | int tag) | ||
225 | { | ||
226 | struct xfs_perag *pag; | ||
227 | int found; | ||
228 | int ref; | ||
229 | |||
230 | rcu_read_lock(); | ||
231 | found = radix_tree_gang_lookup_tag(&mp->m_perag_tree, | ||
232 | (void **)&pag, first, 1, tag); | ||
233 | if (found <= 0) { | ||
234 | rcu_read_unlock(); | ||
235 | return NULL; | ||
236 | } | ||
237 | ref = atomic_inc_return(&pag->pag_ref); | ||
238 | rcu_read_unlock(); | ||
239 | trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_); | ||
240 | return pag; | ||
241 | } | ||
242 | |||
222 | void | 243 | void |
223 | xfs_perag_put(struct xfs_perag *pag) | 244 | xfs_perag_put(struct xfs_perag *pag) |
224 | { | 245 | { |
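xfs_perag_get() above no longer takes m_perag_lock; the radix tree is walked under rcu_read_lock() and a reference is pinned before the read-side critical section ends, which is what makes the RCU-deferred free further down safe. A generic sketch of that lookup pattern (the type and function names are illustrative, not XFS symbols):

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/atomic.h>

struct ag_obj {
    atomic_t        ref;
    struct rcu_head rcu_head;   /* used by the deferred free, see below */
};

static struct ag_obj *ag_get(struct radix_tree_root *tree, unsigned long agno)
{
    struct ag_obj *obj;

    rcu_read_lock();
    obj = radix_tree_lookup(tree, agno);
    if (obj)
        atomic_inc(&obj->ref);  /* pin it before leaving the RCU section */
    rcu_read_unlock();
    return obj;
}

xfs_perag_get_tag() follows the same shape, using radix_tree_gang_lookup_tag() to return the first tagged entry at or after @first.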
@@ -229,10 +250,18 @@ xfs_perag_put(struct xfs_perag *pag) | |||
229 | trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_); | 250 | trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_); |
230 | } | 251 | } |
231 | 252 | ||
253 | STATIC void | ||
254 | __xfs_free_perag( | ||
255 | struct rcu_head *head) | ||
256 | { | ||
257 | struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head); | ||
258 | |||
259 | ASSERT(atomic_read(&pag->pag_ref) == 0); | ||
260 | kmem_free(pag); | ||
261 | } | ||
262 | |||
232 | /* | 263 | /* |
233 | * Free up the resources associated with a mount structure. Assume that | 264 | * Free up the per-ag resources associated with the mount structure. |
234 | * the structure was initially zeroed, so we can tell which fields got | ||
235 | * initialized. | ||
236 | */ | 265 | */ |
237 | STATIC void | 266 | STATIC void |
238 | xfs_free_perag( | 267 | xfs_free_perag( |
@@ -244,10 +273,9 @@ xfs_free_perag( | |||
244 | for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { | 273 | for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { |
245 | spin_lock(&mp->m_perag_lock); | 274 | spin_lock(&mp->m_perag_lock); |
246 | pag = radix_tree_delete(&mp->m_perag_tree, agno); | 275 | pag = radix_tree_delete(&mp->m_perag_tree, agno); |
247 | ASSERT(pag); | ||
248 | ASSERT(atomic_read(&pag->pag_ref) == 0); | ||
249 | spin_unlock(&mp->m_perag_lock); | 276 | spin_unlock(&mp->m_perag_lock); |
250 | kmem_free(pag); | 277 | ASSERT(pag); |
278 | call_rcu(&pag->rcu_head, __xfs_free_perag); | ||
251 | } | 279 | } |
252 | } | 280 | } |
253 | 281 | ||
@@ -444,7 +472,10 @@ xfs_initialize_perag( | |||
444 | pag->pag_agno = index; | 472 | pag->pag_agno = index; |
445 | pag->pag_mount = mp; | 473 | pag->pag_mount = mp; |
446 | rwlock_init(&pag->pag_ici_lock); | 474 | rwlock_init(&pag->pag_ici_lock); |
475 | mutex_init(&pag->pag_ici_reclaim_lock); | ||
447 | INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC); | 476 | INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC); |
477 | spin_lock_init(&pag->pag_buf_lock); | ||
478 | pag->pag_buf_tree = RB_ROOT; | ||
448 | 479 | ||
449 | if (radix_tree_preload(GFP_NOFS)) | 480 | if (radix_tree_preload(GFP_NOFS)) |
450 | goto out_unwind; | 481 | goto out_unwind; |
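Alongside the inode radix tree, each AG now carries its own buffer cache: pag_buf_lock plus the pag_buf_tree rb_root initialised above replace the old global buffer hash (the matching lookup code is in the xfs_buf.c hunks at the top of this diff). A lookup in such a tree is a plain rbtree walk keyed by the buffer's block offset; a generic sketch with illustrative names:

#include <linux/rbtree.h>

struct cached_buf {
    struct rb_node      rbnode;
    unsigned long long  offset;     /* search key */
};

static struct cached_buf *buf_find(struct rb_root *root,
                                   unsigned long long offset)
{
    struct rb_node *n = root->rb_node;

    while (n) {
        struct cached_buf *b = rb_entry(n, struct cached_buf, rbnode);

        if (offset < b->offset)
            n = n->rb_left;
        else if (offset > b->offset)
            n = n->rb_right;
        else
            return b;
    }
    return NULL;
}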
@@ -639,7 +670,6 @@ int | |||
639 | xfs_readsb(xfs_mount_t *mp, int flags) | 670 | xfs_readsb(xfs_mount_t *mp, int flags) |
640 | { | 671 | { |
641 | unsigned int sector_size; | 672 | unsigned int sector_size; |
642 | unsigned int extra_flags; | ||
643 | xfs_buf_t *bp; | 673 | xfs_buf_t *bp; |
644 | int error; | 674 | int error; |
645 | 675 | ||
@@ -652,28 +682,24 @@ xfs_readsb(xfs_mount_t *mp, int flags) | |||
652 | * access to the superblock. | 682 | * access to the superblock. |
653 | */ | 683 | */ |
654 | sector_size = xfs_getsize_buftarg(mp->m_ddev_targp); | 684 | sector_size = xfs_getsize_buftarg(mp->m_ddev_targp); |
655 | extra_flags = XBF_LOCK | XBF_FS_MANAGED | XBF_MAPPED; | ||
656 | 685 | ||
657 | bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, BTOBB(sector_size), | 686 | reread: |
658 | extra_flags); | 687 | bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp, |
659 | if (!bp || XFS_BUF_ISERROR(bp)) { | 688 | XFS_SB_DADDR, sector_size, 0); |
660 | xfs_fs_mount_cmn_err(flags, "SB read failed"); | 689 | if (!bp) { |
661 | error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM; | 690 | xfs_fs_mount_cmn_err(flags, "SB buffer read failed"); |
662 | goto fail; | 691 | return EIO; |
663 | } | 692 | } |
664 | ASSERT(XFS_BUF_ISBUSY(bp)); | ||
665 | ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); | ||
666 | 693 | ||
667 | /* | 694 | /* |
668 | * Initialize the mount structure from the superblock. | 695 | * Initialize the mount structure from the superblock. |
669 | * But first do some basic consistency checking. | 696 | * But first do some basic consistency checking. |
670 | */ | 697 | */ |
671 | xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp)); | 698 | xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp)); |
672 | |||
673 | error = xfs_mount_validate_sb(mp, &(mp->m_sb), flags); | 699 | error = xfs_mount_validate_sb(mp, &(mp->m_sb), flags); |
674 | if (error) { | 700 | if (error) { |
675 | xfs_fs_mount_cmn_err(flags, "SB validate failed"); | 701 | xfs_fs_mount_cmn_err(flags, "SB validate failed"); |
676 | goto fail; | 702 | goto release_buf; |
677 | } | 703 | } |
678 | 704 | ||
679 | /* | 705 | /* |
@@ -684,7 +710,7 @@ xfs_readsb(xfs_mount_t *mp, int flags) | |||
684 | "device supports only %u byte sectors (not %u)", | 710 | "device supports only %u byte sectors (not %u)", |
685 | sector_size, mp->m_sb.sb_sectsize); | 711 | sector_size, mp->m_sb.sb_sectsize); |
686 | error = ENOSYS; | 712 | error = ENOSYS; |
687 | goto fail; | 713 | goto release_buf; |
688 | } | 714 | } |
689 | 715 | ||
690 | /* | 716 | /* |
@@ -692,33 +718,20 @@ xfs_readsb(xfs_mount_t *mp, int flags) | |||
692 | * re-read the superblock so the buffer is correctly sized. | 718 | * re-read the superblock so the buffer is correctly sized. |
693 | */ | 719 | */ |
694 | if (sector_size < mp->m_sb.sb_sectsize) { | 720 | if (sector_size < mp->m_sb.sb_sectsize) { |
695 | XFS_BUF_UNMANAGE(bp); | ||
696 | xfs_buf_relse(bp); | 721 | xfs_buf_relse(bp); |
697 | sector_size = mp->m_sb.sb_sectsize; | 722 | sector_size = mp->m_sb.sb_sectsize; |
698 | bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, | 723 | goto reread; |
699 | BTOBB(sector_size), extra_flags); | ||
700 | if (!bp || XFS_BUF_ISERROR(bp)) { | ||
701 | xfs_fs_mount_cmn_err(flags, "SB re-read failed"); | ||
702 | error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM; | ||
703 | goto fail; | ||
704 | } | ||
705 | ASSERT(XFS_BUF_ISBUSY(bp)); | ||
706 | ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); | ||
707 | } | 724 | } |
708 | 725 | ||
709 | /* Initialize per-cpu counters */ | 726 | /* Initialize per-cpu counters */ |
710 | xfs_icsb_reinit_counters(mp); | 727 | xfs_icsb_reinit_counters(mp); |
711 | 728 | ||
712 | mp->m_sb_bp = bp; | 729 | mp->m_sb_bp = bp; |
713 | xfs_buf_relse(bp); | 730 | xfs_buf_unlock(bp); |
714 | ASSERT(XFS_BUF_VALUSEMA(bp) > 0); | ||
715 | return 0; | 731 | return 0; |
716 | 732 | ||
717 | fail: | 733 | release_buf: |
718 | if (bp) { | 734 | xfs_buf_relse(bp); |
719 | XFS_BUF_UNMANAGE(bp); | ||
720 | xfs_buf_relse(bp); | ||
721 | } | ||
722 | return error; | 735 | return error; |
723 | } | 736 | } |
724 | 737 | ||
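The rewritten xfs_readsb() drops the managed-buffer flags and handles a too-small initial sector-size guess with a simple release-and-retry: the reread label is taken at most once, after the real sb_sectsize is known. Reduced to its skeleton (every helper below is a placeholder, not an XFS function; positive error codes follow the XFS convention used above):

struct buf;

static int sketch_read_sb(void)
{
    unsigned int sector_size = guess_sector_size();     /* placeholder */
    unsigned int real;
    struct buf *bp;

reread:
    bp = read_uncached_sb(sector_size);                 /* placeholder */
    if (!bp)
        return EIO;

    real = ondisk_sector_size(bp);                      /* placeholder */
    if (sector_size < real) {
        release_buf(bp);                                /* placeholder */
        sector_size = real;
        goto reread;                    /* retry with the real size */
    }

    keep_sb_buffer(bp);                                 /* placeholder */
    return 0;
}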
@@ -991,42 +1004,35 @@ xfs_check_sizes(xfs_mount_t *mp) | |||
991 | { | 1004 | { |
992 | xfs_buf_t *bp; | 1005 | xfs_buf_t *bp; |
993 | xfs_daddr_t d; | 1006 | xfs_daddr_t d; |
994 | int error; | ||
995 | 1007 | ||
996 | d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks); | 1008 | d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks); |
997 | if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) { | 1009 | if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) { |
998 | cmn_err(CE_WARN, "XFS: size check 1 failed"); | 1010 | cmn_err(CE_WARN, "XFS: filesystem size mismatch detected"); |
999 | return XFS_ERROR(EFBIG); | 1011 | return XFS_ERROR(EFBIG); |
1000 | } | 1012 | } |
1001 | error = xfs_read_buf(mp, mp->m_ddev_targp, | 1013 | bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp, |
1002 | d - XFS_FSS_TO_BB(mp, 1), | 1014 | d - XFS_FSS_TO_BB(mp, 1), |
1003 | XFS_FSS_TO_BB(mp, 1), 0, &bp); | 1015 | BBTOB(XFS_FSS_TO_BB(mp, 1)), 0); |
1004 | if (!error) { | 1016 | if (!bp) { |
1005 | xfs_buf_relse(bp); | 1017 | cmn_err(CE_WARN, "XFS: last sector read failed"); |
1006 | } else { | 1018 | return EIO; |
1007 | cmn_err(CE_WARN, "XFS: size check 2 failed"); | ||
1008 | if (error == ENOSPC) | ||
1009 | error = XFS_ERROR(EFBIG); | ||
1010 | return error; | ||
1011 | } | 1019 | } |
1020 | xfs_buf_relse(bp); | ||
1012 | 1021 | ||
1013 | if (mp->m_logdev_targp != mp->m_ddev_targp) { | 1022 | if (mp->m_logdev_targp != mp->m_ddev_targp) { |
1014 | d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); | 1023 | d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); |
1015 | if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { | 1024 | if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { |
1016 | cmn_err(CE_WARN, "XFS: size check 3 failed"); | 1025 | cmn_err(CE_WARN, "XFS: log size mismatch detected"); |
1017 | return XFS_ERROR(EFBIG); | 1026 | return XFS_ERROR(EFBIG); |
1018 | } | 1027 | } |
1019 | error = xfs_read_buf(mp, mp->m_logdev_targp, | 1028 | bp = xfs_buf_read_uncached(mp, mp->m_logdev_targp, |
1020 | d - XFS_FSB_TO_BB(mp, 1), | 1029 | d - XFS_FSB_TO_BB(mp, 1), |
1021 | XFS_FSB_TO_BB(mp, 1), 0, &bp); | 1030 | XFS_FSB_TO_B(mp, 1), 0); |
1022 | if (!error) { | 1031 | if (!bp) { |
1023 | xfs_buf_relse(bp); | 1032 | cmn_err(CE_WARN, "XFS: log device read failed"); |
1024 | } else { | 1033 | return EIO; |
1025 | cmn_err(CE_WARN, "XFS: size check 3 failed"); | ||
1026 | if (error == ENOSPC) | ||
1027 | error = XFS_ERROR(EFBIG); | ||
1028 | return error; | ||
1029 | } | 1034 | } |
1035 | xfs_buf_relse(bp); | ||
1030 | } | 1036 | } |
1031 | return 0; | 1037 | return 0; |
1032 | } | 1038 | } |
@@ -1601,7 +1607,7 @@ xfs_unmountfs_writesb(xfs_mount_t *mp) | |||
1601 | XFS_BUF_UNASYNC(sbp); | 1607 | XFS_BUF_UNASYNC(sbp); |
1602 | ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp); | 1608 | ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp); |
1603 | xfsbdstrat(mp, sbp); | 1609 | xfsbdstrat(mp, sbp); |
1604 | error = xfs_iowait(sbp); | 1610 | error = xfs_buf_iowait(sbp); |
1605 | if (error) | 1611 | if (error) |
1606 | xfs_ioerror_alert("xfs_unmountfs_writesb", | 1612 | xfs_ioerror_alert("xfs_unmountfs_writesb", |
1607 | mp, sbp, XFS_BUF_ADDR(sbp)); | 1613 | mp, sbp, XFS_BUF_ADDR(sbp)); |
@@ -1832,135 +1838,72 @@ xfs_mod_incore_sb_unlocked( | |||
1832 | */ | 1838 | */ |
1833 | int | 1839 | int |
1834 | xfs_mod_incore_sb( | 1840 | xfs_mod_incore_sb( |
1835 | xfs_mount_t *mp, | 1841 | struct xfs_mount *mp, |
1836 | xfs_sb_field_t field, | 1842 | xfs_sb_field_t field, |
1837 | int64_t delta, | 1843 | int64_t delta, |
1838 | int rsvd) | 1844 | int rsvd) |
1839 | { | 1845 | { |
1840 | int status; | 1846 | int status; |
1841 | 1847 | ||
1842 | /* check for per-cpu counters */ | ||
1843 | switch (field) { | ||
1844 | #ifdef HAVE_PERCPU_SB | 1848 | #ifdef HAVE_PERCPU_SB |
1845 | case XFS_SBS_ICOUNT: | 1849 | ASSERT(field < XFS_SBS_ICOUNT || field > XFS_SBS_FDBLOCKS); |
1846 | case XFS_SBS_IFREE: | ||
1847 | case XFS_SBS_FDBLOCKS: | ||
1848 | if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) { | ||
1849 | status = xfs_icsb_modify_counters(mp, field, | ||
1850 | delta, rsvd); | ||
1851 | break; | ||
1852 | } | ||
1853 | /* FALLTHROUGH */ | ||
1854 | #endif | 1850 | #endif |
1855 | default: | 1851 | spin_lock(&mp->m_sb_lock); |
1856 | spin_lock(&mp->m_sb_lock); | 1852 | status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); |
1857 | status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); | 1853 | spin_unlock(&mp->m_sb_lock); |
1858 | spin_unlock(&mp->m_sb_lock); | ||
1859 | break; | ||
1860 | } | ||
1861 | 1854 | ||
1862 | return status; | 1855 | return status; |
1863 | } | 1856 | } |
1864 | 1857 | ||
1865 | /* | 1858 | /* |
1866 | * xfs_mod_incore_sb_batch() is used to change more than one field | 1859 | * Change more than one field in the in-core superblock structure at a time. |
1867 | * in the in-core superblock structure at a time. This modification | ||
1868 | * is protected by a lock internal to this module. The fields and | ||
1869 | * changes to those fields are specified in the array of xfs_mod_sb | ||
1870 | * structures passed in. | ||
1871 | * | 1860 | * |
1872 | * Either all of the specified deltas will be applied or none of | 1861 | * The fields and changes to those fields are specified in the array of |
1873 | * them will. If any modified field dips below 0, then all modifications | 1862 | * xfs_mod_sb structures passed in. Either all of the specified deltas |
1874 | * will be backed out and EINVAL will be returned. | 1863 | * will be applied or none of them will. If any modified field dips below 0, |
1864 | * then all modifications will be backed out and EINVAL will be returned. | ||
1865 | * | ||
1866 | * Note that this function may not be used for the superblock values that | ||
1867 | * are tracked with the in-memory per-cpu counters - a direct call to | ||
1868 | * xfs_icsb_modify_counters is required for these. | ||
1875 | */ | 1869 | */ |
1876 | int | 1870 | int |
1877 | xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd) | 1871 | xfs_mod_incore_sb_batch( |
1872 | struct xfs_mount *mp, | ||
1873 | xfs_mod_sb_t *msb, | ||
1874 | uint nmsb, | ||
1875 | int rsvd) | ||
1878 | { | 1876 | { |
1879 | int status=0; | 1877 | xfs_mod_sb_t *msbp = &msb[0]; |
1880 | xfs_mod_sb_t *msbp; | 1878 | int error = 0; |
1881 | 1879 | ||
1882 | /* | 1880 | /* |
1883 | * Loop through the array of mod structures and apply each | 1881 | * Loop through the array of mod structures and apply each individually. |
1884 | * individually. If any fail, then back out all those | 1882 | * If any fail, then back out all those which have already been applied. |
1885 | * which have already been applied. Do all of this within | 1883 | * Do all of this within the scope of the m_sb_lock so that all of the |
1886 | * the scope of the m_sb_lock so that all of the changes will | 1884 | * changes will be atomic. |
1887 | * be atomic. | ||
1888 | */ | 1885 | */ |
1889 | spin_lock(&mp->m_sb_lock); | 1886 | spin_lock(&mp->m_sb_lock); |
1890 | msbp = &msb[0]; | ||
1891 | for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) { | 1887 | for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) { |
1892 | /* | 1888 | ASSERT(msbp->msb_field < XFS_SBS_ICOUNT || |
1893 | * Apply the delta at index n. If it fails, break | 1889 | msbp->msb_field > XFS_SBS_FDBLOCKS); |
1894 | * from the loop so we'll fall into the undo loop | ||
1895 | * below. | ||
1896 | */ | ||
1897 | switch (msbp->msb_field) { | ||
1898 | #ifdef HAVE_PERCPU_SB | ||
1899 | case XFS_SBS_ICOUNT: | ||
1900 | case XFS_SBS_IFREE: | ||
1901 | case XFS_SBS_FDBLOCKS: | ||
1902 | if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) { | ||
1903 | spin_unlock(&mp->m_sb_lock); | ||
1904 | status = xfs_icsb_modify_counters(mp, | ||
1905 | msbp->msb_field, | ||
1906 | msbp->msb_delta, rsvd); | ||
1907 | spin_lock(&mp->m_sb_lock); | ||
1908 | break; | ||
1909 | } | ||
1910 | /* FALLTHROUGH */ | ||
1911 | #endif | ||
1912 | default: | ||
1913 | status = xfs_mod_incore_sb_unlocked(mp, | ||
1914 | msbp->msb_field, | ||
1915 | msbp->msb_delta, rsvd); | ||
1916 | break; | ||
1917 | } | ||
1918 | 1890 | ||
1919 | if (status != 0) { | 1891 | error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field, |
1920 | break; | 1892 | msbp->msb_delta, rsvd); |
1921 | } | 1893 | if (error) |
1894 | goto unwind; | ||
1922 | } | 1895 | } |
1896 | spin_unlock(&mp->m_sb_lock); | ||
1897 | return 0; | ||
1923 | 1898 | ||
1924 | /* | 1899 | unwind: |
1925 | * If we didn't complete the loop above, then back out | 1900 | while (--msbp >= msb) { |
1926 | * any changes made to the superblock. If you add code | 1901 | error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field, |
1927 | * between the loop above and here, make sure that you | 1902 | -msbp->msb_delta, rsvd); |
1928 | * preserve the value of status. Loop back until | 1903 | ASSERT(error == 0); |
1929 | * we step below the beginning of the array. Make sure | ||
1930 | * we don't touch anything back there. | ||
1931 | */ | ||
1932 | if (status != 0) { | ||
1933 | msbp--; | ||
1934 | while (msbp >= msb) { | ||
1935 | switch (msbp->msb_field) { | ||
1936 | #ifdef HAVE_PERCPU_SB | ||
1937 | case XFS_SBS_ICOUNT: | ||
1938 | case XFS_SBS_IFREE: | ||
1939 | case XFS_SBS_FDBLOCKS: | ||
1940 | if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) { | ||
1941 | spin_unlock(&mp->m_sb_lock); | ||
1942 | status = xfs_icsb_modify_counters(mp, | ||
1943 | msbp->msb_field, | ||
1944 | -(msbp->msb_delta), | ||
1945 | rsvd); | ||
1946 | spin_lock(&mp->m_sb_lock); | ||
1947 | break; | ||
1948 | } | ||
1949 | /* FALLTHROUGH */ | ||
1950 | #endif | ||
1951 | default: | ||
1952 | status = xfs_mod_incore_sb_unlocked(mp, | ||
1953 | msbp->msb_field, | ||
1954 | -(msbp->msb_delta), | ||
1955 | rsvd); | ||
1956 | break; | ||
1957 | } | ||
1958 | ASSERT(status == 0); | ||
1959 | msbp--; | ||
1960 | } | ||
1961 | } | 1904 | } |
1962 | spin_unlock(&mp->m_sb_lock); | 1905 | spin_unlock(&mp->m_sb_lock); |
1963 | return status; | 1906 | return error; |
1964 | } | 1907 | } |
1965 | 1908 | ||
1966 | /* | 1909 | /* |
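xfs_mod_incore_sb_batch() above now applies the deltas one by one under m_sb_lock and, on the first failure, walks back over the entries already applied and negates them, keeping the batch all-or-nothing without the old per-field switch. The same apply-or-unwind shape in a self-contained form (generic types, illustrative names):

struct delta {
    int         field;
    long long   change;
};

static int apply_batch(struct delta *d, unsigned int n,
                       int (*apply)(int field, long long change))
{
    struct delta *p;
    int error = 0;

    for (p = d; p < d + n; p++) {
        error = apply(p->field, p->change);
        if (error)
            goto unwind;
    }
    return 0;

unwind:
    /* back out what already succeeded, most recent entry first */
    while (--p >= d)
        apply(p->field, -p->change);
    return error;
}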
@@ -1998,18 +1941,13 @@ xfs_getsb( | |||
1998 | */ | 1941 | */ |
1999 | void | 1942 | void |
2000 | xfs_freesb( | 1943 | xfs_freesb( |
2001 | xfs_mount_t *mp) | 1944 | struct xfs_mount *mp) |
2002 | { | 1945 | { |
2003 | xfs_buf_t *bp; | 1946 | struct xfs_buf *bp = mp->m_sb_bp; |
2004 | 1947 | ||
2005 | /* | 1948 | xfs_buf_lock(bp); |
2006 | * Use xfs_getsb() so that the buffer will be locked | ||
2007 | * when we call xfs_buf_relse(). | ||
2008 | */ | ||
2009 | bp = xfs_getsb(mp, 0); | ||
2010 | XFS_BUF_UNMANAGE(bp); | ||
2011 | xfs_buf_relse(bp); | ||
2012 | mp->m_sb_bp = NULL; | 1949 | mp->m_sb_bp = NULL; |
1950 | xfs_buf_relse(bp); | ||
2013 | } | 1951 | } |
2014 | 1952 | ||
2015 | /* | 1953 | /* |
@@ -2496,7 +2434,7 @@ xfs_icsb_balance_counter( | |||
2496 | spin_unlock(&mp->m_sb_lock); | 2434 | spin_unlock(&mp->m_sb_lock); |
2497 | } | 2435 | } |
2498 | 2436 | ||
2499 | STATIC int | 2437 | int |
2500 | xfs_icsb_modify_counters( | 2438 | xfs_icsb_modify_counters( |
2501 | xfs_mount_t *mp, | 2439 | xfs_mount_t *mp, |
2502 | xfs_sb_field_t field, | 2440 | xfs_sb_field_t field, |
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 622da2179a57..5861b4980740 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h | |||
@@ -53,7 +53,6 @@ typedef struct xfs_trans_reservations { | |||
53 | 53 | ||
54 | #include "xfs_sync.h" | 54 | #include "xfs_sync.h" |
55 | 55 | ||
56 | struct cred; | ||
57 | struct log; | 56 | struct log; |
58 | struct xfs_mount_args; | 57 | struct xfs_mount_args; |
59 | struct xfs_inode; | 58 | struct xfs_inode; |
@@ -91,6 +90,8 @@ extern void xfs_icsb_reinit_counters(struct xfs_mount *); | |||
91 | extern void xfs_icsb_destroy_counters(struct xfs_mount *); | 90 | extern void xfs_icsb_destroy_counters(struct xfs_mount *); |
92 | extern void xfs_icsb_sync_counters(struct xfs_mount *, int); | 91 | extern void xfs_icsb_sync_counters(struct xfs_mount *, int); |
93 | extern void xfs_icsb_sync_counters_locked(struct xfs_mount *, int); | 92 | extern void xfs_icsb_sync_counters_locked(struct xfs_mount *, int); |
93 | extern int xfs_icsb_modify_counters(struct xfs_mount *, xfs_sb_field_t, | ||
94 | int64_t, int); | ||
94 | 95 | ||
95 | #else | 96 | #else |
96 | #define xfs_icsb_init_counters(mp) (0) | 97 | #define xfs_icsb_init_counters(mp) (0) |
@@ -98,6 +99,8 @@ extern void xfs_icsb_sync_counters_locked(struct xfs_mount *, int); | |||
98 | #define xfs_icsb_reinit_counters(mp) do { } while (0) | 99 | #define xfs_icsb_reinit_counters(mp) do { } while (0) |
99 | #define xfs_icsb_sync_counters(mp, flags) do { } while (0) | 100 | #define xfs_icsb_sync_counters(mp, flags) do { } while (0) |
100 | #define xfs_icsb_sync_counters_locked(mp, flags) do { } while (0) | 101 | #define xfs_icsb_sync_counters_locked(mp, flags) do { } while (0) |
102 | #define xfs_icsb_modify_counters(mp, field, delta, rsvd) \ | ||
103 | xfs_mod_incore_sb(mp, field, delta, rsvd) | ||
101 | #endif | 104 | #endif |
102 | 105 | ||
103 | typedef struct xfs_mount { | 106 | typedef struct xfs_mount { |
@@ -232,8 +235,6 @@ typedef struct xfs_mount { | |||
232 | #define XFS_MOUNT_DIRSYNC (1ULL << 21) /* synchronous directory ops */ | 235 | #define XFS_MOUNT_DIRSYNC (1ULL << 21) /* synchronous directory ops */ |
233 | #define XFS_MOUNT_COMPAT_IOSIZE (1ULL << 22) /* don't report large preferred | 236 | #define XFS_MOUNT_COMPAT_IOSIZE (1ULL << 22) /* don't report large preferred |
234 | * I/O size in stat() */ | 237 | * I/O size in stat() */ |
235 | #define XFS_MOUNT_NO_PERCPU_SB (1ULL << 23) /* don't use per-cpu superblock | ||
236 | counters */ | ||
237 | #define XFS_MOUNT_FILESTREAMS (1ULL << 24) /* enable the filestreams | 238 | #define XFS_MOUNT_FILESTREAMS (1ULL << 24) /* enable the filestreams |
238 | allocator */ | 239 | allocator */ |
239 | #define XFS_MOUNT_NOATTR2 (1ULL << 25) /* disable use of attr2 format */ | 240 | #define XFS_MOUNT_NOATTR2 (1ULL << 25) /* disable use of attr2 format */ |
@@ -327,6 +328,8 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d) | |||
327 | * perag get/put wrappers for ref counting | 328 | * perag get/put wrappers for ref counting |
328 | */ | 329 | */ |
329 | struct xfs_perag *xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno); | 330 | struct xfs_perag *xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno); |
331 | struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *mp, xfs_agnumber_t agno, | ||
332 | int tag); | ||
330 | void xfs_perag_put(struct xfs_perag *pag); | 333 | void xfs_perag_put(struct xfs_perag *pag); |
331 | 334 | ||
332 | /* | 335 | /* |
diff --git a/fs/xfs/xfs_refcache.h b/fs/xfs/xfs_refcache.h deleted file mode 100644 index 2dec79edb510..000000000000 --- a/fs/xfs/xfs_refcache.h +++ /dev/null | |||
@@ -1,52 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | #ifndef __XFS_REFCACHE_H__ | ||
19 | #define __XFS_REFCACHE_H__ | ||
20 | |||
21 | #ifdef HAVE_REFCACHE | ||
22 | /* | ||
23 | * Maximum size (in inodes) for the NFS reference cache | ||
24 | */ | ||
25 | #define XFS_REFCACHE_SIZE_MAX 512 | ||
26 | |||
27 | struct xfs_inode; | ||
28 | struct xfs_mount; | ||
29 | |||
30 | extern void xfs_refcache_insert(struct xfs_inode *); | ||
31 | extern void xfs_refcache_purge_ip(struct xfs_inode *); | ||
32 | extern void xfs_refcache_purge_mp(struct xfs_mount *); | ||
33 | extern void xfs_refcache_purge_some(struct xfs_mount *); | ||
34 | extern void xfs_refcache_resize(int); | ||
35 | extern void xfs_refcache_destroy(void); | ||
36 | |||
37 | extern void xfs_refcache_iunlock(struct xfs_inode *, uint); | ||
38 | |||
39 | #else | ||
40 | |||
41 | #define xfs_refcache_insert(ip) do { } while (0) | ||
42 | #define xfs_refcache_purge_ip(ip) do { } while (0) | ||
43 | #define xfs_refcache_purge_mp(mp) do { } while (0) | ||
44 | #define xfs_refcache_purge_some(mp) do { } while (0) | ||
45 | #define xfs_refcache_resize(size) do { } while (0) | ||
46 | #define xfs_refcache_destroy() do { } while (0) | ||
47 | |||
48 | #define xfs_refcache_iunlock(ip, flags) xfs_iunlock(ip, flags) | ||
49 | |||
50 | #endif | ||
51 | |||
52 | #endif /* __XFS_REFCACHE_H__ */ | ||
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c index 8fca957200df..d2af0a8381a6 100644 --- a/fs/xfs/xfs_rename.c +++ b/fs/xfs/xfs_rename.c | |||
@@ -183,7 +183,7 @@ xfs_rename( | |||
183 | * tree quota mechanism would be circumvented. | 183 | * tree quota mechanism would be circumvented. |
184 | */ | 184 | */ |
185 | if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && | 185 | if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && |
186 | (target_dp->i_d.di_projid != src_ip->i_d.di_projid))) { | 186 | (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) { |
187 | error = XFS_ERROR(EXDEV); | 187 | error = XFS_ERROR(EXDEV); |
188 | goto error_return; | 188 | goto error_return; |
189 | } | 189 | } |
@@ -211,7 +211,9 @@ xfs_rename( | |||
211 | goto error_return; | 211 | goto error_return; |
212 | if (error) | 212 | if (error) |
213 | goto abort_return; | 213 | goto abort_return; |
214 | xfs_ichgtime(target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 214 | |
215 | xfs_trans_ichgtime(tp, target_dp, | ||
216 | XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | ||
215 | 217 | ||
216 | if (new_parent && src_is_directory) { | 218 | if (new_parent && src_is_directory) { |
217 | error = xfs_bumplink(tp, target_dp); | 219 | error = xfs_bumplink(tp, target_dp); |
@@ -249,7 +251,9 @@ xfs_rename( | |||
249 | &first_block, &free_list, spaceres); | 251 | &first_block, &free_list, spaceres); |
250 | if (error) | 252 | if (error) |
251 | goto abort_return; | 253 | goto abort_return; |
252 | xfs_ichgtime(target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 254 | |
255 | xfs_trans_ichgtime(tp, target_dp, | ||
256 | XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | ||
253 | 257 | ||
254 | /* | 258 | /* |
255 | * Decrement the link count on the target since the target | 259 | * Decrement the link count on the target since the target |
@@ -292,7 +296,7 @@ xfs_rename( | |||
292 | * inode isn't really being changed, but old unix file systems did | 296 | * inode isn't really being changed, but old unix file systems did |
293 | * it and some incremental backup programs won't work without it. | 297 | * it and some incremental backup programs won't work without it. |
294 | */ | 298 | */ |
295 | xfs_ichgtime(src_ip, XFS_ICHGTIME_CHG); | 299 | xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG); |
296 | 300 | ||
297 | /* | 301 | /* |
298 | * Adjust the link count on src_dp. This is necessary when | 302 | * Adjust the link count on src_dp. This is necessary when |
@@ -315,7 +319,7 @@ xfs_rename( | |||
315 | if (error) | 319 | if (error) |
316 | goto abort_return; | 320 | goto abort_return; |
317 | 321 | ||
318 | xfs_ichgtime(src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 322 | xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); |
319 | xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE); | 323 | xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE); |
320 | if (new_parent) | 324 | if (new_parent) |
321 | xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE); | 325 | xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE); |
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index 891260fea11e..12a191385310 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include "xfs_trans_space.h" | 39 | #include "xfs_trans_space.h" |
40 | #include "xfs_utils.h" | 40 | #include "xfs_utils.h" |
41 | #include "xfs_trace.h" | 41 | #include "xfs_trace.h" |
42 | #include "xfs_buf.h" | ||
42 | 43 | ||
43 | 44 | ||
44 | /* | 45 | /* |
@@ -1883,13 +1884,13 @@ xfs_growfs_rt( | |||
1883 | /* | 1884 | /* |
1884 | * Read in the last block of the device, make sure it exists. | 1885 | * Read in the last block of the device, make sure it exists. |
1885 | */ | 1886 | */ |
1886 | error = xfs_read_buf(mp, mp->m_rtdev_targp, | 1887 | bp = xfs_buf_read_uncached(mp, mp->m_rtdev_targp, |
1887 | XFS_FSB_TO_BB(mp, nrblocks - 1), | 1888 | XFS_FSB_TO_BB(mp, nrblocks - 1), |
1888 | XFS_FSB_TO_BB(mp, 1), 0, &bp); | 1889 | XFS_FSB_TO_B(mp, 1), 0); |
1889 | if (error) | 1890 | if (!bp) |
1890 | return error; | 1891 | return EIO; |
1891 | ASSERT(bp); | ||
1892 | xfs_buf_relse(bp); | 1892 | xfs_buf_relse(bp); |
1893 | |||
1893 | /* | 1894 | /* |
1894 | * Calculate new parameters. These are the final values to be reached. | 1895 | * Calculate new parameters. These are the final values to be reached. |
1895 | */ | 1896 | */ |
@@ -2215,7 +2216,6 @@ xfs_rtmount_init( | |||
2215 | { | 2216 | { |
2216 | xfs_buf_t *bp; /* buffer for last block of subvolume */ | 2217 | xfs_buf_t *bp; /* buffer for last block of subvolume */ |
2217 | xfs_daddr_t d; /* address of last block of subvolume */ | 2218 | xfs_daddr_t d; /* address of last block of subvolume */ |
2218 | int error; /* error return value */ | ||
2219 | xfs_sb_t *sbp; /* filesystem superblock copy in mount */ | 2219 | xfs_sb_t *sbp; /* filesystem superblock copy in mount */ |
2220 | 2220 | ||
2221 | sbp = &mp->m_sb; | 2221 | sbp = &mp->m_sb; |
@@ -2242,15 +2242,12 @@ xfs_rtmount_init( | |||
2242 | (unsigned long long) mp->m_sb.sb_rblocks); | 2242 | (unsigned long long) mp->m_sb.sb_rblocks); |
2243 | return XFS_ERROR(EFBIG); | 2243 | return XFS_ERROR(EFBIG); |
2244 | } | 2244 | } |
2245 | error = xfs_read_buf(mp, mp->m_rtdev_targp, | 2245 | bp = xfs_buf_read_uncached(mp, mp->m_rtdev_targp, |
2246 | d - XFS_FSB_TO_BB(mp, 1), | 2246 | d - XFS_FSB_TO_BB(mp, 1), |
2247 | XFS_FSB_TO_BB(mp, 1), 0, &bp); | 2247 | XFS_FSB_TO_B(mp, 1), 0); |
2248 | if (error) { | 2248 | if (!bp) { |
2249 | cmn_err(CE_WARN, | 2249 | cmn_err(CE_WARN, "XFS: realtime device size check failed"); |
2250 | "XFS: realtime mount -- xfs_read_buf failed, returned %d", error); | 2250 | return EIO; |
2251 | if (error == ENOSPC) | ||
2252 | return XFS_ERROR(EFBIG); | ||
2253 | return error; | ||
2254 | } | 2251 | } |
2255 | xfs_buf_relse(bp); | 2252 | xfs_buf_relse(bp); |
2256 | return 0; | 2253 | return 0; |
diff --git a/fs/xfs/xfs_sb.h b/fs/xfs/xfs_sb.h index 1b017c657494..1eb2ba586814 100644 --- a/fs/xfs/xfs_sb.h +++ b/fs/xfs/xfs_sb.h | |||
@@ -80,10 +80,12 @@ struct xfs_mount; | |||
80 | #define XFS_SB_VERSION2_RESERVED4BIT 0x00000004 | 80 | #define XFS_SB_VERSION2_RESERVED4BIT 0x00000004 |
81 | #define XFS_SB_VERSION2_ATTR2BIT 0x00000008 /* Inline attr rework */ | 81 | #define XFS_SB_VERSION2_ATTR2BIT 0x00000008 /* Inline attr rework */ |
82 | #define XFS_SB_VERSION2_PARENTBIT 0x00000010 /* parent pointers */ | 82 | #define XFS_SB_VERSION2_PARENTBIT 0x00000010 /* parent pointers */ |
83 | #define XFS_SB_VERSION2_PROJID32BIT 0x00000080 /* 32 bit project id */ | ||
83 | 84 | ||
84 | #define XFS_SB_VERSION2_OKREALFBITS \ | 85 | #define XFS_SB_VERSION2_OKREALFBITS \ |
85 | (XFS_SB_VERSION2_LAZYSBCOUNTBIT | \ | 86 | (XFS_SB_VERSION2_LAZYSBCOUNTBIT | \ |
86 | XFS_SB_VERSION2_ATTR2BIT) | 87 | XFS_SB_VERSION2_ATTR2BIT | \ |
88 | XFS_SB_VERSION2_PROJID32BIT) | ||
87 | #define XFS_SB_VERSION2_OKSASHFBITS \ | 89 | #define XFS_SB_VERSION2_OKSASHFBITS \ |
88 | (0) | 90 | (0) |
89 | #define XFS_SB_VERSION2_OKREALBITS \ | 91 | #define XFS_SB_VERSION2_OKREALBITS \ |
@@ -495,6 +497,12 @@ static inline void xfs_sb_version_removeattr2(xfs_sb_t *sbp) | |||
495 | sbp->sb_versionnum &= ~XFS_SB_VERSION_MOREBITSBIT; | 497 | sbp->sb_versionnum &= ~XFS_SB_VERSION_MOREBITSBIT; |
496 | } | 498 | } |
497 | 499 | ||
500 | static inline int xfs_sb_version_hasprojid32bit(xfs_sb_t *sbp) | ||
501 | { | ||
502 | return xfs_sb_version_hasmorebits(sbp) && | ||
503 | (sbp->sb_features2 & XFS_SB_VERSION2_PROJID32BIT); | ||
504 | } | ||
505 | |||
498 | /* | 506 | /* |
499 | * end of superblock version macros | 507 | * end of superblock version macros |
500 | */ | 508 | */ |
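The new XFS_SB_VERSION2_PROJID32BIT feature bit advertises 32-bit project ids; xfs_sb_version_hasprojid32bit() above is the check, and callers elsewhere in this series read the id through xfs_get_projid(). The assumption behind that accessor (a sketch, not code from the patch) is that the 32-bit value is carried as two 16-bit halves:

/* Sketch only: recombine a split project id; the parameter names are illustrative. */
static inline __uint32_t sketch_projid(__uint16_t projid_hi, __uint16_t projid_lo)
{
    return ((__uint32_t)projid_hi << 16) | projid_lo;
}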
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 1c47edaea0d2..f6d956b7711e 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c | |||
@@ -696,7 +696,7 @@ xfs_trans_reserve( | |||
696 | * fail if the count would go below zero. | 696 | * fail if the count would go below zero. |
697 | */ | 697 | */ |
698 | if (blocks > 0) { | 698 | if (blocks > 0) { |
699 | error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS, | 699 | error = xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS, |
700 | -((int64_t)blocks), rsvd); | 700 | -((int64_t)blocks), rsvd); |
701 | if (error != 0) { | 701 | if (error != 0) { |
702 | current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS); | 702 | current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS); |
@@ -767,7 +767,7 @@ undo_log: | |||
767 | 767 | ||
768 | undo_blocks: | 768 | undo_blocks: |
769 | if (blocks > 0) { | 769 | if (blocks > 0) { |
770 | (void) xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS, | 770 | xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS, |
771 | (int64_t)blocks, rsvd); | 771 | (int64_t)blocks, rsvd); |
772 | tp->t_blk_res = 0; | 772 | tp->t_blk_res = 0; |
773 | } | 773 | } |
@@ -1009,7 +1009,7 @@ void | |||
1009 | xfs_trans_unreserve_and_mod_sb( | 1009 | xfs_trans_unreserve_and_mod_sb( |
1010 | xfs_trans_t *tp) | 1010 | xfs_trans_t *tp) |
1011 | { | 1011 | { |
1012 | xfs_mod_sb_t msb[14]; /* If you add cases, add entries */ | 1012 | xfs_mod_sb_t msb[9]; /* If you add cases, add entries */ |
1013 | xfs_mod_sb_t *msbp; | 1013 | xfs_mod_sb_t *msbp; |
1014 | xfs_mount_t *mp = tp->t_mountp; | 1014 | xfs_mount_t *mp = tp->t_mountp; |
1015 | /* REFERENCED */ | 1015 | /* REFERENCED */ |
@@ -1017,55 +1017,61 @@ xfs_trans_unreserve_and_mod_sb( | |||
1017 | int rsvd; | 1017 | int rsvd; |
1018 | int64_t blkdelta = 0; | 1018 | int64_t blkdelta = 0; |
1019 | int64_t rtxdelta = 0; | 1019 | int64_t rtxdelta = 0; |
1020 | int64_t idelta = 0; | ||
1021 | int64_t ifreedelta = 0; | ||
1020 | 1022 | ||
1021 | msbp = msb; | 1023 | msbp = msb; |
1022 | rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0; | 1024 | rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0; |
1023 | 1025 | ||
1024 | /* calculate free blocks delta */ | 1026 | /* calculate deltas */ |
1025 | if (tp->t_blk_res > 0) | 1027 | if (tp->t_blk_res > 0) |
1026 | blkdelta = tp->t_blk_res; | 1028 | blkdelta = tp->t_blk_res; |
1027 | |||
1028 | if ((tp->t_fdblocks_delta != 0) && | 1029 | if ((tp->t_fdblocks_delta != 0) && |
1029 | (xfs_sb_version_haslazysbcount(&mp->m_sb) || | 1030 | (xfs_sb_version_haslazysbcount(&mp->m_sb) || |
1030 | (tp->t_flags & XFS_TRANS_SB_DIRTY))) | 1031 | (tp->t_flags & XFS_TRANS_SB_DIRTY))) |
1031 | blkdelta += tp->t_fdblocks_delta; | 1032 | blkdelta += tp->t_fdblocks_delta; |
1032 | 1033 | ||
1033 | if (blkdelta != 0) { | ||
1034 | msbp->msb_field = XFS_SBS_FDBLOCKS; | ||
1035 | msbp->msb_delta = blkdelta; | ||
1036 | msbp++; | ||
1037 | } | ||
1038 | |||
1039 | /* calculate free realtime extents delta */ | ||
1040 | if (tp->t_rtx_res > 0) | 1034 | if (tp->t_rtx_res > 0) |
1041 | rtxdelta = tp->t_rtx_res; | 1035 | rtxdelta = tp->t_rtx_res; |
1042 | |||
1043 | if ((tp->t_frextents_delta != 0) && | 1036 | if ((tp->t_frextents_delta != 0) && |
1044 | (tp->t_flags & XFS_TRANS_SB_DIRTY)) | 1037 | (tp->t_flags & XFS_TRANS_SB_DIRTY)) |
1045 | rtxdelta += tp->t_frextents_delta; | 1038 | rtxdelta += tp->t_frextents_delta; |
1046 | 1039 | ||
1040 | if (xfs_sb_version_haslazysbcount(&mp->m_sb) || | ||
1041 | (tp->t_flags & XFS_TRANS_SB_DIRTY)) { | ||
1042 | idelta = tp->t_icount_delta; | ||
1043 | ifreedelta = tp->t_ifree_delta; | ||
1044 | } | ||
1045 | |||
1046 | /* apply the per-cpu counters */ | ||
1047 | if (blkdelta) { | ||
1048 | error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, | ||
1049 | blkdelta, rsvd); | ||
1050 | if (error) | ||
1051 | goto out; | ||
1052 | } | ||
1053 | |||
1054 | if (idelta) { | ||
1055 | error = xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT, | ||
1056 | idelta, rsvd); | ||
1057 | if (error) | ||
1058 | goto out_undo_fdblocks; | ||
1059 | } | ||
1060 | |||
1061 | if (ifreedelta) { | ||
1062 | error = xfs_icsb_modify_counters(mp, XFS_SBS_IFREE, | ||
1063 | ifreedelta, rsvd); | ||
1064 | if (error) | ||
1065 | goto out_undo_icount; | ||
1066 | } | ||
1067 | |||
1068 | /* apply remaining deltas */ | ||
1047 | if (rtxdelta != 0) { | 1069 | if (rtxdelta != 0) { |
1048 | msbp->msb_field = XFS_SBS_FREXTENTS; | 1070 | msbp->msb_field = XFS_SBS_FREXTENTS; |
1049 | msbp->msb_delta = rtxdelta; | 1071 | msbp->msb_delta = rtxdelta; |
1050 | msbp++; | 1072 | msbp++; |
1051 | } | 1073 | } |
1052 | 1074 | ||
1053 | /* apply remaining deltas */ | ||
1054 | |||
1055 | if (xfs_sb_version_haslazysbcount(&mp->m_sb) || | ||
1056 | (tp->t_flags & XFS_TRANS_SB_DIRTY)) { | ||
1057 | if (tp->t_icount_delta != 0) { | ||
1058 | msbp->msb_field = XFS_SBS_ICOUNT; | ||
1059 | msbp->msb_delta = tp->t_icount_delta; | ||
1060 | msbp++; | ||
1061 | } | ||
1062 | if (tp->t_ifree_delta != 0) { | ||
1063 | msbp->msb_field = XFS_SBS_IFREE; | ||
1064 | msbp->msb_delta = tp->t_ifree_delta; | ||
1065 | msbp++; | ||
1066 | } | ||
1067 | } | ||
1068 | |||
1069 | if (tp->t_flags & XFS_TRANS_SB_DIRTY) { | 1075 | if (tp->t_flags & XFS_TRANS_SB_DIRTY) { |
1070 | if (tp->t_dblocks_delta != 0) { | 1076 | if (tp->t_dblocks_delta != 0) { |
1071 | msbp->msb_field = XFS_SBS_DBLOCKS; | 1077 | msbp->msb_field = XFS_SBS_DBLOCKS; |
@@ -1115,8 +1121,24 @@ xfs_trans_unreserve_and_mod_sb( | |||
1115 | if (msbp > msb) { | 1121 | if (msbp > msb) { |
1116 | error = xfs_mod_incore_sb_batch(tp->t_mountp, msb, | 1122 | error = xfs_mod_incore_sb_batch(tp->t_mountp, msb, |
1117 | (uint)(msbp - msb), rsvd); | 1123 | (uint)(msbp - msb), rsvd); |
1118 | ASSERT(error == 0); | 1124 | if (error) |
1125 | goto out_undo_ifreecount; | ||
1119 | } | 1126 | } |
1127 | |||
1128 | return; | ||
1129 | |||
1130 | out_undo_ifreecount: | ||
1131 | if (ifreedelta) | ||
1132 | xfs_icsb_modify_counters(mp, XFS_SBS_IFREE, -ifreedelta, rsvd); | ||
1133 | out_undo_icount: | ||
1134 | if (idelta) | ||
1135 | xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT, -idelta, rsvd); | ||
1136 | out_undo_fdblocks: | ||
1137 | if (blkdelta) | ||
1138 | xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -blkdelta, rsvd); | ||
1139 | out: | ||
1140 | ASSERT(error == 0); | ||
1141 | return; | ||
1120 | } | 1142 | } |
1121 | 1143 | ||
1122 | /* | 1144 | /* |
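The per-cpu counter updates above are applied eagerly and rolled back through a chain of labels: each failure point jumps to a label that undoes only the steps that had already succeeded, in reverse order, before falling through to the final assert. The general shape of such an unwind ladder (all names below are illustrative placeholders):

static int sketch_three_steps(void)
{
    int error;

    error = step_a();           /* placeholders throughout */
    if (error)
        goto out;
    error = step_b();
    if (error)
        goto undo_a;
    error = step_c();
    if (error)
        goto undo_b;
    return 0;

undo_b:
    undo_step_b();
undo_a:
    undo_step_a();
out:
    return error;
}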
@@ -1389,15 +1411,12 @@ xfs_trans_item_committed( | |||
1389 | */ | 1411 | */ |
1390 | STATIC void | 1412 | STATIC void |
1391 | xfs_trans_committed( | 1413 | xfs_trans_committed( |
1392 | struct xfs_trans *tp, | 1414 | void *arg, |
1393 | int abortflag) | 1415 | int abortflag) |
1394 | { | 1416 | { |
1417 | struct xfs_trans *tp = arg; | ||
1395 | struct xfs_log_item_desc *lidp, *next; | 1418 | struct xfs_log_item_desc *lidp, *next; |
1396 | 1419 | ||
1397 | /* Call the transaction's completion callback if there is one. */ | ||
1398 | if (tp->t_callback != NULL) | ||
1399 | tp->t_callback(tp, tp->t_callarg); | ||
1400 | |||
1401 | list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) { | 1420 | list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) { |
1402 | xfs_trans_item_committed(lidp->lid_item, tp->t_lsn, abortflag); | 1421 | xfs_trans_item_committed(lidp->lid_item, tp->t_lsn, abortflag); |
1403 | xfs_trans_free_item_desc(lidp); | 1422 | xfs_trans_free_item_desc(lidp); |
@@ -1525,7 +1544,7 @@ xfs_trans_commit_iclog( | |||
1525 | * running in simulation mode (the log is explicitly turned | 1544 | * running in simulation mode (the log is explicitly turned |
1526 | * off). | 1545 | * off). |
1527 | */ | 1546 | */ |
1528 | tp->t_logcb.cb_func = (void(*)(void*, int))xfs_trans_committed; | 1547 | tp->t_logcb.cb_func = xfs_trans_committed; |
1529 | tp->t_logcb.cb_arg = tp; | 1548 | tp->t_logcb.cb_arg = tp; |
1530 | 1549 | ||
1531 | /* | 1550 | /* |
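Changing xfs_trans_committed() to take void * makes it match the log callback prototype directly, so the function-pointer cast at the registration site above disappears and the callback recovers its typed argument with an ordinary assignment. The pattern in isolation (illustrative names):

struct unit {
    int payload;
};

/* the callback slot expects this generic signature */
typedef void (*commit_cb_t)(void *arg, int aborted);

static void unit_committed(void *arg, int aborted)
{
    struct unit *u = arg;       /* typed again, no cast needed at the caller */

    if (!aborted)
        (void)u->payload;       /* ... process the committed unit ... */
}

static void register_commit_cb(commit_cb_t *func_slot, void **arg_slot,
                               struct unit *u)
{
    *func_slot = unit_committed;
    *arg_slot = u;
}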
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index c13c0f97b494..246286b77a86 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h | |||
@@ -399,8 +399,6 @@ typedef struct xfs_trans { | |||
399 | * transaction. */ | 399 | * transaction. */ |
400 | struct xfs_mount *t_mountp; /* ptr to fs mount struct */ | 400 | struct xfs_mount *t_mountp; /* ptr to fs mount struct */ |
401 | struct xfs_dquot_acct *t_dqinfo; /* acctg info for dquots */ | 401 | struct xfs_dquot_acct *t_dqinfo; /* acctg info for dquots */ |
402 | xfs_trans_callback_t t_callback; /* transaction callback */ | ||
403 | void *t_callarg; /* callback arg */ | ||
404 | unsigned int t_flags; /* misc flags */ | 402 | unsigned int t_flags; /* misc flags */ |
405 | int64_t t_icount_delta; /* superblock icount change */ | 403 | int64_t t_icount_delta; /* superblock icount change */ |
406 | int64_t t_ifree_delta; /* superblock ifree change */ | 404 | int64_t t_ifree_delta; /* superblock ifree change */ |
@@ -473,6 +471,7 @@ void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint); | |||
473 | void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *); | 471 | void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *); |
474 | int xfs_trans_iget(struct xfs_mount *, xfs_trans_t *, | 472 | int xfs_trans_iget(struct xfs_mount *, xfs_trans_t *, |
475 | xfs_ino_t , uint, uint, struct xfs_inode **); | 473 | xfs_ino_t , uint, uint, struct xfs_inode **); |
474 | void xfs_trans_ichgtime(struct xfs_trans *, struct xfs_inode *, int); | ||
476 | void xfs_trans_ijoin_ref(struct xfs_trans *, struct xfs_inode *, uint); | 475 | void xfs_trans_ijoin_ref(struct xfs_trans *, struct xfs_inode *, uint); |
477 | void xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *); | 476 | void xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *); |
478 | void xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint); | 477 | void xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint); |
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index 90af025e6839..c47918c302a5 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c | |||
@@ -336,7 +336,7 @@ xfs_trans_read_buf( | |||
336 | ASSERT(!XFS_BUF_ISASYNC(bp)); | 336 | ASSERT(!XFS_BUF_ISASYNC(bp)); |
337 | XFS_BUF_READ(bp); | 337 | XFS_BUF_READ(bp); |
338 | xfsbdstrat(tp->t_mountp, bp); | 338 | xfsbdstrat(tp->t_mountp, bp); |
339 | error = xfs_iowait(bp); | 339 | error = xfs_buf_iowait(bp); |
340 | if (error) { | 340 | if (error) { |
341 | xfs_ioerror_alert("xfs_trans_read_buf", mp, | 341 | xfs_ioerror_alert("xfs_trans_read_buf", mp, |
342 | bp, blkno); | 342 | bp, blkno); |
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c index cdc53a1050c5..ccb34532768b 100644 --- a/fs/xfs/xfs_trans_inode.c +++ b/fs/xfs/xfs_trans_inode.c | |||
@@ -118,6 +118,36 @@ xfs_trans_ijoin_ref( | |||
118 | } | 118 | } |
119 | 119 | ||
120 | /* | 120 | /* |
121 | * Transactional inode timestamp update. Requires the inode to be locked and | ||
122 | * joined to the transaction supplied. Relies on the transaction subsystem to | ||
123 | * track dirty state and update/writeback the inode accordingly. | ||
124 | */ | ||
125 | void | ||
126 | xfs_trans_ichgtime( | ||
127 | struct xfs_trans *tp, | ||
128 | struct xfs_inode *ip, | ||
129 | int flags) | ||
130 | { | ||
131 | struct inode *inode = VFS_I(ip); | ||
132 | timespec_t tv; | ||
133 | |||
134 | ASSERT(tp); | ||
135 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | ||
136 | ASSERT(ip->i_transp == tp); | ||
137 | |||
138 | tv = current_fs_time(inode->i_sb); | ||
139 | |||
140 | if ((flags & XFS_ICHGTIME_MOD) && | ||
141 | !timespec_equal(&inode->i_mtime, &tv)) { | ||
142 | inode->i_mtime = tv; | ||
143 | } | ||
144 | if ((flags & XFS_ICHGTIME_CHG) && | ||
145 | !timespec_equal(&inode->i_ctime, &tv)) { | ||
146 | inode->i_ctime = tv; | ||
147 | } | ||
148 | } | ||
149 | |||
150 | /* | ||
121 | * This is called to mark the fields indicated in fieldmask as needing | 151 | * This is called to mark the fields indicated in fieldmask as needing |
122 | * to be logged when the transaction is committed. The inode must | 152 | * to be logged when the transaction is committed. The inode must |
123 | * already be associated with the given transaction. | 153 | * already be associated with the given transaction. |
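xfs_trans_ichgtime() above replaces the old untracked xfs_ichgtime() in every transactional caller in this series; the inode must already be locked and joined to the transaction, and the caller still logs the inode core itself. A usage sketch based on the shape of the converted callers later in this diff:

static void sketch_touch_dir(struct xfs_trans *tp, struct xfs_inode *dp)
{
    /* update mtime/ctime inside the transaction ... */
    xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
    /* ... and log the inode core so the change is committed with it */
    xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
}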
diff --git a/fs/xfs/xfs_types.h b/fs/xfs/xfs_types.h index 320775295e32..26d1867d8156 100644 --- a/fs/xfs/xfs_types.h +++ b/fs/xfs/xfs_types.h | |||
@@ -73,8 +73,6 @@ typedef __int32_t xfs_tid_t; /* transaction identifier */ | |||
73 | typedef __uint32_t xfs_dablk_t; /* dir/attr block number (in file) */ | 73 | typedef __uint32_t xfs_dablk_t; /* dir/attr block number (in file) */ |
74 | typedef __uint32_t xfs_dahash_t; /* dir/attr hash value */ | 74 | typedef __uint32_t xfs_dahash_t; /* dir/attr hash value */ |
75 | 75 | ||
76 | typedef __uint16_t xfs_prid_t; /* prid_t truncated to 16bits in XFS */ | ||
77 | |||
78 | typedef __uint32_t xlog_tid_t; /* transaction ID type */ | 76 | typedef __uint32_t xlog_tid_t; /* transaction ID type */ |
79 | 77 | ||
80 | /* | 78 | /* |
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c index b7d5769d2df0..8b32d1a4c5a1 100644 --- a/fs/xfs/xfs_utils.c +++ b/fs/xfs/xfs_utils.c | |||
@@ -56,7 +56,6 @@ xfs_dir_ialloc( | |||
56 | mode_t mode, | 56 | mode_t mode, |
57 | xfs_nlink_t nlink, | 57 | xfs_nlink_t nlink, |
58 | xfs_dev_t rdev, | 58 | xfs_dev_t rdev, |
59 | cred_t *credp, | ||
60 | prid_t prid, /* project id */ | 59 | prid_t prid, /* project id */ |
61 | int okalloc, /* ok to allocate new space */ | 60 | int okalloc, /* ok to allocate new space */ |
62 | xfs_inode_t **ipp, /* pointer to inode; it will be | 61 | xfs_inode_t **ipp, /* pointer to inode; it will be |
@@ -93,7 +92,7 @@ xfs_dir_ialloc( | |||
93 | * transaction commit so that no other process can steal | 92 | * transaction commit so that no other process can steal |
94 | * the inode(s) that we've just allocated. | 93 | * the inode(s) that we've just allocated. |
95 | */ | 94 | */ |
96 | code = xfs_ialloc(tp, dp, mode, nlink, rdev, credp, prid, okalloc, | 95 | code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc, |
97 | &ialloc_context, &call_again, &ip); | 96 | &ialloc_context, &call_again, &ip); |
98 | 97 | ||
99 | /* | 98 | /* |
@@ -197,7 +196,7 @@ xfs_dir_ialloc( | |||
197 | * other allocations in this allocation group, | 196 | * other allocations in this allocation group, |
198 | * this call should always succeed. | 197 | * this call should always succeed. |
199 | */ | 198 | */ |
200 | code = xfs_ialloc(tp, dp, mode, nlink, rdev, credp, prid, | 199 | code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, |
201 | okalloc, &ialloc_context, &call_again, &ip); | 200 | okalloc, &ialloc_context, &call_again, &ip); |
202 | 201 | ||
203 | /* | 202 | /* |
@@ -235,7 +234,7 @@ xfs_droplink( | |||
235 | { | 234 | { |
236 | int error; | 235 | int error; |
237 | 236 | ||
238 | xfs_ichgtime(ip, XFS_ICHGTIME_CHG); | 237 | xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); |
239 | 238 | ||
240 | ASSERT (ip->i_d.di_nlink > 0); | 239 | ASSERT (ip->i_d.di_nlink > 0); |
241 | ip->i_d.di_nlink--; | 240 | ip->i_d.di_nlink--; |
@@ -299,7 +298,7 @@ xfs_bumplink( | |||
299 | { | 298 | { |
300 | if (ip->i_d.di_nlink >= XFS_MAXLINK) | 299 | if (ip->i_d.di_nlink >= XFS_MAXLINK) |
301 | return XFS_ERROR(EMLINK); | 300 | return XFS_ERROR(EMLINK); |
302 | xfs_ichgtime(ip, XFS_ICHGTIME_CHG); | 301 | xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); |
303 | 302 | ||
304 | ASSERT(ip->i_d.di_nlink > 0); | 303 | ASSERT(ip->i_d.di_nlink > 0); |
305 | ip->i_d.di_nlink++; | 304 | ip->i_d.di_nlink++; |
diff --git a/fs/xfs/xfs_utils.h b/fs/xfs/xfs_utils.h index f55b9678264f..456fca314933 100644 --- a/fs/xfs/xfs_utils.h +++ b/fs/xfs/xfs_utils.h | |||
@@ -19,8 +19,7 @@ | |||
19 | #define __XFS_UTILS_H__ | 19 | #define __XFS_UTILS_H__ |
20 | 20 | ||
21 | extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t, | 21 | extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t, |
22 | xfs_dev_t, cred_t *, prid_t, int, | 22 | xfs_dev_t, prid_t, int, xfs_inode_t **, int *); |
23 | xfs_inode_t **, int *); | ||
24 | extern int xfs_droplink(xfs_trans_t *, xfs_inode_t *); | 23 | extern int xfs_droplink(xfs_trans_t *, xfs_inode_t *); |
25 | extern int xfs_bumplink(xfs_trans_t *, xfs_inode_t *); | 24 | extern int xfs_bumplink(xfs_trans_t *, xfs_inode_t *); |
26 | extern void xfs_bump_ino_vers2(xfs_trans_t *, xfs_inode_t *); | 25 | extern void xfs_bump_ino_vers2(xfs_trans_t *, xfs_inode_t *); |
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 4c7c7bfb2b2f..8e4a63c4151a 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
@@ -114,7 +114,7 @@ xfs_setattr( | |||
114 | */ | 114 | */ |
115 | ASSERT(udqp == NULL); | 115 | ASSERT(udqp == NULL); |
116 | ASSERT(gdqp == NULL); | 116 | ASSERT(gdqp == NULL); |
117 | code = xfs_qm_vop_dqalloc(ip, uid, gid, ip->i_d.di_projid, | 117 | code = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip), |
118 | qflags, &udqp, &gdqp); | 118 | qflags, &udqp, &gdqp); |
119 | if (code) | 119 | if (code) |
120 | return code; | 120 | return code; |
@@ -184,8 +184,11 @@ xfs_setattr( | |||
184 | ip->i_size == 0 && ip->i_d.di_nextents == 0) { | 184 | ip->i_size == 0 && ip->i_d.di_nextents == 0) { |
185 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 185 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
186 | lock_flags &= ~XFS_ILOCK_EXCL; | 186 | lock_flags &= ~XFS_ILOCK_EXCL; |
187 | if (mask & ATTR_CTIME) | 187 | if (mask & ATTR_CTIME) { |
188 | xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 188 | inode->i_mtime = inode->i_ctime = |
189 | current_fs_time(inode->i_sb); | ||
190 | xfs_mark_inode_dirty_sync(ip); | ||
191 | } | ||
189 | code = 0; | 192 | code = 0; |
190 | goto error_return; | 193 | goto error_return; |
191 | } | 194 | } |
@@ -1253,8 +1256,7 @@ xfs_create( | |||
1253 | struct xfs_name *name, | 1256 | struct xfs_name *name, |
1254 | mode_t mode, | 1257 | mode_t mode, |
1255 | xfs_dev_t rdev, | 1258 | xfs_dev_t rdev, |
1256 | xfs_inode_t **ipp, | 1259 | xfs_inode_t **ipp) |
1257 | cred_t *credp) | ||
1258 | { | 1260 | { |
1259 | int is_dir = S_ISDIR(mode); | 1261 | int is_dir = S_ISDIR(mode); |
1260 | struct xfs_mount *mp = dp->i_mount; | 1262 | struct xfs_mount *mp = dp->i_mount; |
@@ -1266,7 +1268,7 @@ xfs_create( | |||
1266 | boolean_t unlock_dp_on_error = B_FALSE; | 1268 | boolean_t unlock_dp_on_error = B_FALSE; |
1267 | uint cancel_flags; | 1269 | uint cancel_flags; |
1268 | int committed; | 1270 | int committed; |
1269 | xfs_prid_t prid; | 1271 | prid_t prid; |
1270 | struct xfs_dquot *udqp = NULL; | 1272 | struct xfs_dquot *udqp = NULL; |
1271 | struct xfs_dquot *gdqp = NULL; | 1273 | struct xfs_dquot *gdqp = NULL; |
1272 | uint resblks; | 1274 | uint resblks; |
@@ -1279,9 +1281,9 @@ xfs_create( | |||
1279 | return XFS_ERROR(EIO); | 1281 | return XFS_ERROR(EIO); |
1280 | 1282 | ||
1281 | if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) | 1283 | if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) |
1282 | prid = dp->i_d.di_projid; | 1284 | prid = xfs_get_projid(dp); |
1283 | else | 1285 | else |
1284 | prid = dfltprid; | 1286 | prid = XFS_PROJID_DEFAULT; |
1285 | 1287 | ||
1286 | /* | 1288 | /* |
1287 | * Make sure that we have allocated dquot(s) on disk. | 1289 | * Make sure that we have allocated dquot(s) on disk. |
@@ -1360,7 +1362,7 @@ xfs_create( | |||
1360 | * entry pointing to them, but a directory also the "." entry | 1362 | * entry pointing to them, but a directory also the "." entry |
1361 | * pointing to itself. | 1363 | * pointing to itself. |
1362 | */ | 1364 | */ |
1363 | error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, credp, | 1365 | error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, |
1364 | prid, resblks > 0, &ip, &committed); | 1366 | prid, resblks > 0, &ip, &committed); |
1365 | if (error) { | 1367 | if (error) { |
1366 | if (error == ENOSPC) | 1368 | if (error == ENOSPC) |
@@ -1391,7 +1393,7 @@ xfs_create( | |||
1391 | ASSERT(error != ENOSPC); | 1393 | ASSERT(error != ENOSPC); |
1392 | goto out_trans_abort; | 1394 | goto out_trans_abort; |
1393 | } | 1395 | } |
1394 | xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 1396 | xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); |
1395 | xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); | 1397 | xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); |
1396 | 1398 | ||
1397 | if (is_dir) { | 1399 | if (is_dir) { |
@@ -1742,7 +1744,7 @@ xfs_remove( | |||
1742 | ASSERT(error != ENOENT); | 1744 | ASSERT(error != ENOENT); |
1743 | goto out_bmap_cancel; | 1745 | goto out_bmap_cancel; |
1744 | } | 1746 | } |
1745 | xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 1747 | xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); |
1746 | 1748 | ||
1747 | if (is_dir) { | 1749 | if (is_dir) { |
1748 | /* | 1750 | /* |
@@ -1880,7 +1882,7 @@ xfs_link( | |||
1880 | * the tree quota mechanism could be circumvented. | 1882 | * the tree quota mechanism could be circumvented. |
1881 | */ | 1883 | */ |
1882 | if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && | 1884 | if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && |
1883 | (tdp->i_d.di_projid != sip->i_d.di_projid))) { | 1885 | (xfs_get_projid(tdp) != xfs_get_projid(sip)))) { |
1884 | error = XFS_ERROR(EXDEV); | 1886 | error = XFS_ERROR(EXDEV); |
1885 | goto error_return; | 1887 | goto error_return; |
1886 | } | 1888 | } |
@@ -1895,7 +1897,7 @@ xfs_link( | |||
1895 | &first_block, &free_list, resblks); | 1897 | &first_block, &free_list, resblks); |
1896 | if (error) | 1898 | if (error) |
1897 | goto abort_return; | 1899 | goto abort_return; |
1898 | xfs_ichgtime(tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 1900 | xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); |
1899 | xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE); | 1901 | xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE); |
1900 | 1902 | ||
1901 | error = xfs_bumplink(tp, sip); | 1903 | error = xfs_bumplink(tp, sip); |
@@ -1933,8 +1935,7 @@ xfs_symlink( | |||
1933 | struct xfs_name *link_name, | 1935 | struct xfs_name *link_name, |
1934 | const char *target_path, | 1936 | const char *target_path, |
1935 | mode_t mode, | 1937 | mode_t mode, |
1936 | xfs_inode_t **ipp, | 1938 | xfs_inode_t **ipp) |
1937 | cred_t *credp) | ||
1938 | { | 1939 | { |
1939 | xfs_mount_t *mp = dp->i_mount; | 1940 | xfs_mount_t *mp = dp->i_mount; |
1940 | xfs_trans_t *tp; | 1941 | xfs_trans_t *tp; |
@@ -1955,7 +1956,7 @@ xfs_symlink( | |||
1955 | int byte_cnt; | 1956 | int byte_cnt; |
1956 | int n; | 1957 | int n; |
1957 | xfs_buf_t *bp; | 1958 | xfs_buf_t *bp; |
1958 | xfs_prid_t prid; | 1959 | prid_t prid; |
1959 | struct xfs_dquot *udqp, *gdqp; | 1960 | struct xfs_dquot *udqp, *gdqp; |
1960 | uint resblks; | 1961 | uint resblks; |
1961 | 1962 | ||
@@ -1978,9 +1979,9 @@ xfs_symlink( | |||
1978 | 1979 | ||
1979 | udqp = gdqp = NULL; | 1980 | udqp = gdqp = NULL; |
1980 | if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) | 1981 | if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) |
1981 | prid = dp->i_d.di_projid; | 1982 | prid = xfs_get_projid(dp); |
1982 | else | 1983 | else |
1983 | prid = (xfs_prid_t)dfltprid; | 1984 | prid = XFS_PROJID_DEFAULT; |
1984 | 1985 | ||
1985 | /* | 1986 | /* |
1986 | * Make sure that we have allocated dquot(s) on disk. | 1987 | * Make sure that we have allocated dquot(s) on disk. |
@@ -2046,8 +2047,8 @@ xfs_symlink( | |||
2046 | /* | 2047 | /* |
2047 | * Allocate an inode for the symlink. | 2048 | * Allocate an inode for the symlink. |
2048 | */ | 2049 | */ |
2049 | error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), | 2050 | error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0, |
2050 | 1, 0, credp, prid, resblks > 0, &ip, NULL); | 2051 | prid, resblks > 0, &ip, NULL); |
2051 | if (error) { | 2052 | if (error) { |
2052 | if (error == ENOSPC) | 2053 | if (error == ENOSPC) |
2053 | goto error_return; | 2054 | goto error_return; |
@@ -2129,7 +2130,7 @@ xfs_symlink( | |||
2129 | &first_block, &free_list, resblks); | 2130 | &first_block, &free_list, resblks); |
2130 | if (error) | 2131 | if (error) |
2131 | goto error1; | 2132 | goto error1; |
2132 | xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 2133 | xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); |
2133 | xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); | 2134 | xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); |
2134 | 2135 | ||
2135 | /* | 2136 | /* |
@@ -2272,7 +2273,7 @@ xfs_alloc_file_space( | |||
2272 | count = len; | 2273 | count = len; |
2273 | imapp = &imaps[0]; | 2274 | imapp = &imaps[0]; |
2274 | nimaps = 1; | 2275 | nimaps = 1; |
2275 | bmapi_flag = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0); | 2276 | bmapi_flag = XFS_BMAPI_WRITE | alloc_type; |
2276 | startoffset_fsb = XFS_B_TO_FSBT(mp, offset); | 2277 | startoffset_fsb = XFS_B_TO_FSBT(mp, offset); |
2277 | allocatesize_fsb = XFS_B_TO_FSB(mp, count); | 2278 | allocatesize_fsb = XFS_B_TO_FSB(mp, count); |
2278 | 2279 | ||
@@ -2431,9 +2432,9 @@ xfs_zero_remaining_bytes( | |||
2431 | if (endoff > ip->i_size) | 2432 | if (endoff > ip->i_size) |
2432 | endoff = ip->i_size; | 2433 | endoff = ip->i_size; |
2433 | 2434 | ||
2434 | bp = xfs_buf_get_noaddr(mp->m_sb.sb_blocksize, | 2435 | bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ? |
2435 | XFS_IS_REALTIME_INODE(ip) ? | 2436 | mp->m_rtdev_targp : mp->m_ddev_targp, |
2436 | mp->m_rtdev_targp : mp->m_ddev_targp); | 2437 | mp->m_sb.sb_blocksize, XBF_DONT_BLOCK); |
2437 | if (!bp) | 2438 | if (!bp) |
2438 | return XFS_ERROR(ENOMEM); | 2439 | return XFS_ERROR(ENOMEM); |
2439 | 2440 | ||
@@ -2459,7 +2460,7 @@ xfs_zero_remaining_bytes( | |||
2459 | XFS_BUF_READ(bp); | 2460 | XFS_BUF_READ(bp); |
2460 | XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock)); | 2461 | XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock)); |
2461 | xfsbdstrat(mp, bp); | 2462 | xfsbdstrat(mp, bp); |
2462 | error = xfs_iowait(bp); | 2463 | error = xfs_buf_iowait(bp); |
2463 | if (error) { | 2464 | if (error) { |
2464 | xfs_ioerror_alert("xfs_zero_remaining_bytes(read)", | 2465 | xfs_ioerror_alert("xfs_zero_remaining_bytes(read)", |
2465 | mp, bp, XFS_BUF_ADDR(bp)); | 2466 | mp, bp, XFS_BUF_ADDR(bp)); |
@@ -2472,7 +2473,7 @@ xfs_zero_remaining_bytes( | |||
2472 | XFS_BUF_UNREAD(bp); | 2473 | XFS_BUF_UNREAD(bp); |
2473 | XFS_BUF_WRITE(bp); | 2474 | XFS_BUF_WRITE(bp); |
2474 | xfsbdstrat(mp, bp); | 2475 | xfsbdstrat(mp, bp); |
2475 | error = xfs_iowait(bp); | 2476 | error = xfs_buf_iowait(bp); |
2476 | if (error) { | 2477 | if (error) { |
2477 | xfs_ioerror_alert("xfs_zero_remaining_bytes(write)", | 2478 | xfs_ioerror_alert("xfs_zero_remaining_bytes(write)", |
2478 | mp, bp, XFS_BUF_ADDR(bp)); | 2479 | mp, bp, XFS_BUF_ADDR(bp)); |
@@ -2711,6 +2712,7 @@ xfs_change_file_space( | |||
2711 | xfs_off_t llen; | 2712 | xfs_off_t llen; |
2712 | xfs_trans_t *tp; | 2713 | xfs_trans_t *tp; |
2713 | struct iattr iattr; | 2714 | struct iattr iattr; |
2715 | int prealloc_type; | ||
2714 | 2716 | ||
2715 | if (!S_ISREG(ip->i_d.di_mode)) | 2717 | if (!S_ISREG(ip->i_d.di_mode)) |
2716 | return XFS_ERROR(EINVAL); | 2718 | return XFS_ERROR(EINVAL); |
@@ -2753,12 +2755,17 @@ xfs_change_file_space( | |||
2753 | * size to be changed. | 2755 | * size to be changed. |
2754 | */ | 2756 | */ |
2755 | setprealloc = clrprealloc = 0; | 2757 | setprealloc = clrprealloc = 0; |
2758 | prealloc_type = XFS_BMAPI_PREALLOC; | ||
2756 | 2759 | ||
2757 | switch (cmd) { | 2760 | switch (cmd) { |
2761 | case XFS_IOC_ZERO_RANGE: | ||
2762 | prealloc_type |= XFS_BMAPI_CONVERT; | ||
2763 | xfs_tosspages(ip, startoffset, startoffset + bf->l_len, 0); | ||
2764 | /* FALLTHRU */ | ||
2758 | case XFS_IOC_RESVSP: | 2765 | case XFS_IOC_RESVSP: |
2759 | case XFS_IOC_RESVSP64: | 2766 | case XFS_IOC_RESVSP64: |
2760 | error = xfs_alloc_file_space(ip, startoffset, bf->l_len, | 2767 | error = xfs_alloc_file_space(ip, startoffset, bf->l_len, |
2761 | 1, attr_flags); | 2768 | prealloc_type, attr_flags); |
2762 | if (error) | 2769 | if (error) |
2763 | return error; | 2770 | return error; |
2764 | setprealloc = 1; | 2771 | setprealloc = 1; |
@@ -2827,7 +2834,7 @@ xfs_change_file_space( | |||
2827 | if (ip->i_d.di_mode & S_IXGRP) | 2834 | if (ip->i_d.di_mode & S_IXGRP) |
2828 | ip->i_d.di_mode &= ~S_ISGID; | 2835 | ip->i_d.di_mode &= ~S_ISGID; |
2829 | 2836 | ||
2830 | xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | 2837 | xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); |
2831 | } | 2838 | } |
2832 | if (setprealloc) | 2839 | if (setprealloc) |
2833 | ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC; | 2840 | ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC; |
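
The final xfs_change_file_space() hunk above adds an XFS_IOC_ZERO_RANGE case that tosses any cached pages over the range and then falls through into xfs_alloc_file_space() with XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT, so the affected range is preallocated and is meant to read back as zeroes. As a rough illustration of the two space-management ioctls involved, here is a minimal user-space sketch, not part of the patch: it assumes the xfsprogs development headers provide <xfs/xfs.h>, and the file name "testfile" is invented for the example.

/*
 * Sketch only: exercise XFS_IOC_RESVSP64 and the new XFS_IOC_ZERO_RANGE
 * from user space.  Assumes <xfs/xfs.h> from xfsprogs; "testfile" must
 * live on an XFS filesystem.
 */
#include <xfs/xfs.h>        /* xfs_flock64_t, XFS_IOC_RESVSP64, XFS_IOC_ZERO_RANGE */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        xfs_flock64_t fl;
        int fd = open("testfile", O_RDWR | O_CREAT, 0644);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&fl, 0, sizeof(fl));
        fl.l_whence = SEEK_SET;         /* l_start is an absolute offset */
        fl.l_start = 0;
        fl.l_len = 1 << 20;             /* operate on the first 1MB */

        /* reserve blocks for the range without changing the file size */
        if (ioctl(fd, XFS_IOC_RESVSP64, &fl) < 0)
                perror("XFS_IOC_RESVSP64");

        /* the new ioctl: make the same range read back as zeroes */
        if (ioctl(fd, XFS_IOC_ZERO_RANGE, &fl) < 0)
                perror("XFS_IOC_ZERO_RANGE");

        close(fd);
        return 0;
}

XFS_IOC_RESVSP64 leaves the reservation as unwritten extents; the intent of the new XFS_IOC_ZERO_RANGE path, per the XFS_BMAPI_CONVERT flag and the page tossing above, is that blocks already written inside the range no longer expose their old contents.
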
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h index d8dfa8d0dadd..f6702927eee4 100644 --- a/fs/xfs/xfs_vnodeops.h +++ b/fs/xfs/xfs_vnodeops.h | |||
@@ -2,7 +2,6 @@ | |||
2 | #define _XFS_VNODEOPS_H 1 | 2 | #define _XFS_VNODEOPS_H 1 |
3 | 3 | ||
4 | struct attrlist_cursor_kern; | 4 | struct attrlist_cursor_kern; |
5 | struct cred; | ||
6 | struct file; | 5 | struct file; |
7 | struct iattr; | 6 | struct iattr; |
8 | struct inode; | 7 | struct inode; |
@@ -26,7 +25,7 @@ int xfs_inactive(struct xfs_inode *ip); | |||
26 | int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name, | 25 | int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name, |
27 | struct xfs_inode **ipp, struct xfs_name *ci_name); | 26 | struct xfs_inode **ipp, struct xfs_name *ci_name); |
28 | int xfs_create(struct xfs_inode *dp, struct xfs_name *name, mode_t mode, | 27 | int xfs_create(struct xfs_inode *dp, struct xfs_name *name, mode_t mode, |
29 | xfs_dev_t rdev, struct xfs_inode **ipp, cred_t *credp); | 28 | xfs_dev_t rdev, struct xfs_inode **ipp); |
30 | int xfs_remove(struct xfs_inode *dp, struct xfs_name *name, | 29 | int xfs_remove(struct xfs_inode *dp, struct xfs_name *name, |
31 | struct xfs_inode *ip); | 30 | struct xfs_inode *ip); |
32 | int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip, | 31 | int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip, |
@@ -34,8 +33,7 @@ int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip, | |||
34 | int xfs_readdir(struct xfs_inode *dp, void *dirent, size_t bufsize, | 33 | int xfs_readdir(struct xfs_inode *dp, void *dirent, size_t bufsize, |
35 | xfs_off_t *offset, filldir_t filldir); | 34 | xfs_off_t *offset, filldir_t filldir); |
36 | int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name, | 35 | int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name, |
37 | const char *target_path, mode_t mode, struct xfs_inode **ipp, | 36 | const char *target_path, mode_t mode, struct xfs_inode **ipp); |
38 | cred_t *credp); | ||
39 | int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state); | 37 | int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state); |
40 | int xfs_change_file_space(struct xfs_inode *ip, int cmd, | 38 | int xfs_change_file_space(struct xfs_inode *ip, int cmd, |
41 | xfs_flock64_t *bf, xfs_off_t offset, int attr_flags); | 39 | xfs_flock64_t *bf, xfs_off_t offset, int attr_flags); |
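
The project quota changes in the xfs_vnodeops.c hunks above (prid_t, xfs_get_projid(), XFS_PROJID_DEFAULT) only read the project ID; the ID itself is assigned from user space through the fsxattr ioctls, and xfs_link() continues to reject cross-project links when XFS_DIFLAG_PROJINHERIT is set. Below is a hedged sketch, not taken from the patch, of tagging a directory so new children inherit its project ID; the directory name "projdir" and project ID 42 are made up, and <xfs/xfs.h> from xfsprogs is again assumed.

/*
 * Sketch only: set a project ID and the inherit flag on a directory.
 * Files later created underneath it pick the ID up through the
 * xfs_get_projid(dp) branch in xfs_create()/xfs_symlink() shown above.
 * Names and values here are illustrative.
 */
#include <xfs/xfs.h>        /* struct fsxattr, XFS_IOC_FS[GS]ETXATTR, XFS_XFLAG_PROJINHERIT */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct fsxattr fsx;
        int fd = open("projdir", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        if (ioctl(fd, XFS_IOC_FSGETXATTR, &fsx) < 0) {
                perror("XFS_IOC_FSGETXATTR");
                return 1;
        }

        fsx.fsx_projid = 42;                        /* hypothetical project ID */
        fsx.fsx_xflags |= XFS_XFLAG_PROJINHERIT;    /* children inherit it */

        if (ioctl(fd, XFS_IOC_FSSETXATTR, &fsx) < 0) {
                perror("XFS_IOC_FSSETXATTR");
                return 1;
        }

        close(fd);
        return 0;
}

With the inherit flag set, new inodes created under the directory take its project ID rather than XFS_PROJID_DEFAULT, which is exactly the branch rewritten in the xfs_create() and xfs_symlink() hunks above.
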