diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-07-09 15:29:12 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-07-09 15:29:12 -0400 |
commit | da89bd213fe719ec3552abbeb8be12d0cc0337ca (patch) | |
tree | 41d07c62c3e4c963504a72bce8043acdd4aa142d /fs/xfs | |
parent | be0c5d8c0bb0023e11f5c6d38e90f7b0f24edb64 (diff) | |
parent | 83e782e1a1cc0159888e58e14dfc8f3289663338 (diff) |
Merge tag 'for-linus-v3.11-rc1' of git://oss.sgi.com/xfs/xfs
Pull xfs update from Ben Myers:
"This includes several bugfixes, part of the work for project quotas
and group quotas to be used together, performance improvements for
inode creation/deletion, buffer readahead, and bulkstat,
implementation of the inode change count, an inode create transaction,
and the removal of a bunch of dead code.
There are also some duplicate commits that you already have from the
3.10-rc series.
- part of the work to allow project quotas and group quotas to be
used together
- inode change count
- inode create transaction
- block queue plugging in buffer readahead and bulkstat
- ordered log vector support
- removal of dead code in and around xfs_sync_inode_grab,
xfs_ialloc_get_rec, XFS_MOUNT_RETERR, XFS_ALLOCFREE_LOG_RES,
XFS_DIROP_LOG_RES, xfs_chash, ctl_table, and
xfs_growfs_data_private
- don't keep silent if sunit/swidth cannot be changed via mount
- fix a leak of remote symlink blocks into the filesystem when xattrs
are used on symlinks
- fix for fiemap to return FIEMAP_EXTENT_UNKNOWN flag on delay extents
- part of a fix for xfs_fsr
- disable speculative preallocation with small files
- performance improvements for inode creates and deletes"
* tag 'for-linus-v3.11-rc1' of git://oss.sgi.com/xfs/xfs: (61 commits)
xfs: Remove incore use of XFS_OQUOTA_ENFD and XFS_OQUOTA_CHKD
xfs: Change xfs_dquot_acct to be a 2-dimensional array
xfs: Code cleanup and removal of some typedef usage
xfs: Replace macro XFS_DQ_TO_QIP with a function
xfs: Replace macro XFS_DQUOT_TREE with a function
xfs: Define a new function xfs_is_quota_inode()
xfs: implement inode change count
xfs: Use inode create transaction
xfs: Inode create item recovery
xfs: Inode create transaction reservations
xfs: Inode create log items
xfs: Introduce an ordered buffer item
xfs: Introduce ordered log vector support
xfs: xfs_ifree doesn't need to modify the inode buffer
xfs: don't do IO when creating an new inode
xfs: don't use speculative prealloc for small files
xfs: plug directory buffer readahead
xfs: add pluging for bulkstat readahead
xfs: Remove dead function prototype xfs_sync_inode_grab()
xfs: Remove the left function variable from xfs_ialloc_get_rec()
...
Diffstat (limited to 'fs/xfs')
43 files changed, 1180 insertions, 477 deletions
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index 6313b69b6644..4a4508023a3c 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile | |||
@@ -71,6 +71,7 @@ xfs-y += xfs_alloc.o \ | |||
71 | xfs_dir2_sf.o \ | 71 | xfs_dir2_sf.o \ |
72 | xfs_ialloc.o \ | 72 | xfs_ialloc.o \ |
73 | xfs_ialloc_btree.o \ | 73 | xfs_ialloc_btree.o \ |
74 | xfs_icreate_item.o \ | ||
74 | xfs_inode.o \ | 75 | xfs_inode.o \ |
75 | xfs_log_recover.o \ | 76 | xfs_log_recover.o \ |
76 | xfs_mount.o \ | 77 | xfs_mount.o \ |
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index 5673bcfda2f0..71596e57283a 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c | |||
@@ -175,6 +175,7 @@ xfs_alloc_compute_diff( | |||
175 | xfs_agblock_t wantbno, /* target starting block */ | 175 | xfs_agblock_t wantbno, /* target starting block */ |
176 | xfs_extlen_t wantlen, /* target length */ | 176 | xfs_extlen_t wantlen, /* target length */ |
177 | xfs_extlen_t alignment, /* target alignment */ | 177 | xfs_extlen_t alignment, /* target alignment */ |
178 | char userdata, /* are we allocating data? */ | ||
178 | xfs_agblock_t freebno, /* freespace's starting block */ | 179 | xfs_agblock_t freebno, /* freespace's starting block */ |
179 | xfs_extlen_t freelen, /* freespace's length */ | 180 | xfs_extlen_t freelen, /* freespace's length */ |
180 | xfs_agblock_t *newbnop) /* result: best start block from free */ | 181 | xfs_agblock_t *newbnop) /* result: best start block from free */ |
@@ -189,7 +190,14 @@ xfs_alloc_compute_diff( | |||
189 | ASSERT(freelen >= wantlen); | 190 | ASSERT(freelen >= wantlen); |
190 | freeend = freebno + freelen; | 191 | freeend = freebno + freelen; |
191 | wantend = wantbno + wantlen; | 192 | wantend = wantbno + wantlen; |
192 | if (freebno >= wantbno) { | 193 | /* |
194 | * We want to allocate from the start of a free extent if it is past | ||
195 | * the desired block or if we are allocating user data and the free | ||
196 | * extent is before desired block. The second case is there to allow | ||
197 | * for contiguous allocation from the remaining free space if the file | ||
198 | * grows in the short term. | ||
199 | */ | ||
200 | if (freebno >= wantbno || (userdata && freeend < wantend)) { | ||
193 | if ((newbno1 = roundup(freebno, alignment)) >= freeend) | 201 | if ((newbno1 = roundup(freebno, alignment)) >= freeend) |
194 | newbno1 = NULLAGBLOCK; | 202 | newbno1 = NULLAGBLOCK; |
195 | } else if (freeend >= wantend && alignment > 1) { | 203 | } else if (freeend >= wantend && alignment > 1) { |
@@ -805,7 +813,8 @@ xfs_alloc_find_best_extent( | |||
805 | xfs_alloc_fix_len(args); | 813 | xfs_alloc_fix_len(args); |
806 | 814 | ||
807 | sdiff = xfs_alloc_compute_diff(args->agbno, args->len, | 815 | sdiff = xfs_alloc_compute_diff(args->agbno, args->len, |
808 | args->alignment, *sbnoa, | 816 | args->alignment, |
817 | args->userdata, *sbnoa, | ||
809 | *slena, &new); | 818 | *slena, &new); |
810 | 819 | ||
811 | /* | 820 | /* |
@@ -976,7 +985,8 @@ restart: | |||
976 | if (args->len < blen) | 985 | if (args->len < blen) |
977 | continue; | 986 | continue; |
978 | ltdiff = xfs_alloc_compute_diff(args->agbno, args->len, | 987 | ltdiff = xfs_alloc_compute_diff(args->agbno, args->len, |
979 | args->alignment, ltbnoa, ltlena, <new); | 988 | args->alignment, args->userdata, ltbnoa, |
989 | ltlena, <new); | ||
980 | if (ltnew != NULLAGBLOCK && | 990 | if (ltnew != NULLAGBLOCK && |
981 | (args->len > blen || ltdiff < bdiff)) { | 991 | (args->len > blen || ltdiff < bdiff)) { |
982 | bdiff = ltdiff; | 992 | bdiff = ltdiff; |
@@ -1128,7 +1138,8 @@ restart: | |||
1128 | args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); | 1138 | args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); |
1129 | xfs_alloc_fix_len(args); | 1139 | xfs_alloc_fix_len(args); |
1130 | ltdiff = xfs_alloc_compute_diff(args->agbno, args->len, | 1140 | ltdiff = xfs_alloc_compute_diff(args->agbno, args->len, |
1131 | args->alignment, ltbnoa, ltlena, <new); | 1141 | args->alignment, args->userdata, ltbnoa, |
1142 | ltlena, <new); | ||
1132 | 1143 | ||
1133 | error = xfs_alloc_find_best_extent(args, | 1144 | error = xfs_alloc_find_best_extent(args, |
1134 | &bno_cur_lt, &bno_cur_gt, | 1145 | &bno_cur_lt, &bno_cur_gt, |
@@ -1144,7 +1155,8 @@ restart: | |||
1144 | args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen); | 1155 | args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen); |
1145 | xfs_alloc_fix_len(args); | 1156 | xfs_alloc_fix_len(args); |
1146 | gtdiff = xfs_alloc_compute_diff(args->agbno, args->len, | 1157 | gtdiff = xfs_alloc_compute_diff(args->agbno, args->len, |
1147 | args->alignment, gtbnoa, gtlena, >new); | 1158 | args->alignment, args->userdata, gtbnoa, |
1159 | gtlena, >new); | ||
1148 | 1160 | ||
1149 | error = xfs_alloc_find_best_extent(args, | 1161 | error = xfs_alloc_find_best_extent(args, |
1150 | &bno_cur_gt, &bno_cur_lt, | 1162 | &bno_cur_gt, &bno_cur_lt, |
@@ -1203,7 +1215,7 @@ restart: | |||
1203 | } | 1215 | } |
1204 | rlen = args->len; | 1216 | rlen = args->len; |
1205 | (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, | 1217 | (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, |
1206 | ltbnoa, ltlena, <new); | 1218 | args->userdata, ltbnoa, ltlena, <new); |
1207 | ASSERT(ltnew >= ltbno); | 1219 | ASSERT(ltnew >= ltbno); |
1208 | ASSERT(ltnew + rlen <= ltbnoa + ltlena); | 1220 | ASSERT(ltnew + rlen <= ltbnoa + ltlena); |
1209 | ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); | 1221 | ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); |
diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index 70c43d9f72c1..1b726d626941 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h | |||
@@ -196,6 +196,8 @@ typedef __be64 xfs_bmbt_ptr_t, xfs_bmdr_ptr_t; | |||
196 | #define XFS_BMDR_SPACE_CALC(nrecs) \ | 196 | #define XFS_BMDR_SPACE_CALC(nrecs) \ |
197 | (int)(sizeof(xfs_bmdr_block_t) + \ | 197 | (int)(sizeof(xfs_bmdr_block_t) + \ |
198 | ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t)))) | 198 | ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t)))) |
199 | #define XFS_BMAP_BMDR_SPACE(bb) \ | ||
200 | (XFS_BMDR_SPACE_CALC(be16_to_cpu((bb)->bb_numrecs))) | ||
199 | 201 | ||
200 | /* | 202 | /* |
201 | * Maximum number of bmap btree levels. | 203 | * Maximum number of bmap btree levels. |
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 4ec431777048..bfc4e0c26fd3 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c | |||
@@ -140,6 +140,16 @@ xfs_buf_item_size( | |||
140 | 140 | ||
141 | ASSERT(bip->bli_flags & XFS_BLI_LOGGED); | 141 | ASSERT(bip->bli_flags & XFS_BLI_LOGGED); |
142 | 142 | ||
143 | if (bip->bli_flags & XFS_BLI_ORDERED) { | ||
144 | /* | ||
145 | * The buffer has been logged just to order it. | ||
146 | * It is not being included in the transaction | ||
147 | * commit, so no vectors are used at all. | ||
148 | */ | ||
149 | trace_xfs_buf_item_size_ordered(bip); | ||
150 | return XFS_LOG_VEC_ORDERED; | ||
151 | } | ||
152 | |||
143 | /* | 153 | /* |
144 | * the vector count is based on the number of buffer vectors we have | 154 | * the vector count is based on the number of buffer vectors we have |
145 | * dirty bits in. This will only be greater than one when we have a | 155 | * dirty bits in. This will only be greater than one when we have a |
@@ -212,6 +222,7 @@ xfs_buf_item_format_segment( | |||
212 | goto out; | 222 | goto out; |
213 | } | 223 | } |
214 | 224 | ||
225 | |||
215 | /* | 226 | /* |
216 | * Fill in an iovec for each set of contiguous chunks. | 227 | * Fill in an iovec for each set of contiguous chunks. |
217 | */ | 228 | */ |
@@ -299,18 +310,36 @@ xfs_buf_item_format( | |||
299 | 310 | ||
300 | /* | 311 | /* |
301 | * If it is an inode buffer, transfer the in-memory state to the | 312 | * If it is an inode buffer, transfer the in-memory state to the |
302 | * format flags and clear the in-memory state. We do not transfer | 313 | * format flags and clear the in-memory state. |
314 | * | ||
315 | * For buffer based inode allocation, we do not transfer | ||
303 | * this state if the inode buffer allocation has not yet been committed | 316 | * this state if the inode buffer allocation has not yet been committed |
304 | * to the log as setting the XFS_BLI_INODE_BUF flag will prevent | 317 | * to the log as setting the XFS_BLI_INODE_BUF flag will prevent |
305 | * correct replay of the inode allocation. | 318 | * correct replay of the inode allocation. |
319 | * | ||
320 | * For icreate item based inode allocation, the buffers aren't written | ||
321 | * to the journal during allocation, and hence we should always tag the | ||
322 | * buffer as an inode buffer so that the correct unlinked list replay | ||
323 | * occurs during recovery. | ||
306 | */ | 324 | */ |
307 | if (bip->bli_flags & XFS_BLI_INODE_BUF) { | 325 | if (bip->bli_flags & XFS_BLI_INODE_BUF) { |
308 | if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && | 326 | if (xfs_sb_version_hascrc(&lip->li_mountp->m_sb) || |
327 | !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && | ||
309 | xfs_log_item_in_current_chkpt(lip))) | 328 | xfs_log_item_in_current_chkpt(lip))) |
310 | bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF; | 329 | bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF; |
311 | bip->bli_flags &= ~XFS_BLI_INODE_BUF; | 330 | bip->bli_flags &= ~XFS_BLI_INODE_BUF; |
312 | } | 331 | } |
313 | 332 | ||
333 | if ((bip->bli_flags & (XFS_BLI_ORDERED|XFS_BLI_STALE)) == | ||
334 | XFS_BLI_ORDERED) { | ||
335 | /* | ||
336 | * The buffer has been logged just to order it. It is not being | ||
337 | * included in the transaction commit, so don't format it. | ||
338 | */ | ||
339 | trace_xfs_buf_item_format_ordered(bip); | ||
340 | return; | ||
341 | } | ||
342 | |||
314 | for (i = 0; i < bip->bli_format_count; i++) { | 343 | for (i = 0; i < bip->bli_format_count; i++) { |
315 | vecp = xfs_buf_item_format_segment(bip, vecp, offset, | 344 | vecp = xfs_buf_item_format_segment(bip, vecp, offset, |
316 | &bip->bli_formats[i]); | 345 | &bip->bli_formats[i]); |
@@ -340,6 +369,7 @@ xfs_buf_item_pin( | |||
340 | 369 | ||
341 | ASSERT(atomic_read(&bip->bli_refcount) > 0); | 370 | ASSERT(atomic_read(&bip->bli_refcount) > 0); |
342 | ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || | 371 | ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || |
372 | (bip->bli_flags & XFS_BLI_ORDERED) || | ||
343 | (bip->bli_flags & XFS_BLI_STALE)); | 373 | (bip->bli_flags & XFS_BLI_STALE)); |
344 | 374 | ||
345 | trace_xfs_buf_item_pin(bip); | 375 | trace_xfs_buf_item_pin(bip); |
@@ -512,8 +542,9 @@ xfs_buf_item_unlock( | |||
512 | { | 542 | { |
513 | struct xfs_buf_log_item *bip = BUF_ITEM(lip); | 543 | struct xfs_buf_log_item *bip = BUF_ITEM(lip); |
514 | struct xfs_buf *bp = bip->bli_buf; | 544 | struct xfs_buf *bp = bip->bli_buf; |
515 | int aborted, clean, i; | 545 | bool clean; |
516 | uint hold; | 546 | bool aborted; |
547 | int flags; | ||
517 | 548 | ||
518 | /* Clear the buffer's association with this transaction. */ | 549 | /* Clear the buffer's association with this transaction. */ |
519 | bp->b_transp = NULL; | 550 | bp->b_transp = NULL; |
@@ -524,23 +555,21 @@ xfs_buf_item_unlock( | |||
524 | * (cancelled) buffers at unpin time, but we'll never go through the | 555 | * (cancelled) buffers at unpin time, but we'll never go through the |
525 | * pin/unpin cycle if we abort inside commit. | 556 | * pin/unpin cycle if we abort inside commit. |
526 | */ | 557 | */ |
527 | aborted = (lip->li_flags & XFS_LI_ABORTED) != 0; | 558 | aborted = (lip->li_flags & XFS_LI_ABORTED) ? true : false; |
528 | |||
529 | /* | 559 | /* |
530 | * Before possibly freeing the buf item, determine if we should | 560 | * Before possibly freeing the buf item, copy the per-transaction state |
531 | * release the buffer at the end of this routine. | 561 | * so we can reference it safely later after clearing it from the |
562 | * buffer log item. | ||
532 | */ | 563 | */ |
533 | hold = bip->bli_flags & XFS_BLI_HOLD; | 564 | flags = bip->bli_flags; |
534 | 565 | bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED); | |
535 | /* Clear the per transaction state. */ | ||
536 | bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD); | ||
537 | 566 | ||
538 | /* | 567 | /* |
539 | * If the buf item is marked stale, then don't do anything. We'll | 568 | * If the buf item is marked stale, then don't do anything. We'll |
540 | * unlock the buffer and free the buf item when the buffer is unpinned | 569 | * unlock the buffer and free the buf item when the buffer is unpinned |
541 | * for the last time. | 570 | * for the last time. |
542 | */ | 571 | */ |
543 | if (bip->bli_flags & XFS_BLI_STALE) { | 572 | if (flags & XFS_BLI_STALE) { |
544 | trace_xfs_buf_item_unlock_stale(bip); | 573 | trace_xfs_buf_item_unlock_stale(bip); |
545 | ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL); | 574 | ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL); |
546 | if (!aborted) { | 575 | if (!aborted) { |
@@ -557,13 +586,19 @@ xfs_buf_item_unlock( | |||
557 | * be the only reference to the buf item, so we free it anyway | 586 | * be the only reference to the buf item, so we free it anyway |
558 | * regardless of whether it is dirty or not. A dirty abort implies a | 587 | * regardless of whether it is dirty or not. A dirty abort implies a |
559 | * shutdown, anyway. | 588 | * shutdown, anyway. |
589 | * | ||
590 | * Ordered buffers are dirty but may have no recorded changes, so ensure | ||
591 | * we only release clean items here. | ||
560 | */ | 592 | */ |
561 | clean = 1; | 593 | clean = (flags & XFS_BLI_DIRTY) ? false : true; |
562 | for (i = 0; i < bip->bli_format_count; i++) { | 594 | if (clean) { |
563 | if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map, | 595 | int i; |
564 | bip->bli_formats[i].blf_map_size)) { | 596 | for (i = 0; i < bip->bli_format_count; i++) { |
565 | clean = 0; | 597 | if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map, |
566 | break; | 598 | bip->bli_formats[i].blf_map_size)) { |
599 | clean = false; | ||
600 | break; | ||
601 | } | ||
567 | } | 602 | } |
568 | } | 603 | } |
569 | if (clean) | 604 | if (clean) |
@@ -576,7 +611,7 @@ xfs_buf_item_unlock( | |||
576 | } else | 611 | } else |
577 | atomic_dec(&bip->bli_refcount); | 612 | atomic_dec(&bip->bli_refcount); |
578 | 613 | ||
579 | if (!hold) | 614 | if (!(flags & XFS_BLI_HOLD)) |
580 | xfs_buf_relse(bp); | 615 | xfs_buf_relse(bp); |
581 | } | 616 | } |
582 | 617 | ||
@@ -842,12 +877,6 @@ xfs_buf_item_log( | |||
842 | struct xfs_buf *bp = bip->bli_buf; | 877 | struct xfs_buf *bp = bip->bli_buf; |
843 | 878 | ||
844 | /* | 879 | /* |
845 | * Mark the item as having some dirty data for | ||
846 | * quick reference in xfs_buf_item_dirty. | ||
847 | */ | ||
848 | bip->bli_flags |= XFS_BLI_DIRTY; | ||
849 | |||
850 | /* | ||
851 | * walk each buffer segment and mark them dirty appropriately. | 880 | * walk each buffer segment and mark them dirty appropriately. |
852 | */ | 881 | */ |
853 | start = 0; | 882 | start = 0; |
@@ -873,7 +902,7 @@ xfs_buf_item_log( | |||
873 | 902 | ||
874 | 903 | ||
875 | /* | 904 | /* |
876 | * Return 1 if the buffer has some data that has been logged (at any | 905 | * Return 1 if the buffer has been logged or ordered in a transaction (at any |
877 | * point, not just the current transaction) and 0 if not. | 906 | * point, not just the current transaction) and 0 if not. |
878 | */ | 907 | */ |
879 | uint | 908 | uint |
@@ -907,11 +936,11 @@ void | |||
907 | xfs_buf_item_relse( | 936 | xfs_buf_item_relse( |
908 | xfs_buf_t *bp) | 937 | xfs_buf_t *bp) |
909 | { | 938 | { |
910 | xfs_buf_log_item_t *bip; | 939 | xfs_buf_log_item_t *bip = bp->b_fspriv; |
911 | 940 | ||
912 | trace_xfs_buf_item_relse(bp, _RET_IP_); | 941 | trace_xfs_buf_item_relse(bp, _RET_IP_); |
942 | ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL)); | ||
913 | 943 | ||
914 | bip = bp->b_fspriv; | ||
915 | bp->b_fspriv = bip->bli_item.li_bio_list; | 944 | bp->b_fspriv = bip->bli_item.li_bio_list; |
916 | if (bp->b_fspriv == NULL) | 945 | if (bp->b_fspriv == NULL) |
917 | bp->b_iodone = NULL; | 946 | bp->b_iodone = NULL; |
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h index 2573d2a75fc8..0f1c247dc680 100644 --- a/fs/xfs/xfs_buf_item.h +++ b/fs/xfs/xfs_buf_item.h | |||
@@ -120,6 +120,7 @@ xfs_blft_from_flags(struct xfs_buf_log_format *blf) | |||
120 | #define XFS_BLI_INODE_ALLOC_BUF 0x10 | 120 | #define XFS_BLI_INODE_ALLOC_BUF 0x10 |
121 | #define XFS_BLI_STALE_INODE 0x20 | 121 | #define XFS_BLI_STALE_INODE 0x20 |
122 | #define XFS_BLI_INODE_BUF 0x40 | 122 | #define XFS_BLI_INODE_BUF 0x40 |
123 | #define XFS_BLI_ORDERED 0x80 | ||
123 | 124 | ||
124 | #define XFS_BLI_FLAGS \ | 125 | #define XFS_BLI_FLAGS \ |
125 | { XFS_BLI_HOLD, "HOLD" }, \ | 126 | { XFS_BLI_HOLD, "HOLD" }, \ |
@@ -128,7 +129,8 @@ xfs_blft_from_flags(struct xfs_buf_log_format *blf) | |||
128 | { XFS_BLI_LOGGED, "LOGGED" }, \ | 129 | { XFS_BLI_LOGGED, "LOGGED" }, \ |
129 | { XFS_BLI_INODE_ALLOC_BUF, "INODE_ALLOC" }, \ | 130 | { XFS_BLI_INODE_ALLOC_BUF, "INODE_ALLOC" }, \ |
130 | { XFS_BLI_STALE_INODE, "STALE_INODE" }, \ | 131 | { XFS_BLI_STALE_INODE, "STALE_INODE" }, \ |
131 | { XFS_BLI_INODE_BUF, "INODE_BUF" } | 132 | { XFS_BLI_INODE_BUF, "INODE_BUF" }, \ |
133 | { XFS_BLI_ORDERED, "ORDERED" } | ||
132 | 134 | ||
133 | 135 | ||
134 | #ifdef __KERNEL__ | 136 | #ifdef __KERNEL__ |
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c index c407e1ccff43..e36445ceaf80 100644 --- a/fs/xfs/xfs_dfrag.c +++ b/fs/xfs/xfs_dfrag.c | |||
@@ -24,6 +24,9 @@ | |||
24 | #include "xfs_ag.h" | 24 | #include "xfs_ag.h" |
25 | #include "xfs_mount.h" | 25 | #include "xfs_mount.h" |
26 | #include "xfs_bmap_btree.h" | 26 | #include "xfs_bmap_btree.h" |
27 | #include "xfs_alloc_btree.h" | ||
28 | #include "xfs_ialloc_btree.h" | ||
29 | #include "xfs_btree.h" | ||
27 | #include "xfs_dinode.h" | 30 | #include "xfs_dinode.h" |
28 | #include "xfs_inode.h" | 31 | #include "xfs_inode.h" |
29 | #include "xfs_inode_item.h" | 32 | #include "xfs_inode_item.h" |
@@ -182,7 +185,7 @@ xfs_swap_extents_check_format( | |||
182 | */ | 185 | */ |
183 | if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) { | 186 | if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) { |
184 | if (XFS_IFORK_BOFF(ip) && | 187 | if (XFS_IFORK_BOFF(ip) && |
185 | tip->i_df.if_broot_bytes > XFS_IFORK_BOFF(ip)) | 188 | XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip)) |
186 | return EINVAL; | 189 | return EINVAL; |
187 | if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <= | 190 | if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <= |
188 | XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)) | 191 | XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)) |
@@ -192,9 +195,8 @@ xfs_swap_extents_check_format( | |||
192 | /* Reciprocal target->temp btree format checks */ | 195 | /* Reciprocal target->temp btree format checks */ |
193 | if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) { | 196 | if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) { |
194 | if (XFS_IFORK_BOFF(tip) && | 197 | if (XFS_IFORK_BOFF(tip) && |
195 | ip->i_df.if_broot_bytes > XFS_IFORK_BOFF(tip)) | 198 | XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip)) |
196 | return EINVAL; | 199 | return EINVAL; |
197 | |||
198 | if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <= | 200 | if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <= |
199 | XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK)) | 201 | XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK)) |
200 | return EINVAL; | 202 | return EINVAL; |
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c index e0cc1243a8aa..2aed25cae04d 100644 --- a/fs/xfs/xfs_dir2_leaf.c +++ b/fs/xfs/xfs_dir2_leaf.c | |||
@@ -1108,6 +1108,7 @@ xfs_dir2_leaf_readbuf( | |||
1108 | struct xfs_mount *mp = dp->i_mount; | 1108 | struct xfs_mount *mp = dp->i_mount; |
1109 | struct xfs_buf *bp = *bpp; | 1109 | struct xfs_buf *bp = *bpp; |
1110 | struct xfs_bmbt_irec *map = mip->map; | 1110 | struct xfs_bmbt_irec *map = mip->map; |
1111 | struct blk_plug plug; | ||
1111 | int error = 0; | 1112 | int error = 0; |
1112 | int length; | 1113 | int length; |
1113 | int i; | 1114 | int i; |
@@ -1236,6 +1237,7 @@ xfs_dir2_leaf_readbuf( | |||
1236 | /* | 1237 | /* |
1237 | * Do we need more readahead? | 1238 | * Do we need more readahead? |
1238 | */ | 1239 | */ |
1240 | blk_start_plug(&plug); | ||
1239 | for (mip->ra_index = mip->ra_offset = i = 0; | 1241 | for (mip->ra_index = mip->ra_offset = i = 0; |
1240 | mip->ra_want > mip->ra_current && i < mip->map_blocks; | 1242 | mip->ra_want > mip->ra_current && i < mip->map_blocks; |
1241 | i += mp->m_dirblkfsbs) { | 1243 | i += mp->m_dirblkfsbs) { |
@@ -1287,6 +1289,7 @@ xfs_dir2_leaf_readbuf( | |||
1287 | } | 1289 | } |
1288 | } | 1290 | } |
1289 | } | 1291 | } |
1292 | blk_finish_plug(&plug); | ||
1290 | 1293 | ||
1291 | out: | 1294 | out: |
1292 | *bpp = bp; | 1295 | *bpp = bp; |
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index 044e97a33c8d..f01012de06d0 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c | |||
@@ -570,13 +570,13 @@ xfs_qm_dqtobp( | |||
570 | xfs_buf_t **O_bpp, | 570 | xfs_buf_t **O_bpp, |
571 | uint flags) | 571 | uint flags) |
572 | { | 572 | { |
573 | xfs_bmbt_irec_t map; | 573 | struct xfs_bmbt_irec map; |
574 | int nmaps = 1, error; | 574 | int nmaps = 1, error; |
575 | xfs_buf_t *bp; | 575 | struct xfs_buf *bp; |
576 | xfs_inode_t *quotip = XFS_DQ_TO_QIP(dqp); | 576 | struct xfs_inode *quotip = xfs_dq_to_quota_inode(dqp); |
577 | xfs_mount_t *mp = dqp->q_mount; | 577 | struct xfs_mount *mp = dqp->q_mount; |
578 | xfs_dqid_t id = be32_to_cpu(dqp->q_core.d_id); | 578 | xfs_dqid_t id = be32_to_cpu(dqp->q_core.d_id); |
579 | xfs_trans_t *tp = (tpp ? *tpp : NULL); | 579 | struct xfs_trans *tp = (tpp ? *tpp : NULL); |
580 | 580 | ||
581 | dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk; | 581 | dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk; |
582 | 582 | ||
@@ -804,7 +804,7 @@ xfs_qm_dqget( | |||
804 | xfs_dquot_t **O_dqpp) /* OUT : locked incore dquot */ | 804 | xfs_dquot_t **O_dqpp) /* OUT : locked incore dquot */ |
805 | { | 805 | { |
806 | struct xfs_quotainfo *qi = mp->m_quotainfo; | 806 | struct xfs_quotainfo *qi = mp->m_quotainfo; |
807 | struct radix_tree_root *tree = XFS_DQUOT_TREE(qi, type); | 807 | struct radix_tree_root *tree = xfs_dquot_tree(qi, type); |
808 | struct xfs_dquot *dqp; | 808 | struct xfs_dquot *dqp; |
809 | int error; | 809 | int error; |
810 | 810 | ||
diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h index 4f0ebfc43cc9..b596626249b8 100644 --- a/fs/xfs/xfs_dquot.h +++ b/fs/xfs/xfs_dquot.h | |||
@@ -143,10 +143,6 @@ static inline xfs_dquot_t *xfs_inode_dquot(struct xfs_inode *ip, int type) | |||
143 | #define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER) | 143 | #define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER) |
144 | #define XFS_QM_ISPDQ(dqp) ((dqp)->dq_flags & XFS_DQ_PROJ) | 144 | #define XFS_QM_ISPDQ(dqp) ((dqp)->dq_flags & XFS_DQ_PROJ) |
145 | #define XFS_QM_ISGDQ(dqp) ((dqp)->dq_flags & XFS_DQ_GROUP) | 145 | #define XFS_QM_ISGDQ(dqp) ((dqp)->dq_flags & XFS_DQ_GROUP) |
146 | #define XFS_DQ_TO_QINF(dqp) ((dqp)->q_mount->m_quotainfo) | ||
147 | #define XFS_DQ_TO_QIP(dqp) (XFS_QM_ISUDQ(dqp) ? \ | ||
148 | XFS_DQ_TO_QINF(dqp)->qi_uquotaip : \ | ||
149 | XFS_DQ_TO_QINF(dqp)->qi_gquotaip) | ||
150 | 146 | ||
151 | extern int xfs_qm_dqread(struct xfs_mount *, xfs_dqid_t, uint, | 147 | extern int xfs_qm_dqread(struct xfs_mount *, xfs_dqid_t, uint, |
152 | uint, struct xfs_dquot **); | 148 | uint, struct xfs_dquot **); |
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 3c3644ea825b..614eb0cc3608 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c | |||
@@ -176,7 +176,7 @@ xfs_growfs_data_private( | |||
176 | if (!bp) | 176 | if (!bp) |
177 | return EIO; | 177 | return EIO; |
178 | if (bp->b_error) { | 178 | if (bp->b_error) { |
179 | int error = bp->b_error; | 179 | error = bp->b_error; |
180 | xfs_buf_relse(bp); | 180 | xfs_buf_relse(bp); |
181 | return error; | 181 | return error; |
182 | } | 182 | } |
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index c8f5ae1debf2..7a0c17d7ec09 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include "xfs_bmap.h" | 38 | #include "xfs_bmap.h" |
39 | #include "xfs_cksum.h" | 39 | #include "xfs_cksum.h" |
40 | #include "xfs_buf_item.h" | 40 | #include "xfs_buf_item.h" |
41 | #include "xfs_icreate_item.h" | ||
41 | 42 | ||
42 | 43 | ||
43 | /* | 44 | /* |
@@ -150,12 +151,16 @@ xfs_check_agi_freecount( | |||
150 | #endif | 151 | #endif |
151 | 152 | ||
152 | /* | 153 | /* |
153 | * Initialise a new set of inodes. | 154 | * Initialise a new set of inodes. When called without a transaction context |
155 | * (e.g. from recovery) we initiate a delayed write of the inode buffers rather | ||
156 | * than logging them (which in a transaction context puts them into the AIL | ||
157 | * for writeback rather than the xfsbufd queue). | ||
154 | */ | 158 | */ |
155 | STATIC int | 159 | int |
156 | xfs_ialloc_inode_init( | 160 | xfs_ialloc_inode_init( |
157 | struct xfs_mount *mp, | 161 | struct xfs_mount *mp, |
158 | struct xfs_trans *tp, | 162 | struct xfs_trans *tp, |
163 | struct list_head *buffer_list, | ||
159 | xfs_agnumber_t agno, | 164 | xfs_agnumber_t agno, |
160 | xfs_agblock_t agbno, | 165 | xfs_agblock_t agbno, |
161 | xfs_agblock_t length, | 166 | xfs_agblock_t length, |
@@ -208,6 +213,18 @@ xfs_ialloc_inode_init( | |||
208 | version = 3; | 213 | version = 3; |
209 | ino = XFS_AGINO_TO_INO(mp, agno, | 214 | ino = XFS_AGINO_TO_INO(mp, agno, |
210 | XFS_OFFBNO_TO_AGINO(mp, agbno, 0)); | 215 | XFS_OFFBNO_TO_AGINO(mp, agbno, 0)); |
216 | |||
217 | /* | ||
218 | * log the initialisation that is about to take place as an | ||
219 | * logical operation. This means the transaction does not | ||
220 | * need to log the physical changes to the inode buffers as log | ||
221 | * recovery will know what initialisation is actually needed. | ||
222 | * Hence we only need to log the buffers as "ordered" buffers so | ||
223 | * they track in the AIL as if they were physically logged. | ||
224 | */ | ||
225 | if (tp) | ||
226 | xfs_icreate_log(tp, agno, agbno, XFS_IALLOC_INODES(mp), | ||
227 | mp->m_sb.sb_inodesize, length, gen); | ||
211 | } else if (xfs_sb_version_hasnlink(&mp->m_sb)) | 228 | } else if (xfs_sb_version_hasnlink(&mp->m_sb)) |
212 | version = 2; | 229 | version = 2; |
213 | else | 230 | else |
@@ -223,13 +240,8 @@ xfs_ialloc_inode_init( | |||
223 | XBF_UNMAPPED); | 240 | XBF_UNMAPPED); |
224 | if (!fbuf) | 241 | if (!fbuf) |
225 | return ENOMEM; | 242 | return ENOMEM; |
226 | /* | 243 | |
227 | * Initialize all inodes in this buffer and then log them. | 244 | /* Initialize the inode buffers and log them appropriately. */ |
228 | * | ||
229 | * XXX: It would be much better if we had just one transaction | ||
230 | * to log a whole cluster of inodes instead of all the | ||
231 | * individual transactions causing a lot of log traffic. | ||
232 | */ | ||
233 | fbuf->b_ops = &xfs_inode_buf_ops; | 245 | fbuf->b_ops = &xfs_inode_buf_ops; |
234 | xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length)); | 246 | xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length)); |
235 | for (i = 0; i < ninodes; i++) { | 247 | for (i = 0; i < ninodes; i++) { |
@@ -247,18 +259,39 @@ xfs_ialloc_inode_init( | |||
247 | ino++; | 259 | ino++; |
248 | uuid_copy(&free->di_uuid, &mp->m_sb.sb_uuid); | 260 | uuid_copy(&free->di_uuid, &mp->m_sb.sb_uuid); |
249 | xfs_dinode_calc_crc(mp, free); | 261 | xfs_dinode_calc_crc(mp, free); |
250 | } else { | 262 | } else if (tp) { |
251 | /* just log the inode core */ | 263 | /* just log the inode core */ |
252 | xfs_trans_log_buf(tp, fbuf, ioffset, | 264 | xfs_trans_log_buf(tp, fbuf, ioffset, |
253 | ioffset + isize - 1); | 265 | ioffset + isize - 1); |
254 | } | 266 | } |
255 | } | 267 | } |
256 | if (version == 3) { | 268 | |
257 | /* need to log the entire buffer */ | 269 | if (tp) { |
258 | xfs_trans_log_buf(tp, fbuf, 0, | 270 | /* |
259 | BBTOB(fbuf->b_length) - 1); | 271 | * Mark the buffer as an inode allocation buffer so it |
272 | * sticks in AIL at the point of this allocation | ||
273 | * transaction. This ensures the they are on disk before | ||
274 | * the tail of the log can be moved past this | ||
275 | * transaction (i.e. by preventing relogging from moving | ||
276 | * it forward in the log). | ||
277 | */ | ||
278 | xfs_trans_inode_alloc_buf(tp, fbuf); | ||
279 | if (version == 3) { | ||
280 | /* | ||
281 | * Mark the buffer as ordered so that they are | ||
282 | * not physically logged in the transaction but | ||
283 | * still tracked in the AIL as part of the | ||
284 | * transaction and pin the log appropriately. | ||
285 | */ | ||
286 | xfs_trans_ordered_buf(tp, fbuf); | ||
287 | xfs_trans_log_buf(tp, fbuf, 0, | ||
288 | BBTOB(fbuf->b_length) - 1); | ||
289 | } | ||
290 | } else { | ||
291 | fbuf->b_flags |= XBF_DONE; | ||
292 | xfs_buf_delwri_queue(fbuf, buffer_list); | ||
293 | xfs_buf_relse(fbuf); | ||
260 | } | 294 | } |
261 | xfs_trans_inode_alloc_buf(tp, fbuf); | ||
262 | } | 295 | } |
263 | return 0; | 296 | return 0; |
264 | } | 297 | } |
@@ -303,7 +336,7 @@ xfs_ialloc_ag_alloc( | |||
303 | * First try to allocate inodes contiguous with the last-allocated | 336 | * First try to allocate inodes contiguous with the last-allocated |
304 | * chunk of inodes. If the filesystem is striped, this will fill | 337 | * chunk of inodes. If the filesystem is striped, this will fill |
305 | * an entire stripe unit with inodes. | 338 | * an entire stripe unit with inodes. |
306 | */ | 339 | */ |
307 | agi = XFS_BUF_TO_AGI(agbp); | 340 | agi = XFS_BUF_TO_AGI(agbp); |
308 | newino = be32_to_cpu(agi->agi_newino); | 341 | newino = be32_to_cpu(agi->agi_newino); |
309 | agno = be32_to_cpu(agi->agi_seqno); | 342 | agno = be32_to_cpu(agi->agi_seqno); |
@@ -402,7 +435,7 @@ xfs_ialloc_ag_alloc( | |||
402 | * rather than a linear progression to prevent the next generation | 435 | * rather than a linear progression to prevent the next generation |
403 | * number from being easily guessable. | 436 | * number from being easily guessable. |
404 | */ | 437 | */ |
405 | error = xfs_ialloc_inode_init(args.mp, tp, agno, args.agbno, | 438 | error = xfs_ialloc_inode_init(args.mp, tp, NULL, agno, args.agbno, |
406 | args.len, prandom_u32()); | 439 | args.len, prandom_u32()); |
407 | 440 | ||
408 | if (error) | 441 | if (error) |
@@ -615,8 +648,7 @@ xfs_ialloc_get_rec( | |||
615 | struct xfs_btree_cur *cur, | 648 | struct xfs_btree_cur *cur, |
616 | xfs_agino_t agino, | 649 | xfs_agino_t agino, |
617 | xfs_inobt_rec_incore_t *rec, | 650 | xfs_inobt_rec_incore_t *rec, |
618 | int *done, | 651 | int *done) |
619 | int left) | ||
620 | { | 652 | { |
621 | int error; | 653 | int error; |
622 | int i; | 654 | int i; |
@@ -724,12 +756,12 @@ xfs_dialloc_ag( | |||
724 | pag->pagl_leftrec != NULLAGINO && | 756 | pag->pagl_leftrec != NULLAGINO && |
725 | pag->pagl_rightrec != NULLAGINO) { | 757 | pag->pagl_rightrec != NULLAGINO) { |
726 | error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec, | 758 | error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec, |
727 | &trec, &doneleft, 1); | 759 | &trec, &doneleft); |
728 | if (error) | 760 | if (error) |
729 | goto error1; | 761 | goto error1; |
730 | 762 | ||
731 | error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec, | 763 | error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec, |
732 | &rec, &doneright, 0); | 764 | &rec, &doneright); |
733 | if (error) | 765 | if (error) |
734 | goto error1; | 766 | goto error1; |
735 | } else { | 767 | } else { |
diff --git a/fs/xfs/xfs_ialloc.h b/fs/xfs/xfs_ialloc.h index c8da3df271e6..68c07320f096 100644 --- a/fs/xfs/xfs_ialloc.h +++ b/fs/xfs/xfs_ialloc.h | |||
@@ -150,6 +150,14 @@ int xfs_inobt_lookup(struct xfs_btree_cur *cur, xfs_agino_t ino, | |||
150 | int xfs_inobt_get_rec(struct xfs_btree_cur *cur, | 150 | int xfs_inobt_get_rec(struct xfs_btree_cur *cur, |
151 | xfs_inobt_rec_incore_t *rec, int *stat); | 151 | xfs_inobt_rec_incore_t *rec, int *stat); |
152 | 152 | ||
153 | /* | ||
154 | * Inode chunk initialisation routine | ||
155 | */ | ||
156 | int xfs_ialloc_inode_init(struct xfs_mount *mp, struct xfs_trans *tp, | ||
157 | struct list_head *buffer_list, | ||
158 | xfs_agnumber_t agno, xfs_agblock_t agbno, | ||
159 | xfs_agblock_t length, unsigned int gen); | ||
160 | |||
153 | extern const struct xfs_buf_ops xfs_agi_buf_ops; | 161 | extern const struct xfs_buf_ops xfs_agi_buf_ops; |
154 | 162 | ||
155 | #endif /* __XFS_IALLOC_H__ */ | 163 | #endif /* __XFS_IALLOC_H__ */ |
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 96e344e3e927..9560dc1f15a9 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c | |||
@@ -335,7 +335,8 @@ xfs_iget_cache_miss( | |||
335 | iflags = XFS_INEW; | 335 | iflags = XFS_INEW; |
336 | if (flags & XFS_IGET_DONTCACHE) | 336 | if (flags & XFS_IGET_DONTCACHE) |
337 | iflags |= XFS_IDONTCACHE; | 337 | iflags |= XFS_IDONTCACHE; |
338 | ip->i_udquot = ip->i_gdquot = NULL; | 338 | ip->i_udquot = NULL; |
339 | ip->i_gdquot = NULL; | ||
339 | xfs_iflags_set(ip, iflags); | 340 | xfs_iflags_set(ip, iflags); |
340 | 341 | ||
341 | /* insert the new inode */ | 342 | /* insert the new inode */ |
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h index e0f138c70a2f..a01afbb3909a 100644 --- a/fs/xfs/xfs_icache.h +++ b/fs/xfs/xfs_icache.h | |||
@@ -40,7 +40,6 @@ void xfs_inode_clear_eofblocks_tag(struct xfs_inode *ip); | |||
40 | int xfs_icache_free_eofblocks(struct xfs_mount *, struct xfs_eofblocks *); | 40 | int xfs_icache_free_eofblocks(struct xfs_mount *, struct xfs_eofblocks *); |
41 | void xfs_eofblocks_worker(struct work_struct *); | 41 | void xfs_eofblocks_worker(struct work_struct *); |
42 | 42 | ||
43 | int xfs_sync_inode_grab(struct xfs_inode *ip); | ||
44 | int xfs_inode_ag_iterator(struct xfs_mount *mp, | 43 | int xfs_inode_ag_iterator(struct xfs_mount *mp, |
45 | int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, | 44 | int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, |
46 | int flags, void *args), | 45 | int flags, void *args), |
diff --git a/fs/xfs/xfs_icreate_item.c b/fs/xfs/xfs_icreate_item.c new file mode 100644 index 000000000000..7716a4e7375e --- /dev/null +++ b/fs/xfs/xfs_icreate_item.c | |||
@@ -0,0 +1,195 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008-2010, 2013 Dave Chinner | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | #include "xfs.h" | ||
19 | #include "xfs_fs.h" | ||
20 | #include "xfs_types.h" | ||
21 | #include "xfs_bit.h" | ||
22 | #include "xfs_log.h" | ||
23 | #include "xfs_inum.h" | ||
24 | #include "xfs_trans.h" | ||
25 | #include "xfs_buf_item.h" | ||
26 | #include "xfs_sb.h" | ||
27 | #include "xfs_ag.h" | ||
28 | #include "xfs_dir2.h" | ||
29 | #include "xfs_mount.h" | ||
30 | #include "xfs_trans_priv.h" | ||
31 | #include "xfs_bmap_btree.h" | ||
32 | #include "xfs_alloc_btree.h" | ||
33 | #include "xfs_ialloc_btree.h" | ||
34 | #include "xfs_attr_sf.h" | ||
35 | #include "xfs_dinode.h" | ||
36 | #include "xfs_inode.h" | ||
37 | #include "xfs_inode_item.h" | ||
38 | #include "xfs_btree.h" | ||
39 | #include "xfs_ialloc.h" | ||
40 | #include "xfs_error.h" | ||
41 | #include "xfs_icreate_item.h" | ||
42 | |||
43 | kmem_zone_t *xfs_icreate_zone; /* inode create item zone */ | ||
44 | |||
45 | static inline struct xfs_icreate_item *ICR_ITEM(struct xfs_log_item *lip) | ||
46 | { | ||
47 | return container_of(lip, struct xfs_icreate_item, ic_item); | ||
48 | } | ||
49 | |||
50 | /* | ||
51 | * This returns the number of iovecs needed to log the given inode item. | ||
52 | * | ||
53 | * We only need one iovec for the icreate log structure. | ||
54 | */ | ||
55 | STATIC uint | ||
56 | xfs_icreate_item_size( | ||
57 | struct xfs_log_item *lip) | ||
58 | { | ||
59 | return 1; | ||
60 | } | ||
61 | |||
62 | /* | ||
63 | * This is called to fill in the vector of log iovecs for the | ||
64 | * given inode create log item. | ||
65 | */ | ||
66 | STATIC void | ||
67 | xfs_icreate_item_format( | ||
68 | struct xfs_log_item *lip, | ||
69 | struct xfs_log_iovec *log_vector) | ||
70 | { | ||
71 | struct xfs_icreate_item *icp = ICR_ITEM(lip); | ||
72 | |||
73 | log_vector->i_addr = (xfs_caddr_t)&icp->ic_format; | ||
74 | log_vector->i_len = sizeof(struct xfs_icreate_log); | ||
75 | log_vector->i_type = XLOG_REG_TYPE_ICREATE; | ||
76 | } | ||
77 | |||
78 | |||
79 | /* Pinning has no meaning for the create item, so just return. */ | ||
80 | STATIC void | ||
81 | xfs_icreate_item_pin( | ||
82 | struct xfs_log_item *lip) | ||
83 | { | ||
84 | } | ||
85 | |||
86 | |||
87 | /* pinning has no meaning for the create item, so just return. */ | ||
88 | STATIC void | ||
89 | xfs_icreate_item_unpin( | ||
90 | struct xfs_log_item *lip, | ||
91 | int remove) | ||
92 | { | ||
93 | } | ||
94 | |||
95 | STATIC void | ||
96 | xfs_icreate_item_unlock( | ||
97 | struct xfs_log_item *lip) | ||
98 | { | ||
99 | struct xfs_icreate_item *icp = ICR_ITEM(lip); | ||
100 | |||
101 | if (icp->ic_item.li_flags & XFS_LI_ABORTED) | ||
102 | kmem_zone_free(xfs_icreate_zone, icp); | ||
103 | return; | ||
104 | } | ||
105 | |||
106 | /* | ||
107 | * Because we have ordered buffers being tracked in the AIL for the inode | ||
108 | * creation, we don't need the create item after this. Hence we can free | ||
109 | * the log item and return -1 to tell the caller we're done with the item. | ||
110 | */ | ||
111 | STATIC xfs_lsn_t | ||
112 | xfs_icreate_item_committed( | ||
113 | struct xfs_log_item *lip, | ||
114 | xfs_lsn_t lsn) | ||
115 | { | ||
116 | struct xfs_icreate_item *icp = ICR_ITEM(lip); | ||
117 | |||
118 | kmem_zone_free(xfs_icreate_zone, icp); | ||
119 | return (xfs_lsn_t)-1; | ||
120 | } | ||
121 | |||
122 | /* item can never get into the AIL */ | ||
123 | STATIC uint | ||
124 | xfs_icreate_item_push( | ||
125 | struct xfs_log_item *lip, | ||
126 | struct list_head *buffer_list) | ||
127 | { | ||
128 | ASSERT(0); | ||
129 | return XFS_ITEM_SUCCESS; | ||
130 | } | ||
131 | |||
132 | /* Ordered buffers do the dependency tracking here, so this does nothing. */ | ||
133 | STATIC void | ||
134 | xfs_icreate_item_committing( | ||
135 | struct xfs_log_item *lip, | ||
136 | xfs_lsn_t lsn) | ||
137 | { | ||
138 | } | ||
139 | |||
140 | /* | ||
141 | * This is the ops vector shared by all buf log items. | ||
142 | */ | ||
143 | static struct xfs_item_ops xfs_icreate_item_ops = { | ||
144 | .iop_size = xfs_icreate_item_size, | ||
145 | .iop_format = xfs_icreate_item_format, | ||
146 | .iop_pin = xfs_icreate_item_pin, | ||
147 | .iop_unpin = xfs_icreate_item_unpin, | ||
148 | .iop_push = xfs_icreate_item_push, | ||
149 | .iop_unlock = xfs_icreate_item_unlock, | ||
150 | .iop_committed = xfs_icreate_item_committed, | ||
151 | .iop_committing = xfs_icreate_item_committing, | ||
152 | }; | ||
153 | |||
154 | |||
155 | /* | ||
156 | * Initialize the inode log item for a newly allocated (in-core) inode. | ||
157 | * | ||
158 | * Inode extents can only reside within an AG. Hence specify the starting | ||
159 | * block for the inode chunk by offset within an AG as well as the | ||
160 | * length of the allocated extent. | ||
161 | * | ||
162 | * This joins the item to the transaction and marks it dirty so | ||
163 | * that we don't need a separate call to do this, nor does the | ||
164 | * caller need to know anything about the icreate item. | ||
165 | */ | ||
166 | void | ||
167 | xfs_icreate_log( | ||
168 | struct xfs_trans *tp, | ||
169 | xfs_agnumber_t agno, | ||
170 | xfs_agblock_t agbno, | ||
171 | unsigned int count, | ||
172 | unsigned int inode_size, | ||
173 | xfs_agblock_t length, | ||
174 | unsigned int generation) | ||
175 | { | ||
176 | struct xfs_icreate_item *icp; | ||
177 | |||
178 | icp = kmem_zone_zalloc(xfs_icreate_zone, KM_SLEEP); | ||
179 | |||
180 | xfs_log_item_init(tp->t_mountp, &icp->ic_item, XFS_LI_ICREATE, | ||
181 | &xfs_icreate_item_ops); | ||
182 | |||
183 | icp->ic_format.icl_type = XFS_LI_ICREATE; | ||
184 | icp->ic_format.icl_size = 1; /* single vector */ | ||
185 | icp->ic_format.icl_ag = cpu_to_be32(agno); | ||
186 | icp->ic_format.icl_agbno = cpu_to_be32(agbno); | ||
187 | icp->ic_format.icl_count = cpu_to_be32(count); | ||
188 | icp->ic_format.icl_isize = cpu_to_be32(inode_size); | ||
189 | icp->ic_format.icl_length = cpu_to_be32(length); | ||
190 | icp->ic_format.icl_gen = cpu_to_be32(generation); | ||
191 | |||
192 | xfs_trans_add_item(tp, &icp->ic_item); | ||
193 | tp->t_flags |= XFS_TRANS_DIRTY; | ||
194 | icp->ic_item.li_desc->lid_flags |= XFS_LID_DIRTY; | ||
195 | } | ||
diff --git a/fs/xfs/xfs_icreate_item.h b/fs/xfs/xfs_icreate_item.h new file mode 100644 index 000000000000..88ba8aa0bc41 --- /dev/null +++ b/fs/xfs/xfs_icreate_item.h | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008-2010, Dave Chinner | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | #ifndef XFS_ICREATE_ITEM_H | ||
19 | #define XFS_ICREATE_ITEM_H 1 | ||
20 | |||
21 | /* | ||
22 | * on disk log item structure | ||
23 | * | ||
24 | * Log recovery assumes the first two entries are the type and size and they fit | ||
25 | * in 32 bits. Also in host order (ugh) so they have to be 32 bit aligned so | ||
26 | * decoding can be done correctly. | ||
27 | */ | ||
28 | struct xfs_icreate_log { | ||
29 | __uint16_t icl_type; /* type of log format structure */ | ||
30 | __uint16_t icl_size; /* size of log format structure */ | ||
31 | __be32 icl_ag; /* ag being allocated in */ | ||
32 | __be32 icl_agbno; /* start block of inode range */ | ||
33 | __be32 icl_count; /* number of inodes to initialise */ | ||
34 | __be32 icl_isize; /* size of inodes */ | ||
35 | __be32 icl_length; /* length of extent to initialise */ | ||
36 | __be32 icl_gen; /* inode generation number to use */ | ||
37 | }; | ||
38 | |||
39 | /* in memory log item structure */ | ||
40 | struct xfs_icreate_item { | ||
41 | struct xfs_log_item ic_item; | ||
42 | struct xfs_icreate_log ic_format; | ||
43 | }; | ||
44 | |||
45 | extern kmem_zone_t *xfs_icreate_zone; /* inode create item zone */ | ||
46 | |||
47 | void xfs_icreate_log(struct xfs_trans *tp, xfs_agnumber_t agno, | ||
48 | xfs_agblock_t agbno, unsigned int count, | ||
49 | unsigned int inode_size, xfs_agblock_t length, | ||
50 | unsigned int generation); | ||
51 | |||
52 | #endif /* XFS_ICREATE_ITEM_H */ | ||
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 7f7be5f98f52..9ecfe1e559fc 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -1028,6 +1028,11 @@ xfs_dinode_calc_crc( | |||
1028 | 1028 | ||
1029 | /* | 1029 | /* |
1030 | * Read the disk inode attributes into the in-core inode structure. | 1030 | * Read the disk inode attributes into the in-core inode structure. |
1031 | * | ||
1032 | * If we are initialising a new inode and we are not utilising the | ||
1033 | * XFS_MOUNT_IKEEP inode cluster mode, we can simple build the new inode core | ||
1034 | * with a random generation number. If we are keeping inodes around, we need to | ||
1035 | * read the inode cluster to get the existing generation number off disk. | ||
1031 | */ | 1036 | */ |
1032 | int | 1037 | int |
1033 | xfs_iread( | 1038 | xfs_iread( |
@@ -1047,6 +1052,22 @@ xfs_iread( | |||
1047 | if (error) | 1052 | if (error) |
1048 | return error; | 1053 | return error; |
1049 | 1054 | ||
1055 | /* shortcut IO on inode allocation if possible */ | ||
1056 | if ((iget_flags & XFS_IGET_CREATE) && | ||
1057 | !(mp->m_flags & XFS_MOUNT_IKEEP)) { | ||
1058 | /* initialise the on-disk inode core */ | ||
1059 | memset(&ip->i_d, 0, sizeof(ip->i_d)); | ||
1060 | ip->i_d.di_magic = XFS_DINODE_MAGIC; | ||
1061 | ip->i_d.di_gen = prandom_u32(); | ||
1062 | if (xfs_sb_version_hascrc(&mp->m_sb)) { | ||
1063 | ip->i_d.di_version = 3; | ||
1064 | ip->i_d.di_ino = ip->i_ino; | ||
1065 | uuid_copy(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid); | ||
1066 | } else | ||
1067 | ip->i_d.di_version = 2; | ||
1068 | return 0; | ||
1069 | } | ||
1070 | |||
1050 | /* | 1071 | /* |
1051 | * Get pointers to the on-disk inode and the buffer containing it. | 1072 | * Get pointers to the on-disk inode and the buffer containing it. |
1052 | */ | 1073 | */ |
@@ -1133,17 +1154,16 @@ xfs_iread( | |||
1133 | xfs_buf_set_ref(bp, XFS_INO_REF); | 1154 | xfs_buf_set_ref(bp, XFS_INO_REF); |
1134 | 1155 | ||
1135 | /* | 1156 | /* |
1136 | * Use xfs_trans_brelse() to release the buffer containing the | 1157 | * Use xfs_trans_brelse() to release the buffer containing the on-disk |
1137 | * on-disk inode, because it was acquired with xfs_trans_read_buf() | 1158 | * inode, because it was acquired with xfs_trans_read_buf() in |
1138 | * in xfs_imap_to_bp() above. If tp is NULL, this is just a normal | 1159 | * xfs_imap_to_bp() above. If tp is NULL, this is just a normal |
1139 | * brelse(). If we're within a transaction, then xfs_trans_brelse() | 1160 | * brelse(). If we're within a transaction, then xfs_trans_brelse() |
1140 | * will only release the buffer if it is not dirty within the | 1161 | * will only release the buffer if it is not dirty within the |
1141 | * transaction. It will be OK to release the buffer in this case, | 1162 | * transaction. It will be OK to release the buffer in this case, |
1142 | * because inodes on disk are never destroyed and we will be | 1163 | * because inodes on disk are never destroyed and we will be locking the |
1143 | * locking the new in-core inode before putting it in the hash | 1164 | * new in-core inode before putting it in the cache where other |
1144 | * table where other processes can find it. Thus we don't have | 1165 | * processes can find it. Thus we don't have to worry about the inode |
1145 | * to worry about the inode being changed just because we released | 1166 | * being changed just because we released the buffer. |
1146 | * the buffer. | ||
1147 | */ | 1167 | */ |
1148 | out_brelse: | 1168 | out_brelse: |
1149 | xfs_trans_brelse(tp, bp); | 1169 | xfs_trans_brelse(tp, bp); |
@@ -2028,8 +2048,6 @@ xfs_ifree( | |||
2028 | int error; | 2048 | int error; |
2029 | int delete; | 2049 | int delete; |
2030 | xfs_ino_t first_ino; | 2050 | xfs_ino_t first_ino; |
2031 | xfs_dinode_t *dip; | ||
2032 | xfs_buf_t *ibp; | ||
2033 | 2051 | ||
2034 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 2052 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
2035 | ASSERT(ip->i_d.di_nlink == 0); | 2053 | ASSERT(ip->i_d.di_nlink == 0); |
@@ -2042,14 +2060,13 @@ xfs_ifree( | |||
2042 | * Pull the on-disk inode from the AGI unlinked list. | 2060 | * Pull the on-disk inode from the AGI unlinked list. |
2043 | */ | 2061 | */ |
2044 | error = xfs_iunlink_remove(tp, ip); | 2062 | error = xfs_iunlink_remove(tp, ip); |
2045 | if (error != 0) { | 2063 | if (error) |
2046 | return error; | 2064 | return error; |
2047 | } | ||
2048 | 2065 | ||
2049 | error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino); | 2066 | error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino); |
2050 | if (error != 0) { | 2067 | if (error) |
2051 | return error; | 2068 | return error; |
2052 | } | 2069 | |
2053 | ip->i_d.di_mode = 0; /* mark incore inode as free */ | 2070 | ip->i_d.di_mode = 0; /* mark incore inode as free */ |
2054 | ip->i_d.di_flags = 0; | 2071 | ip->i_d.di_flags = 0; |
2055 | ip->i_d.di_dmevmask = 0; | 2072 | ip->i_d.di_dmevmask = 0; |
@@ -2061,31 +2078,10 @@ xfs_ifree( | |||
2061 | * by reincarnations of this inode. | 2078 | * by reincarnations of this inode. |
2062 | */ | 2079 | */ |
2063 | ip->i_d.di_gen++; | 2080 | ip->i_d.di_gen++; |
2064 | |||
2065 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 2081 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
2066 | 2082 | ||
2067 | error = xfs_imap_to_bp(ip->i_mount, tp, &ip->i_imap, &dip, &ibp, | 2083 | if (delete) |
2068 | 0, 0); | ||
2069 | if (error) | ||
2070 | return error; | ||
2071 | |||
2072 | /* | ||
2073 | * Clear the on-disk di_mode. This is to prevent xfs_bulkstat | ||
2074 | * from picking up this inode when it is reclaimed (its incore state | ||
2075 | * initialzed but not flushed to disk yet). The in-core di_mode is | ||
2076 | * already cleared and a corresponding transaction logged. | ||
2077 | * The hack here just synchronizes the in-core to on-disk | ||
2078 | * di_mode value in advance before the actual inode sync to disk. | ||
2079 | * This is OK because the inode is already unlinked and would never | ||
2080 | * change its di_mode again for this inode generation. | ||
2081 | * This is a temporary hack that would require a proper fix | ||
2082 | * in the future. | ||
2083 | */ | ||
2084 | dip->di_mode = 0; | ||
2085 | |||
2086 | if (delete) { | ||
2087 | error = xfs_ifree_cluster(ip, tp, first_ino); | 2084 | error = xfs_ifree_cluster(ip, tp, first_ino); |
2088 | } | ||
2089 | 2085 | ||
2090 | return error; | 2086 | return error; |
2091 | } | 2087 | } |
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 8f8aaee7f379..6a7096422295 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c | |||
@@ -284,6 +284,15 @@ xfs_iomap_eof_want_preallocate( | |||
284 | return 0; | 284 | return 0; |
285 | 285 | ||
286 | /* | 286 | /* |
287 | * If the file is smaller than the minimum prealloc and we are using | ||
288 | * dynamic preallocation, don't do any preallocation at all as it is | ||
289 | * likely this is the only write to the file that is going to be done. | ||
290 | */ | ||
291 | if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) && | ||
292 | XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)) | ||
293 | return 0; | ||
294 | |||
295 | /* | ||
287 | * If there are any real blocks past eof, then don't | 296 | * If there are any real blocks past eof, then don't |
288 | * do any speculative allocation. | 297 | * do any speculative allocation. |
289 | */ | 298 | */ |
@@ -345,6 +354,10 @@ xfs_iomap_eof_prealloc_initial_size( | |||
345 | if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) | 354 | if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) |
346 | return 0; | 355 | return 0; |
347 | 356 | ||
357 | /* If the file is small, then use the minimum prealloc */ | ||
358 | if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign)) | ||
359 | return 0; | ||
360 | |||
348 | /* | 361 | /* |
349 | * As we write multiple pages, the offset will always align to the | 362 | * As we write multiple pages, the offset will always align to the |
350 | * start of a page and hence point to a hole at EOF. i.e. if the size is | 363 | * start of a page and hence point to a hole at EOF. i.e. if the size is |
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index ca9ecaa81112..c69bbc493cb0 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c | |||
@@ -987,7 +987,8 @@ xfs_fiemap_format( | |||
987 | if (bmv->bmv_oflags & BMV_OF_PREALLOC) | 987 | if (bmv->bmv_oflags & BMV_OF_PREALLOC) |
988 | fiemap_flags |= FIEMAP_EXTENT_UNWRITTEN; | 988 | fiemap_flags |= FIEMAP_EXTENT_UNWRITTEN; |
989 | else if (bmv->bmv_oflags & BMV_OF_DELALLOC) { | 989 | else if (bmv->bmv_oflags & BMV_OF_DELALLOC) { |
990 | fiemap_flags |= FIEMAP_EXTENT_DELALLOC; | 990 | fiemap_flags |= (FIEMAP_EXTENT_DELALLOC | |
991 | FIEMAP_EXTENT_UNKNOWN); | ||
991 | physical = 0; /* no block yet */ | 992 | physical = 0; /* no block yet */ |
992 | } | 993 | } |
993 | if (bmv->bmv_oflags & BMV_OF_LAST) | 994 | if (bmv->bmv_oflags & BMV_OF_LAST) |
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index 2ea7d402188d..bc92c5306a17 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c | |||
@@ -43,7 +43,7 @@ xfs_internal_inum( | |||
43 | { | 43 | { |
44 | return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino || | 44 | return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino || |
45 | (xfs_sb_version_hasquota(&mp->m_sb) && | 45 | (xfs_sb_version_hasquota(&mp->m_sb) && |
46 | (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino))); | 46 | xfs_is_quota_inode(&mp->m_sb, ino))); |
47 | } | 47 | } |
48 | 48 | ||
49 | /* | 49 | /* |
@@ -383,11 +383,13 @@ xfs_bulkstat( | |||
383 | * Also start read-ahead now for this chunk. | 383 | * Also start read-ahead now for this chunk. |
384 | */ | 384 | */ |
385 | if (r.ir_freecount < XFS_INODES_PER_CHUNK) { | 385 | if (r.ir_freecount < XFS_INODES_PER_CHUNK) { |
386 | struct blk_plug plug; | ||
386 | /* | 387 | /* |
387 | * Loop over all clusters in the next chunk. | 388 | * Loop over all clusters in the next chunk. |
388 | * Do a readahead if there are any allocated | 389 | * Do a readahead if there are any allocated |
389 | * inodes in that cluster. | 390 | * inodes in that cluster. |
390 | */ | 391 | */ |
392 | blk_start_plug(&plug); | ||
391 | agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino); | 393 | agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino); |
392 | for (chunkidx = 0; | 394 | for (chunkidx = 0; |
393 | chunkidx < XFS_INODES_PER_CHUNK; | 395 | chunkidx < XFS_INODES_PER_CHUNK; |
@@ -399,6 +401,7 @@ xfs_bulkstat( | |||
399 | agbno, nbcluster, | 401 | agbno, nbcluster, |
400 | &xfs_inode_buf_ops); | 402 | &xfs_inode_buf_ops); |
401 | } | 403 | } |
404 | blk_finish_plug(&plug); | ||
402 | irbp->ir_startino = r.ir_startino; | 405 | irbp->ir_startino = r.ir_startino; |
403 | irbp->ir_freecount = r.ir_freecount; | 406 | irbp->ir_freecount = r.ir_freecount; |
404 | irbp->ir_free = r.ir_free; | 407 | irbp->ir_free = r.ir_free; |
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index b345a7c85153..d852a2b3e1fd 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
@@ -1963,6 +1963,10 @@ xlog_write_calc_vec_length( | |||
1963 | headers++; | 1963 | headers++; |
1964 | 1964 | ||
1965 | for (lv = log_vector; lv; lv = lv->lv_next) { | 1965 | for (lv = log_vector; lv; lv = lv->lv_next) { |
1966 | /* we don't write ordered log vectors */ | ||
1967 | if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) | ||
1968 | continue; | ||
1969 | |||
1966 | headers += lv->lv_niovecs; | 1970 | headers += lv->lv_niovecs; |
1967 | 1971 | ||
1968 | for (i = 0; i < lv->lv_niovecs; i++) { | 1972 | for (i = 0; i < lv->lv_niovecs; i++) { |
@@ -2216,7 +2220,7 @@ xlog_write( | |||
2216 | index = 0; | 2220 | index = 0; |
2217 | lv = log_vector; | 2221 | lv = log_vector; |
2218 | vecp = lv->lv_iovecp; | 2222 | vecp = lv->lv_iovecp; |
2219 | while (lv && index < lv->lv_niovecs) { | 2223 | while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) { |
2220 | void *ptr; | 2224 | void *ptr; |
2221 | int log_offset; | 2225 | int log_offset; |
2222 | 2226 | ||
@@ -2236,13 +2240,22 @@ xlog_write( | |||
2236 | * This loop writes out as many regions as can fit in the amount | 2240 | * This loop writes out as many regions as can fit in the amount |
2237 | * of space which was allocated by xlog_state_get_iclog_space(). | 2241 | * of space which was allocated by xlog_state_get_iclog_space(). |
2238 | */ | 2242 | */ |
2239 | while (lv && index < lv->lv_niovecs) { | 2243 | while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) { |
2240 | struct xfs_log_iovec *reg = &vecp[index]; | 2244 | struct xfs_log_iovec *reg; |
2241 | struct xlog_op_header *ophdr; | 2245 | struct xlog_op_header *ophdr; |
2242 | int start_rec_copy; | 2246 | int start_rec_copy; |
2243 | int copy_len; | 2247 | int copy_len; |
2244 | int copy_off; | 2248 | int copy_off; |
2249 | bool ordered = false; | ||
2250 | |||
2251 | /* ordered log vectors have no regions to write */ | ||
2252 | if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) { | ||
2253 | ASSERT(lv->lv_niovecs == 0); | ||
2254 | ordered = true; | ||
2255 | goto next_lv; | ||
2256 | } | ||
2245 | 2257 | ||
2258 | reg = &vecp[index]; | ||
2246 | ASSERT(reg->i_len % sizeof(__int32_t) == 0); | 2259 | ASSERT(reg->i_len % sizeof(__int32_t) == 0); |
2247 | ASSERT((unsigned long)ptr % sizeof(__int32_t) == 0); | 2260 | ASSERT((unsigned long)ptr % sizeof(__int32_t) == 0); |
2248 | 2261 | ||
@@ -2302,12 +2315,13 @@ xlog_write( | |||
2302 | break; | 2315 | break; |
2303 | 2316 | ||
2304 | if (++index == lv->lv_niovecs) { | 2317 | if (++index == lv->lv_niovecs) { |
2318 | next_lv: | ||
2305 | lv = lv->lv_next; | 2319 | lv = lv->lv_next; |
2306 | index = 0; | 2320 | index = 0; |
2307 | if (lv) | 2321 | if (lv) |
2308 | vecp = lv->lv_iovecp; | 2322 | vecp = lv->lv_iovecp; |
2309 | } | 2323 | } |
2310 | if (record_cnt == 0) { | 2324 | if (record_cnt == 0 && ordered == false) { |
2311 | if (!lv) | 2325 | if (!lv) |
2312 | return 0; | 2326 | return 0; |
2313 | break; | 2327 | break; |
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h index 5caee96059df..fb630e496c12 100644 --- a/fs/xfs/xfs_log.h +++ b/fs/xfs/xfs_log.h | |||
@@ -88,7 +88,8 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2) | |||
88 | #define XLOG_REG_TYPE_UNMOUNT 17 | 88 | #define XLOG_REG_TYPE_UNMOUNT 17 |
89 | #define XLOG_REG_TYPE_COMMIT 18 | 89 | #define XLOG_REG_TYPE_COMMIT 18 |
90 | #define XLOG_REG_TYPE_TRANSHDR 19 | 90 | #define XLOG_REG_TYPE_TRANSHDR 19 |
91 | #define XLOG_REG_TYPE_MAX 19 | 91 | #define XLOG_REG_TYPE_ICREATE 20 |
92 | #define XLOG_REG_TYPE_MAX 20 | ||
92 | 93 | ||
93 | typedef struct xfs_log_iovec { | 94 | typedef struct xfs_log_iovec { |
94 | void *i_addr; /* beginning address of region */ | 95 | void *i_addr; /* beginning address of region */ |
@@ -105,6 +106,8 @@ struct xfs_log_vec { | |||
105 | int lv_buf_len; /* size of formatted buffer */ | 106 | int lv_buf_len; /* size of formatted buffer */ |
106 | }; | 107 | }; |
107 | 108 | ||
109 | #define XFS_LOG_VEC_ORDERED (-1) | ||
110 | |||
108 | /* | 111 | /* |
109 | * Structure used to pass callback function and the function's argument | 112 | * Structure used to pass callback function and the function's argument |
110 | * to the log manager. | 113 | * to the log manager. |
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index d0833b54e55d..02b9cf3f8252 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c | |||
@@ -127,6 +127,7 @@ xlog_cil_prepare_log_vecs( | |||
127 | int index; | 127 | int index; |
128 | int len = 0; | 128 | int len = 0; |
129 | uint niovecs; | 129 | uint niovecs; |
130 | bool ordered = false; | ||
130 | 131 | ||
131 | /* Skip items which aren't dirty in this transaction. */ | 132 | /* Skip items which aren't dirty in this transaction. */ |
132 | if (!(lidp->lid_flags & XFS_LID_DIRTY)) | 133 | if (!(lidp->lid_flags & XFS_LID_DIRTY)) |
@@ -137,14 +138,30 @@ xlog_cil_prepare_log_vecs( | |||
137 | if (!niovecs) | 138 | if (!niovecs) |
138 | continue; | 139 | continue; |
139 | 140 | ||
141 | /* | ||
142 | * Ordered items need to be tracked but we do not wish to write | ||
143 | * them. We need a logvec to track the object, but we do not | ||
144 | * need an iovec or buffer to be allocated for copying data. | ||
145 | */ | ||
146 | if (niovecs == XFS_LOG_VEC_ORDERED) { | ||
147 | ordered = true; | ||
148 | niovecs = 0; | ||
149 | } | ||
150 | |||
140 | new_lv = kmem_zalloc(sizeof(*new_lv) + | 151 | new_lv = kmem_zalloc(sizeof(*new_lv) + |
141 | niovecs * sizeof(struct xfs_log_iovec), | 152 | niovecs * sizeof(struct xfs_log_iovec), |
142 | KM_SLEEP|KM_NOFS); | 153 | KM_SLEEP|KM_NOFS); |
143 | 154 | ||
155 | new_lv->lv_item = lidp->lid_item; | ||
156 | new_lv->lv_niovecs = niovecs; | ||
157 | if (ordered) { | ||
158 | /* track as an ordered logvec */ | ||
159 | new_lv->lv_buf_len = XFS_LOG_VEC_ORDERED; | ||
160 | goto next; | ||
161 | } | ||
162 | |||
144 | /* The allocated iovec region lies beyond the log vector. */ | 163 | /* The allocated iovec region lies beyond the log vector. */ |
145 | new_lv->lv_iovecp = (struct xfs_log_iovec *)&new_lv[1]; | 164 | new_lv->lv_iovecp = (struct xfs_log_iovec *)&new_lv[1]; |
146 | new_lv->lv_niovecs = niovecs; | ||
147 | new_lv->lv_item = lidp->lid_item; | ||
148 | 165 | ||
149 | /* build the vector array and calculate it's length */ | 166 | /* build the vector array and calculate it's length */ |
150 | IOP_FORMAT(new_lv->lv_item, new_lv->lv_iovecp); | 167 | IOP_FORMAT(new_lv->lv_item, new_lv->lv_iovecp); |
@@ -165,6 +182,7 @@ xlog_cil_prepare_log_vecs( | |||
165 | } | 182 | } |
166 | ASSERT(ptr == new_lv->lv_buf + new_lv->lv_buf_len); | 183 | ASSERT(ptr == new_lv->lv_buf + new_lv->lv_buf_len); |
167 | 184 | ||
185 | next: | ||
168 | if (!ret_lv) | 186 | if (!ret_lv) |
169 | ret_lv = new_lv; | 187 | ret_lv = new_lv; |
170 | else | 188 | else |
@@ -191,8 +209,18 @@ xfs_cil_prepare_item( | |||
191 | 209 | ||
192 | if (old) { | 210 | if (old) { |
193 | /* existing lv on log item, space used is a delta */ | 211 | /* existing lv on log item, space used is a delta */ |
194 | ASSERT(!list_empty(&lv->lv_item->li_cil)); | 212 | ASSERT((old->lv_buf && old->lv_buf_len && old->lv_niovecs) || |
195 | ASSERT(old->lv_buf && old->lv_buf_len && old->lv_niovecs); | 213 | old->lv_buf_len == XFS_LOG_VEC_ORDERED); |
214 | |||
215 | /* | ||
216 | * If the new item is ordered, keep the old one that is already | ||
217 | * tracking dirty or ordered regions | ||
218 | */ | ||
219 | if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) { | ||
220 | ASSERT(!lv->lv_buf); | ||
221 | kmem_free(lv); | ||
222 | return; | ||
223 | } | ||
196 | 224 | ||
197 | *len += lv->lv_buf_len - old->lv_buf_len; | 225 | *len += lv->lv_buf_len - old->lv_buf_len; |
198 | *diff_iovecs += lv->lv_niovecs - old->lv_niovecs; | 226 | *diff_iovecs += lv->lv_niovecs - old->lv_niovecs; |
@@ -201,10 +229,11 @@ xfs_cil_prepare_item( | |||
201 | } else { | 229 | } else { |
202 | /* new lv, must pin the log item */ | 230 | /* new lv, must pin the log item */ |
203 | ASSERT(!lv->lv_item->li_lv); | 231 | ASSERT(!lv->lv_item->li_lv); |
204 | ASSERT(list_empty(&lv->lv_item->li_cil)); | ||
205 | 232 | ||
206 | *len += lv->lv_buf_len; | 233 | if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) { |
207 | *diff_iovecs += lv->lv_niovecs; | 234 | *len += lv->lv_buf_len; |
235 | *diff_iovecs += lv->lv_niovecs; | ||
236 | } | ||
208 | IOP_PIN(lv->lv_item); | 237 | IOP_PIN(lv->lv_item); |
209 | 238 | ||
210 | } | 239 | } |
@@ -259,18 +288,24 @@ xlog_cil_insert_items( | |||
259 | * We can do this safely because the context can't checkpoint until we | 288 | * We can do this safely because the context can't checkpoint until we |
260 | * are done so it doesn't matter exactly how we update the CIL. | 289 | * are done so it doesn't matter exactly how we update the CIL. |
261 | */ | 290 | */ |
262 | for (lv = log_vector; lv; lv = lv->lv_next) | ||
263 | xfs_cil_prepare_item(log, lv, &len, &diff_iovecs); | ||
264 | |||
265 | /* account for space used by new iovec headers */ | ||
266 | len += diff_iovecs * sizeof(xlog_op_header_t); | ||
267 | |||
268 | spin_lock(&cil->xc_cil_lock); | 291 | spin_lock(&cil->xc_cil_lock); |
292 | for (lv = log_vector; lv; ) { | ||
293 | struct xfs_log_vec *next = lv->lv_next; | ||
269 | 294 | ||
270 | /* move the items to the tail of the CIL */ | 295 | ASSERT(lv->lv_item->li_lv || list_empty(&lv->lv_item->li_cil)); |
271 | for (lv = log_vector; lv; lv = lv->lv_next) | 296 | lv->lv_next = NULL; |
297 | |||
298 | /* | ||
299 | * xfs_cil_prepare_item() may free the lv, so move the item on | ||
300 | * the CIL first. | ||
301 | */ | ||
272 | list_move_tail(&lv->lv_item->li_cil, &cil->xc_cil); | 302 | list_move_tail(&lv->lv_item->li_cil, &cil->xc_cil); |
303 | xfs_cil_prepare_item(log, lv, &len, &diff_iovecs); | ||
304 | lv = next; | ||
305 | } | ||
273 | 306 | ||
307 | /* account for space used by new iovec headers */ | ||
308 | len += diff_iovecs * sizeof(xlog_op_header_t); | ||
274 | ctx->nvecs += diff_iovecs; | 309 | ctx->nvecs += diff_iovecs; |
275 | 310 | ||
276 | /* | 311 | /* |
@@ -381,9 +416,7 @@ xlog_cil_push( | |||
381 | struct xfs_cil_ctx *new_ctx; | 416 | struct xfs_cil_ctx *new_ctx; |
382 | struct xlog_in_core *commit_iclog; | 417 | struct xlog_in_core *commit_iclog; |
383 | struct xlog_ticket *tic; | 418 | struct xlog_ticket *tic; |
384 | int num_lv; | ||
385 | int num_iovecs; | 419 | int num_iovecs; |
386 | int len; | ||
387 | int error = 0; | 420 | int error = 0; |
388 | struct xfs_trans_header thdr; | 421 | struct xfs_trans_header thdr; |
389 | struct xfs_log_iovec lhdr; | 422 | struct xfs_log_iovec lhdr; |
@@ -428,12 +461,9 @@ xlog_cil_push( | |||
428 | * side which is currently locked out by the flush lock. | 461 | * side which is currently locked out by the flush lock. |
429 | */ | 462 | */ |
430 | lv = NULL; | 463 | lv = NULL; |
431 | num_lv = 0; | ||
432 | num_iovecs = 0; | 464 | num_iovecs = 0; |
433 | len = 0; | ||
434 | while (!list_empty(&cil->xc_cil)) { | 465 | while (!list_empty(&cil->xc_cil)) { |
435 | struct xfs_log_item *item; | 466 | struct xfs_log_item *item; |
436 | int i; | ||
437 | 467 | ||
438 | item = list_first_entry(&cil->xc_cil, | 468 | item = list_first_entry(&cil->xc_cil, |
439 | struct xfs_log_item, li_cil); | 469 | struct xfs_log_item, li_cil); |
@@ -444,11 +474,7 @@ xlog_cil_push( | |||
444 | lv->lv_next = item->li_lv; | 474 | lv->lv_next = item->li_lv; |
445 | lv = item->li_lv; | 475 | lv = item->li_lv; |
446 | item->li_lv = NULL; | 476 | item->li_lv = NULL; |
447 | |||
448 | num_lv++; | ||
449 | num_iovecs += lv->lv_niovecs; | 477 | num_iovecs += lv->lv_niovecs; |
450 | for (i = 0; i < lv->lv_niovecs; i++) | ||
451 | len += lv->lv_iovecp[i].i_len; | ||
452 | } | 478 | } |
453 | 479 | ||
454 | /* | 480 | /* |
@@ -701,6 +727,7 @@ xfs_log_commit_cil( | |||
701 | if (commit_lsn) | 727 | if (commit_lsn) |
702 | *commit_lsn = log->l_cilp->xc_ctx->sequence; | 728 | *commit_lsn = log->l_cilp->xc_ctx->sequence; |
703 | 729 | ||
730 | /* xlog_cil_insert_items() destroys log_vector list */ | ||
704 | xlog_cil_insert_items(log, log_vector, tp->t_ticket); | 731 | xlog_cil_insert_items(log, log_vector, tp->t_ticket); |
705 | 732 | ||
706 | /* check we didn't blow the reservation */ | 733 | /* check we didn't blow the reservation */ |
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 7cf5e4eafe28..6fcc910a50b9 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include "xfs_cksum.h" | 45 | #include "xfs_cksum.h" |
46 | #include "xfs_trace.h" | 46 | #include "xfs_trace.h" |
47 | #include "xfs_icache.h" | 47 | #include "xfs_icache.h" |
48 | #include "xfs_icreate_item.h" | ||
48 | 49 | ||
49 | /* Need all the magic numbers and buffer ops structures from these headers */ | 50 | /* Need all the magic numbers and buffer ops structures from these headers */ |
50 | #include "xfs_symlink.h" | 51 | #include "xfs_symlink.h" |
@@ -1617,7 +1618,10 @@ xlog_recover_add_to_trans( | |||
1617 | * form the cancelled buffer table. Hence they have tobe done last. | 1618 | * form the cancelled buffer table. Hence they have tobe done last. |
1618 | * | 1619 | * |
1619 | * 3. Inode allocation buffers must be replayed before inode items that | 1620 | * 3. Inode allocation buffers must be replayed before inode items that |
1620 | * read the buffer and replay changes into it. | 1621 | * read the buffer and replay changes into it. For filesystems using the |
1622 | * ICREATE transactions, this means XFS_LI_ICREATE objects need to get | ||
1623 | * treated the same as inode allocation buffers as they create and | ||
1624 | * initialise the buffers directly. | ||
1621 | * | 1625 | * |
1622 | * 4. Inode unlink buffers must be replayed after inode items are replayed. | 1626 | * 4. Inode unlink buffers must be replayed after inode items are replayed. |
1623 | * This ensures that inodes are completely flushed to the inode buffer | 1627 | * This ensures that inodes are completely flushed to the inode buffer |
@@ -1632,10 +1636,17 @@ xlog_recover_add_to_trans( | |||
1632 | * from all the other buffers and move them to last. | 1636 | * from all the other buffers and move them to last. |
1633 | * | 1637 | * |
1634 | * Hence, 4 lists, in order from head to tail: | 1638 | * Hence, 4 lists, in order from head to tail: |
1635 | * - buffer_list for all buffers except cancelled/inode unlink buffers | 1639 | * - buffer_list for all buffers except cancelled/inode unlink buffers |
1636 | * - item_list for all non-buffer items | 1640 | * - item_list for all non-buffer items |
1637 | * - inode_buffer_list for inode unlink buffers | 1641 | * - inode_buffer_list for inode unlink buffers |
1638 | * - cancel_list for the cancelled buffers | 1642 | * - cancel_list for the cancelled buffers |
1643 | * | ||
1644 | * Note that we add objects to the tail of the lists so that first-to-last | ||
1645 | * ordering is preserved within the lists. Adding objects to the head of the | ||
1646 | * list means when we traverse from the head we walk them in last-to-first | ||
1647 | * order. For cancelled buffers and inode unlink buffers this doesn't matter, | ||
1648 | * but for all other items there may be specific ordering that we need to | ||
1649 | * preserve. | ||
1639 | */ | 1650 | */ |
1640 | STATIC int | 1651 | STATIC int |
1641 | xlog_recover_reorder_trans( | 1652 | xlog_recover_reorder_trans( |
@@ -1655,6 +1666,9 @@ xlog_recover_reorder_trans( | |||
1655 | xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; | 1666 | xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; |
1656 | 1667 | ||
1657 | switch (ITEM_TYPE(item)) { | 1668 | switch (ITEM_TYPE(item)) { |
1669 | case XFS_LI_ICREATE: | ||
1670 | list_move_tail(&item->ri_list, &buffer_list); | ||
1671 | break; | ||
1658 | case XFS_LI_BUF: | 1672 | case XFS_LI_BUF: |
1659 | if (buf_f->blf_flags & XFS_BLF_CANCEL) { | 1673 | if (buf_f->blf_flags & XFS_BLF_CANCEL) { |
1660 | trace_xfs_log_recover_item_reorder_head(log, | 1674 | trace_xfs_log_recover_item_reorder_head(log, |
@@ -2982,6 +2996,93 @@ xlog_recover_efd_pass2( | |||
2982 | } | 2996 | } |
2983 | 2997 | ||
2984 | /* | 2998 | /* |
2999 | * This routine is called when an inode create format structure is found in a | ||
3000 | * committed transaction in the log. It's purpose is to initialise the inodes | ||
3001 | * being allocated on disk. This requires us to get inode cluster buffers that | ||
3002 | * match the range to be intialised, stamped with inode templates and written | ||
3003 | * by delayed write so that subsequent modifications will hit the cached buffer | ||
3004 | * and only need writing out at the end of recovery. | ||
3005 | */ | ||
3006 | STATIC int | ||
3007 | xlog_recover_do_icreate_pass2( | ||
3008 | struct xlog *log, | ||
3009 | struct list_head *buffer_list, | ||
3010 | xlog_recover_item_t *item) | ||
3011 | { | ||
3012 | struct xfs_mount *mp = log->l_mp; | ||
3013 | struct xfs_icreate_log *icl; | ||
3014 | xfs_agnumber_t agno; | ||
3015 | xfs_agblock_t agbno; | ||
3016 | unsigned int count; | ||
3017 | unsigned int isize; | ||
3018 | xfs_agblock_t length; | ||
3019 | |||
3020 | icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr; | ||
3021 | if (icl->icl_type != XFS_LI_ICREATE) { | ||
3022 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type"); | ||
3023 | return EINVAL; | ||
3024 | } | ||
3025 | |||
3026 | if (icl->icl_size != 1) { | ||
3027 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size"); | ||
3028 | return EINVAL; | ||
3029 | } | ||
3030 | |||
3031 | agno = be32_to_cpu(icl->icl_ag); | ||
3032 | if (agno >= mp->m_sb.sb_agcount) { | ||
3033 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno"); | ||
3034 | return EINVAL; | ||
3035 | } | ||
3036 | agbno = be32_to_cpu(icl->icl_agbno); | ||
3037 | if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) { | ||
3038 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno"); | ||
3039 | return EINVAL; | ||
3040 | } | ||
3041 | isize = be32_to_cpu(icl->icl_isize); | ||
3042 | if (isize != mp->m_sb.sb_inodesize) { | ||
3043 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize"); | ||
3044 | return EINVAL; | ||
3045 | } | ||
3046 | count = be32_to_cpu(icl->icl_count); | ||
3047 | if (!count) { | ||
3048 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count"); | ||
3049 | return EINVAL; | ||
3050 | } | ||
3051 | length = be32_to_cpu(icl->icl_length); | ||
3052 | if (!length || length >= mp->m_sb.sb_agblocks) { | ||
3053 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length"); | ||
3054 | return EINVAL; | ||
3055 | } | ||
3056 | |||
3057 | /* existing allocation is fixed value */ | ||
3058 | ASSERT(count == XFS_IALLOC_INODES(mp)); | ||
3059 | ASSERT(length == XFS_IALLOC_BLOCKS(mp)); | ||
3060 | if (count != XFS_IALLOC_INODES(mp) || | ||
3061 | length != XFS_IALLOC_BLOCKS(mp)) { | ||
3062 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2"); | ||
3063 | return EINVAL; | ||
3064 | } | ||
3065 | |||
3066 | /* | ||
3067 | * Inode buffers can be freed. Do not replay the inode initialisation as | ||
3068 | * we could be overwriting something written after this inode buffer was | ||
3069 | * cancelled. | ||
3070 | * | ||
3071 | * XXX: we need to iterate all buffers and only init those that are not | ||
3072 | * cancelled. I think that a more fine grained factoring of | ||
3073 | * xfs_ialloc_inode_init may be appropriate here to enable this to be | ||
3074 | * done easily. | ||
3075 | */ | ||
3076 | if (xlog_check_buffer_cancelled(log, | ||
3077 | XFS_AGB_TO_DADDR(mp, agno, agbno), length, 0)) | ||
3078 | return 0; | ||
3079 | |||
3080 | xfs_ialloc_inode_init(mp, NULL, buffer_list, agno, agbno, length, | ||
3081 | be32_to_cpu(icl->icl_gen)); | ||
3082 | return 0; | ||
3083 | } | ||
3084 | |||
3085 | /* | ||
2985 | * Free up any resources allocated by the transaction | 3086 | * Free up any resources allocated by the transaction |
2986 | * | 3087 | * |
2987 | * Remember that EFIs, EFDs, and IUNLINKs are handled later. | 3088 | * Remember that EFIs, EFDs, and IUNLINKs are handled later. |
@@ -3023,6 +3124,7 @@ xlog_recover_commit_pass1( | |||
3023 | case XFS_LI_EFI: | 3124 | case XFS_LI_EFI: |
3024 | case XFS_LI_EFD: | 3125 | case XFS_LI_EFD: |
3025 | case XFS_LI_DQUOT: | 3126 | case XFS_LI_DQUOT: |
3127 | case XFS_LI_ICREATE: | ||
3026 | /* nothing to do in pass 1 */ | 3128 | /* nothing to do in pass 1 */ |
3027 | return 0; | 3129 | return 0; |
3028 | default: | 3130 | default: |
@@ -3053,6 +3155,8 @@ xlog_recover_commit_pass2( | |||
3053 | return xlog_recover_efd_pass2(log, item); | 3155 | return xlog_recover_efd_pass2(log, item); |
3054 | case XFS_LI_DQUOT: | 3156 | case XFS_LI_DQUOT: |
3055 | return xlog_recover_dquot_pass2(log, buffer_list, item); | 3157 | return xlog_recover_dquot_pass2(log, buffer_list, item); |
3158 | case XFS_LI_ICREATE: | ||
3159 | return xlog_recover_do_icreate_pass2(log, buffer_list, item); | ||
3056 | case XFS_LI_QUOTAOFF: | 3160 | case XFS_LI_QUOTAOFF: |
3057 | /* nothing to do in pass2 */ | 3161 | /* nothing to do in pass2 */ |
3058 | return 0; | 3162 | return 0; |
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index e8e310c05097..2b0ba3581656 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c | |||
@@ -336,6 +336,14 @@ xfs_mount_validate_sb( | |||
336 | return XFS_ERROR(EWRONGFS); | 336 | return XFS_ERROR(EWRONGFS); |
337 | } | 337 | } |
338 | 338 | ||
339 | if ((sbp->sb_qflags & (XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD)) && | ||
340 | (sbp->sb_qflags & (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD | | ||
341 | XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD))) { | ||
342 | xfs_notice(mp, | ||
343 | "Super block has XFS_OQUOTA bits along with XFS_PQUOTA and/or XFS_GQUOTA bits.\n"); | ||
344 | return XFS_ERROR(EFSCORRUPTED); | ||
345 | } | ||
346 | |||
339 | /* | 347 | /* |
340 | * Version 5 superblock feature mask validation. Reject combinations the | 348 | * Version 5 superblock feature mask validation. Reject combinations the |
341 | * kernel cannot support up front before checking anything else. For | 349 | * kernel cannot support up front before checking anything else. For |
@@ -561,6 +569,18 @@ out_unwind: | |||
561 | return error; | 569 | return error; |
562 | } | 570 | } |
563 | 571 | ||
572 | static void | ||
573 | xfs_sb_quota_from_disk(struct xfs_sb *sbp) | ||
574 | { | ||
575 | if (sbp->sb_qflags & XFS_OQUOTA_ENFD) | ||
576 | sbp->sb_qflags |= (sbp->sb_qflags & XFS_PQUOTA_ACCT) ? | ||
577 | XFS_PQUOTA_ENFD : XFS_GQUOTA_ENFD; | ||
578 | if (sbp->sb_qflags & XFS_OQUOTA_CHKD) | ||
579 | sbp->sb_qflags |= (sbp->sb_qflags & XFS_PQUOTA_ACCT) ? | ||
580 | XFS_PQUOTA_CHKD : XFS_GQUOTA_CHKD; | ||
581 | sbp->sb_qflags &= ~(XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD); | ||
582 | } | ||
583 | |||
564 | void | 584 | void |
565 | xfs_sb_from_disk( | 585 | xfs_sb_from_disk( |
566 | struct xfs_sb *to, | 586 | struct xfs_sb *to, |
@@ -622,6 +642,35 @@ xfs_sb_from_disk( | |||
622 | to->sb_lsn = be64_to_cpu(from->sb_lsn); | 642 | to->sb_lsn = be64_to_cpu(from->sb_lsn); |
623 | } | 643 | } |
624 | 644 | ||
645 | static inline void | ||
646 | xfs_sb_quota_to_disk( | ||
647 | xfs_dsb_t *to, | ||
648 | xfs_sb_t *from, | ||
649 | __int64_t *fields) | ||
650 | { | ||
651 | __uint16_t qflags = from->sb_qflags; | ||
652 | |||
653 | if (*fields & XFS_SB_QFLAGS) { | ||
654 | /* | ||
655 | * The in-core version of sb_qflags do not have | ||
656 | * XFS_OQUOTA_* flags, whereas the on-disk version | ||
657 | * does. So, convert incore XFS_{PG}QUOTA_* flags | ||
658 | * to on-disk XFS_OQUOTA_* flags. | ||
659 | */ | ||
660 | qflags &= ~(XFS_PQUOTA_ENFD | XFS_PQUOTA_CHKD | | ||
661 | XFS_GQUOTA_ENFD | XFS_GQUOTA_CHKD); | ||
662 | |||
663 | if (from->sb_qflags & | ||
664 | (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD)) | ||
665 | qflags |= XFS_OQUOTA_ENFD; | ||
666 | if (from->sb_qflags & | ||
667 | (XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD)) | ||
668 | qflags |= XFS_OQUOTA_CHKD; | ||
669 | to->sb_qflags = cpu_to_be16(qflags); | ||
670 | *fields &= ~XFS_SB_QFLAGS; | ||
671 | } | ||
672 | } | ||
673 | |||
625 | /* | 674 | /* |
626 | * Copy in core superblock to ondisk one. | 675 | * Copy in core superblock to ondisk one. |
627 | * | 676 | * |
@@ -643,6 +692,7 @@ xfs_sb_to_disk( | |||
643 | if (!fields) | 692 | if (!fields) |
644 | return; | 693 | return; |
645 | 694 | ||
695 | xfs_sb_quota_to_disk(to, from, &fields); | ||
646 | while (fields) { | 696 | while (fields) { |
647 | f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields); | 697 | f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields); |
648 | first = xfs_sb_info[f].offset; | 698 | first = xfs_sb_info[f].offset; |
@@ -835,6 +885,7 @@ reread: | |||
835 | */ | 885 | */ |
836 | xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp)); | 886 | xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp)); |
837 | 887 | ||
888 | xfs_sb_quota_from_disk(&mp->m_sb); | ||
838 | /* | 889 | /* |
839 | * We must be able to do sector-sized and sector-aligned IO. | 890 | * We must be able to do sector-sized and sector-aligned IO. |
840 | */ | 891 | */ |
@@ -987,42 +1038,27 @@ xfs_update_alignment(xfs_mount_t *mp) | |||
987 | */ | 1038 | */ |
988 | if ((BBTOB(mp->m_dalign) & mp->m_blockmask) || | 1039 | if ((BBTOB(mp->m_dalign) & mp->m_blockmask) || |
989 | (BBTOB(mp->m_swidth) & mp->m_blockmask)) { | 1040 | (BBTOB(mp->m_swidth) & mp->m_blockmask)) { |
990 | if (mp->m_flags & XFS_MOUNT_RETERR) { | 1041 | xfs_warn(mp, |
991 | xfs_warn(mp, "alignment check failed: " | 1042 | "alignment check failed: sunit/swidth vs. blocksize(%d)", |
992 | "(sunit/swidth vs. blocksize)"); | 1043 | sbp->sb_blocksize); |
993 | return XFS_ERROR(EINVAL); | 1044 | return XFS_ERROR(EINVAL); |
994 | } | ||
995 | mp->m_dalign = mp->m_swidth = 0; | ||
996 | } else { | 1045 | } else { |
997 | /* | 1046 | /* |
998 | * Convert the stripe unit and width to FSBs. | 1047 | * Convert the stripe unit and width to FSBs. |
999 | */ | 1048 | */ |
1000 | mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign); | 1049 | mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign); |
1001 | if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) { | 1050 | if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) { |
1002 | if (mp->m_flags & XFS_MOUNT_RETERR) { | ||
1003 | xfs_warn(mp, "alignment check failed: " | ||
1004 | "(sunit/swidth vs. ag size)"); | ||
1005 | return XFS_ERROR(EINVAL); | ||
1006 | } | ||
1007 | xfs_warn(mp, | 1051 | xfs_warn(mp, |
1008 | "stripe alignment turned off: sunit(%d)/swidth(%d) " | 1052 | "alignment check failed: sunit/swidth vs. agsize(%d)", |
1009 | "incompatible with agsize(%d)", | 1053 | sbp->sb_agblocks); |
1010 | mp->m_dalign, mp->m_swidth, | 1054 | return XFS_ERROR(EINVAL); |
1011 | sbp->sb_agblocks); | ||
1012 | |||
1013 | mp->m_dalign = 0; | ||
1014 | mp->m_swidth = 0; | ||
1015 | } else if (mp->m_dalign) { | 1055 | } else if (mp->m_dalign) { |
1016 | mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth); | 1056 | mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth); |
1017 | } else { | 1057 | } else { |
1018 | if (mp->m_flags & XFS_MOUNT_RETERR) { | 1058 | xfs_warn(mp, |
1019 | xfs_warn(mp, "alignment check failed: " | 1059 | "alignment check failed: sunit(%d) less than bsize(%d)", |
1020 | "sunit(%d) less than bsize(%d)", | 1060 | mp->m_dalign, sbp->sb_blocksize); |
1021 | mp->m_dalign, | 1061 | return XFS_ERROR(EINVAL); |
1022 | mp->m_blockmask +1); | ||
1023 | return XFS_ERROR(EINVAL); | ||
1024 | } | ||
1025 | mp->m_swidth = 0; | ||
1026 | } | 1062 | } |
1027 | } | 1063 | } |
1028 | 1064 | ||
@@ -1039,6 +1075,10 @@ xfs_update_alignment(xfs_mount_t *mp) | |||
1039 | sbp->sb_width = mp->m_swidth; | 1075 | sbp->sb_width = mp->m_swidth; |
1040 | mp->m_update_flags |= XFS_SB_WIDTH; | 1076 | mp->m_update_flags |= XFS_SB_WIDTH; |
1041 | } | 1077 | } |
1078 | } else { | ||
1079 | xfs_warn(mp, | ||
1080 | "cannot change alignment: superblock does not support data alignment"); | ||
1081 | return XFS_ERROR(EINVAL); | ||
1042 | } | 1082 | } |
1043 | } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN && | 1083 | } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN && |
1044 | xfs_sb_version_hasdalign(&mp->m_sb)) { | 1084 | xfs_sb_version_hasdalign(&mp->m_sb)) { |
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index b004cecdfb04..4e374d4a9189 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h | |||
@@ -192,8 +192,6 @@ typedef struct xfs_mount { | |||
192 | xfs_dablk_t m_dirleafblk; /* blockno of dir non-data v2 */ | 192 | xfs_dablk_t m_dirleafblk; /* blockno of dir non-data v2 */ |
193 | xfs_dablk_t m_dirfreeblk; /* blockno of dirfreeindex v2 */ | 193 | xfs_dablk_t m_dirfreeblk; /* blockno of dirfreeindex v2 */ |
194 | uint m_chsize; /* size of next field */ | 194 | uint m_chsize; /* size of next field */ |
195 | struct xfs_chash *m_chash; /* fs private inode per-cluster | ||
196 | * hash table */ | ||
197 | atomic_t m_active_trans; /* number trans frozen */ | 195 | atomic_t m_active_trans; /* number trans frozen */ |
198 | #ifdef HAVE_PERCPU_SB | 196 | #ifdef HAVE_PERCPU_SB |
199 | xfs_icsb_cnts_t __percpu *m_sb_cnts; /* per-cpu superblock counters */ | 197 | xfs_icsb_cnts_t __percpu *m_sb_cnts; /* per-cpu superblock counters */ |
@@ -229,8 +227,6 @@ typedef struct xfs_mount { | |||
229 | operations, typically for | 227 | operations, typically for |
230 | disk errors in metadata */ | 228 | disk errors in metadata */ |
231 | #define XFS_MOUNT_DISCARD (1ULL << 5) /* discard unused blocks */ | 229 | #define XFS_MOUNT_DISCARD (1ULL << 5) /* discard unused blocks */ |
232 | #define XFS_MOUNT_RETERR (1ULL << 6) /* return alignment errors to | ||
233 | user */ | ||
234 | #define XFS_MOUNT_NOALIGN (1ULL << 7) /* turn off stripe alignment | 230 | #define XFS_MOUNT_NOALIGN (1ULL << 7) /* turn off stripe alignment |
235 | allocations */ | 231 | allocations */ |
236 | #define XFS_MOUNT_ATTR2 (1ULL << 8) /* allow use of attr2 format */ | 232 | #define XFS_MOUNT_ATTR2 (1ULL << 8) /* allow use of attr2 format */ |
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index b75c9bb6e71e..7a3e007b49f4 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c | |||
@@ -70,7 +70,7 @@ xfs_qm_dquot_walk( | |||
70 | void *data) | 70 | void *data) |
71 | { | 71 | { |
72 | struct xfs_quotainfo *qi = mp->m_quotainfo; | 72 | struct xfs_quotainfo *qi = mp->m_quotainfo; |
73 | struct radix_tree_root *tree = XFS_DQUOT_TREE(qi, type); | 73 | struct radix_tree_root *tree = xfs_dquot_tree(qi, type); |
74 | uint32_t next_index; | 74 | uint32_t next_index; |
75 | int last_error = 0; | 75 | int last_error = 0; |
76 | int skipped; | 76 | int skipped; |
@@ -189,7 +189,7 @@ xfs_qm_dqpurge( | |||
189 | xfs_dqfunlock(dqp); | 189 | xfs_dqfunlock(dqp); |
190 | xfs_dqunlock(dqp); | 190 | xfs_dqunlock(dqp); |
191 | 191 | ||
192 | radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags), | 192 | radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags), |
193 | be32_to_cpu(dqp->q_core.d_id)); | 193 | be32_to_cpu(dqp->q_core.d_id)); |
194 | qi->qi_dquots--; | 194 | qi->qi_dquots--; |
195 | 195 | ||
@@ -299,8 +299,10 @@ xfs_qm_mount_quotas( | |||
299 | */ | 299 | */ |
300 | if (!XFS_IS_UQUOTA_ON(mp)) | 300 | if (!XFS_IS_UQUOTA_ON(mp)) |
301 | mp->m_qflags &= ~XFS_UQUOTA_CHKD; | 301 | mp->m_qflags &= ~XFS_UQUOTA_CHKD; |
302 | if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp))) | 302 | if (!XFS_IS_GQUOTA_ON(mp)) |
303 | mp->m_qflags &= ~XFS_OQUOTA_CHKD; | 303 | mp->m_qflags &= ~XFS_GQUOTA_CHKD; |
304 | if (!XFS_IS_PQUOTA_ON(mp)) | ||
305 | mp->m_qflags &= ~XFS_PQUOTA_CHKD; | ||
304 | 306 | ||
305 | write_changes: | 307 | write_changes: |
306 | /* | 308 | /* |
@@ -489,8 +491,7 @@ xfs_qm_need_dqattach( | |||
489 | return false; | 491 | return false; |
490 | if (!XFS_NOT_DQATTACHED(mp, ip)) | 492 | if (!XFS_NOT_DQATTACHED(mp, ip)) |
491 | return false; | 493 | return false; |
492 | if (ip->i_ino == mp->m_sb.sb_uquotino || | 494 | if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino)) |
493 | ip->i_ino == mp->m_sb.sb_gquotino) | ||
494 | return false; | 495 | return false; |
495 | return true; | 496 | return true; |
496 | } | 497 | } |
@@ -606,8 +607,7 @@ xfs_qm_dqdetach( | |||
606 | 607 | ||
607 | trace_xfs_dquot_dqdetach(ip); | 608 | trace_xfs_dquot_dqdetach(ip); |
608 | 609 | ||
609 | ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino); | 610 | ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino)); |
610 | ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino); | ||
611 | if (ip->i_udquot) { | 611 | if (ip->i_udquot) { |
612 | xfs_qm_dqrele(ip->i_udquot); | 612 | xfs_qm_dqrele(ip->i_udquot); |
613 | ip->i_udquot = NULL; | 613 | ip->i_udquot = NULL; |
@@ -1152,7 +1152,7 @@ xfs_qm_dqusage_adjust( | |||
1152 | * rootino must have its resources accounted for, not so with the quota | 1152 | * rootino must have its resources accounted for, not so with the quota |
1153 | * inodes. | 1153 | * inodes. |
1154 | */ | 1154 | */ |
1155 | if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) { | 1155 | if (xfs_is_quota_inode(&mp->m_sb, ino)) { |
1156 | *res = BULKSTAT_RV_NOTHING; | 1156 | *res = BULKSTAT_RV_NOTHING; |
1157 | return XFS_ERROR(EINVAL); | 1157 | return XFS_ERROR(EINVAL); |
1158 | } | 1158 | } |
@@ -1262,19 +1262,20 @@ int | |||
1262 | xfs_qm_quotacheck( | 1262 | xfs_qm_quotacheck( |
1263 | xfs_mount_t *mp) | 1263 | xfs_mount_t *mp) |
1264 | { | 1264 | { |
1265 | int done, count, error, error2; | 1265 | int done, count, error, error2; |
1266 | xfs_ino_t lastino; | 1266 | xfs_ino_t lastino; |
1267 | size_t structsz; | 1267 | size_t structsz; |
1268 | xfs_inode_t *uip, *gip; | 1268 | uint flags; |
1269 | uint flags; | 1269 | LIST_HEAD (buffer_list); |
1270 | LIST_HEAD (buffer_list); | 1270 | struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip; |
1271 | struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip; | ||
1271 | 1272 | ||
1272 | count = INT_MAX; | 1273 | count = INT_MAX; |
1273 | structsz = 1; | 1274 | structsz = 1; |
1274 | lastino = 0; | 1275 | lastino = 0; |
1275 | flags = 0; | 1276 | flags = 0; |
1276 | 1277 | ||
1277 | ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip); | 1278 | ASSERT(uip || gip); |
1278 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); | 1279 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); |
1279 | 1280 | ||
1280 | xfs_notice(mp, "Quotacheck needed: Please wait."); | 1281 | xfs_notice(mp, "Quotacheck needed: Please wait."); |
@@ -1284,7 +1285,6 @@ xfs_qm_quotacheck( | |||
1284 | * their counters to zero. We need a clean slate. | 1285 | * their counters to zero. We need a clean slate. |
1285 | * We don't log our changes till later. | 1286 | * We don't log our changes till later. |
1286 | */ | 1287 | */ |
1287 | uip = mp->m_quotainfo->qi_uquotaip; | ||
1288 | if (uip) { | 1288 | if (uip) { |
1289 | error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA, | 1289 | error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA, |
1290 | &buffer_list); | 1290 | &buffer_list); |
@@ -1293,14 +1293,14 @@ xfs_qm_quotacheck( | |||
1293 | flags |= XFS_UQUOTA_CHKD; | 1293 | flags |= XFS_UQUOTA_CHKD; |
1294 | } | 1294 | } |
1295 | 1295 | ||
1296 | gip = mp->m_quotainfo->qi_gquotaip; | ||
1297 | if (gip) { | 1296 | if (gip) { |
1298 | error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ? | 1297 | error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ? |
1299 | XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA, | 1298 | XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA, |
1300 | &buffer_list); | 1299 | &buffer_list); |
1301 | if (error) | 1300 | if (error) |
1302 | goto error_return; | 1301 | goto error_return; |
1303 | flags |= XFS_OQUOTA_CHKD; | 1302 | flags |= XFS_IS_GQUOTA_ON(mp) ? |
1303 | XFS_GQUOTA_CHKD : XFS_PQUOTA_CHKD; | ||
1304 | } | 1304 | } |
1305 | 1305 | ||
1306 | do { | 1306 | do { |
@@ -1395,15 +1395,13 @@ STATIC int | |||
1395 | xfs_qm_init_quotainos( | 1395 | xfs_qm_init_quotainos( |
1396 | xfs_mount_t *mp) | 1396 | xfs_mount_t *mp) |
1397 | { | 1397 | { |
1398 | xfs_inode_t *uip, *gip; | 1398 | struct xfs_inode *uip = NULL; |
1399 | int error; | 1399 | struct xfs_inode *gip = NULL; |
1400 | __int64_t sbflags; | 1400 | int error; |
1401 | uint flags; | 1401 | __int64_t sbflags = 0; |
1402 | uint flags = 0; | ||
1402 | 1403 | ||
1403 | ASSERT(mp->m_quotainfo); | 1404 | ASSERT(mp->m_quotainfo); |
1404 | uip = gip = NULL; | ||
1405 | sbflags = 0; | ||
1406 | flags = 0; | ||
1407 | 1405 | ||
1408 | /* | 1406 | /* |
1409 | * Get the uquota and gquota inodes | 1407 | * Get the uquota and gquota inodes |
@@ -1412,19 +1410,18 @@ xfs_qm_init_quotainos( | |||
1412 | if (XFS_IS_UQUOTA_ON(mp) && | 1410 | if (XFS_IS_UQUOTA_ON(mp) && |
1413 | mp->m_sb.sb_uquotino != NULLFSINO) { | 1411 | mp->m_sb.sb_uquotino != NULLFSINO) { |
1414 | ASSERT(mp->m_sb.sb_uquotino > 0); | 1412 | ASSERT(mp->m_sb.sb_uquotino > 0); |
1415 | if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, | 1413 | error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, |
1416 | 0, 0, &uip))) | 1414 | 0, 0, &uip); |
1415 | if (error) | ||
1417 | return XFS_ERROR(error); | 1416 | return XFS_ERROR(error); |
1418 | } | 1417 | } |
1419 | if (XFS_IS_OQUOTA_ON(mp) && | 1418 | if (XFS_IS_OQUOTA_ON(mp) && |
1420 | mp->m_sb.sb_gquotino != NULLFSINO) { | 1419 | mp->m_sb.sb_gquotino != NULLFSINO) { |
1421 | ASSERT(mp->m_sb.sb_gquotino > 0); | 1420 | ASSERT(mp->m_sb.sb_gquotino > 0); |
1422 | if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, | 1421 | error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, |
1423 | 0, 0, &gip))) { | 1422 | 0, 0, &gip); |
1424 | if (uip) | 1423 | if (error) |
1425 | IRELE(uip); | 1424 | goto error_rele; |
1426 | return XFS_ERROR(error); | ||
1427 | } | ||
1428 | } | 1425 | } |
1429 | } else { | 1426 | } else { |
1430 | flags |= XFS_QMOPT_SBVERSION; | 1427 | flags |= XFS_QMOPT_SBVERSION; |
@@ -1439,10 +1436,11 @@ xfs_qm_init_quotainos( | |||
1439 | * temporarily switch to read-write to do this. | 1436 | * temporarily switch to read-write to do this. |
1440 | */ | 1437 | */ |
1441 | if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) { | 1438 | if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) { |
1442 | if ((error = xfs_qm_qino_alloc(mp, &uip, | 1439 | error = xfs_qm_qino_alloc(mp, &uip, |
1443 | sbflags | XFS_SB_UQUOTINO, | 1440 | sbflags | XFS_SB_UQUOTINO, |
1444 | flags | XFS_QMOPT_UQUOTA))) | 1441 | flags | XFS_QMOPT_UQUOTA); |
1445 | return XFS_ERROR(error); | 1442 | if (error) |
1443 | goto error_rele; | ||
1446 | 1444 | ||
1447 | flags &= ~XFS_QMOPT_SBVERSION; | 1445 | flags &= ~XFS_QMOPT_SBVERSION; |
1448 | } | 1446 | } |
@@ -1451,18 +1449,21 @@ xfs_qm_init_quotainos( | |||
1451 | XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA); | 1449 | XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA); |
1452 | error = xfs_qm_qino_alloc(mp, &gip, | 1450 | error = xfs_qm_qino_alloc(mp, &gip, |
1453 | sbflags | XFS_SB_GQUOTINO, flags); | 1451 | sbflags | XFS_SB_GQUOTINO, flags); |
1454 | if (error) { | 1452 | if (error) |
1455 | if (uip) | 1453 | goto error_rele; |
1456 | IRELE(uip); | ||
1457 | |||
1458 | return XFS_ERROR(error); | ||
1459 | } | ||
1460 | } | 1454 | } |
1461 | 1455 | ||
1462 | mp->m_quotainfo->qi_uquotaip = uip; | 1456 | mp->m_quotainfo->qi_uquotaip = uip; |
1463 | mp->m_quotainfo->qi_gquotaip = gip; | 1457 | mp->m_quotainfo->qi_gquotaip = gip; |
1464 | 1458 | ||
1465 | return 0; | 1459 | return 0; |
1460 | |||
1461 | error_rele: | ||
1462 | if (uip) | ||
1463 | IRELE(uip); | ||
1464 | if (gip) | ||
1465 | IRELE(gip); | ||
1466 | return XFS_ERROR(error); | ||
1466 | } | 1467 | } |
1467 | 1468 | ||
1468 | STATIC void | 1469 | STATIC void |
@@ -1473,7 +1474,7 @@ xfs_qm_dqfree_one( | |||
1473 | struct xfs_quotainfo *qi = mp->m_quotainfo; | 1474 | struct xfs_quotainfo *qi = mp->m_quotainfo; |
1474 | 1475 | ||
1475 | mutex_lock(&qi->qi_tree_lock); | 1476 | mutex_lock(&qi->qi_tree_lock); |
1476 | radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags), | 1477 | radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags), |
1477 | be32_to_cpu(dqp->q_core.d_id)); | 1478 | be32_to_cpu(dqp->q_core.d_id)); |
1478 | 1479 | ||
1479 | qi->qi_dquots--; | 1480 | qi->qi_dquots--; |
@@ -1659,7 +1660,8 @@ xfs_qm_vop_dqalloc( | |||
1659 | struct xfs_dquot **O_gdqpp) | 1660 | struct xfs_dquot **O_gdqpp) |
1660 | { | 1661 | { |
1661 | struct xfs_mount *mp = ip->i_mount; | 1662 | struct xfs_mount *mp = ip->i_mount; |
1662 | struct xfs_dquot *uq, *gq; | 1663 | struct xfs_dquot *uq = NULL; |
1664 | struct xfs_dquot *gq = NULL; | ||
1663 | int error; | 1665 | int error; |
1664 | uint lockflags; | 1666 | uint lockflags; |
1665 | 1667 | ||
@@ -1684,7 +1686,6 @@ xfs_qm_vop_dqalloc( | |||
1684 | } | 1686 | } |
1685 | } | 1687 | } |
1686 | 1688 | ||
1687 | uq = gq = NULL; | ||
1688 | if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) { | 1689 | if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) { |
1689 | if (ip->i_d.di_uid != uid) { | 1690 | if (ip->i_d.di_uid != uid) { |
1690 | /* | 1691 | /* |
@@ -1697,11 +1698,12 @@ xfs_qm_vop_dqalloc( | |||
1697 | * holding ilock. | 1698 | * holding ilock. |
1698 | */ | 1699 | */ |
1699 | xfs_iunlock(ip, lockflags); | 1700 | xfs_iunlock(ip, lockflags); |
1700 | if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid, | 1701 | error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid, |
1701 | XFS_DQ_USER, | 1702 | XFS_DQ_USER, |
1702 | XFS_QMOPT_DQALLOC | | 1703 | XFS_QMOPT_DQALLOC | |
1703 | XFS_QMOPT_DOWARN, | 1704 | XFS_QMOPT_DOWARN, |
1704 | &uq))) { | 1705 | &uq); |
1706 | if (error) { | ||
1705 | ASSERT(error != ENOENT); | 1707 | ASSERT(error != ENOENT); |
1706 | return error; | 1708 | return error; |
1707 | } | 1709 | } |
@@ -1723,15 +1725,14 @@ xfs_qm_vop_dqalloc( | |||
1723 | if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) { | 1725 | if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) { |
1724 | if (ip->i_d.di_gid != gid) { | 1726 | if (ip->i_d.di_gid != gid) { |
1725 | xfs_iunlock(ip, lockflags); | 1727 | xfs_iunlock(ip, lockflags); |
1726 | if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid, | 1728 | error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid, |
1727 | XFS_DQ_GROUP, | 1729 | XFS_DQ_GROUP, |
1728 | XFS_QMOPT_DQALLOC | | 1730 | XFS_QMOPT_DQALLOC | |
1729 | XFS_QMOPT_DOWARN, | 1731 | XFS_QMOPT_DOWARN, |
1730 | &gq))) { | 1732 | &gq); |
1731 | if (uq) | 1733 | if (error) { |
1732 | xfs_qm_dqrele(uq); | ||
1733 | ASSERT(error != ENOENT); | 1734 | ASSERT(error != ENOENT); |
1734 | return error; | 1735 | goto error_rele; |
1735 | } | 1736 | } |
1736 | xfs_dqunlock(gq); | 1737 | xfs_dqunlock(gq); |
1737 | lockflags = XFS_ILOCK_SHARED; | 1738 | lockflags = XFS_ILOCK_SHARED; |
@@ -1743,15 +1744,14 @@ xfs_qm_vop_dqalloc( | |||
1743 | } else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) { | 1744 | } else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) { |
1744 | if (xfs_get_projid(ip) != prid) { | 1745 | if (xfs_get_projid(ip) != prid) { |
1745 | xfs_iunlock(ip, lockflags); | 1746 | xfs_iunlock(ip, lockflags); |
1746 | if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid, | 1747 | error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid, |
1747 | XFS_DQ_PROJ, | 1748 | XFS_DQ_PROJ, |
1748 | XFS_QMOPT_DQALLOC | | 1749 | XFS_QMOPT_DQALLOC | |
1749 | XFS_QMOPT_DOWARN, | 1750 | XFS_QMOPT_DOWARN, |
1750 | &gq))) { | 1751 | &gq); |
1751 | if (uq) | 1752 | if (error) { |
1752 | xfs_qm_dqrele(uq); | ||
1753 | ASSERT(error != ENOENT); | 1753 | ASSERT(error != ENOENT); |
1754 | return (error); | 1754 | goto error_rele; |
1755 | } | 1755 | } |
1756 | xfs_dqunlock(gq); | 1756 | xfs_dqunlock(gq); |
1757 | lockflags = XFS_ILOCK_SHARED; | 1757 | lockflags = XFS_ILOCK_SHARED; |
@@ -1774,6 +1774,11 @@ xfs_qm_vop_dqalloc( | |||
1774 | else if (gq) | 1774 | else if (gq) |
1775 | xfs_qm_dqrele(gq); | 1775 | xfs_qm_dqrele(gq); |
1776 | return 0; | 1776 | return 0; |
1777 | |||
1778 | error_rele: | ||
1779 | if (uq) | ||
1780 | xfs_qm_dqrele(uq); | ||
1781 | return error; | ||
1777 | } | 1782 | } |
1778 | 1783 | ||
1779 | /* | 1784 | /* |
@@ -1821,29 +1826,31 @@ xfs_qm_vop_chown( | |||
1821 | */ | 1826 | */ |
1822 | int | 1827 | int |
1823 | xfs_qm_vop_chown_reserve( | 1828 | xfs_qm_vop_chown_reserve( |
1824 | xfs_trans_t *tp, | 1829 | struct xfs_trans *tp, |
1825 | xfs_inode_t *ip, | 1830 | struct xfs_inode *ip, |
1826 | xfs_dquot_t *udqp, | 1831 | struct xfs_dquot *udqp, |
1827 | xfs_dquot_t *gdqp, | 1832 | struct xfs_dquot *gdqp, |
1828 | uint flags) | 1833 | uint flags) |
1829 | { | 1834 | { |
1830 | xfs_mount_t *mp = ip->i_mount; | 1835 | struct xfs_mount *mp = ip->i_mount; |
1831 | uint delblks, blkflags, prjflags = 0; | 1836 | uint delblks, blkflags, prjflags = 0; |
1832 | xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq; | 1837 | struct xfs_dquot *udq_unres = NULL; |
1833 | int error; | 1838 | struct xfs_dquot *gdq_unres = NULL; |
1839 | struct xfs_dquot *udq_delblks = NULL; | ||
1840 | struct xfs_dquot *gdq_delblks = NULL; | ||
1841 | int error; | ||
1834 | 1842 | ||
1835 | 1843 | ||
1836 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); | 1844 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); |
1837 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); | 1845 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); |
1838 | 1846 | ||
1839 | delblks = ip->i_delayed_blks; | 1847 | delblks = ip->i_delayed_blks; |
1840 | delblksudq = delblksgdq = unresudq = unresgdq = NULL; | ||
1841 | blkflags = XFS_IS_REALTIME_INODE(ip) ? | 1848 | blkflags = XFS_IS_REALTIME_INODE(ip) ? |
1842 | XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS; | 1849 | XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS; |
1843 | 1850 | ||
1844 | if (XFS_IS_UQUOTA_ON(mp) && udqp && | 1851 | if (XFS_IS_UQUOTA_ON(mp) && udqp && |
1845 | ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) { | 1852 | ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) { |
1846 | delblksudq = udqp; | 1853 | udq_delblks = udqp; |
1847 | /* | 1854 | /* |
1848 | * If there are delayed allocation blocks, then we have to | 1855 | * If there are delayed allocation blocks, then we have to |
1849 | * unreserve those from the old dquot, and add them to the | 1856 | * unreserve those from the old dquot, and add them to the |
@@ -1851,7 +1858,7 @@ xfs_qm_vop_chown_reserve( | |||
1851 | */ | 1858 | */ |
1852 | if (delblks) { | 1859 | if (delblks) { |
1853 | ASSERT(ip->i_udquot); | 1860 | ASSERT(ip->i_udquot); |
1854 | unresudq = ip->i_udquot; | 1861 | udq_unres = ip->i_udquot; |
1855 | } | 1862 | } |
1856 | } | 1863 | } |
1857 | if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) { | 1864 | if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) { |
@@ -1862,18 +1869,19 @@ xfs_qm_vop_chown_reserve( | |||
1862 | if (prjflags || | 1869 | if (prjflags || |
1863 | (XFS_IS_GQUOTA_ON(ip->i_mount) && | 1870 | (XFS_IS_GQUOTA_ON(ip->i_mount) && |
1864 | ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) { | 1871 | ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) { |
1865 | delblksgdq = gdqp; | 1872 | gdq_delblks = gdqp; |
1866 | if (delblks) { | 1873 | if (delblks) { |
1867 | ASSERT(ip->i_gdquot); | 1874 | ASSERT(ip->i_gdquot); |
1868 | unresgdq = ip->i_gdquot; | 1875 | gdq_unres = ip->i_gdquot; |
1869 | } | 1876 | } |
1870 | } | 1877 | } |
1871 | } | 1878 | } |
1872 | 1879 | ||
1873 | if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount, | 1880 | error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount, |
1874 | delblksudq, delblksgdq, ip->i_d.di_nblocks, 1, | 1881 | udq_delblks, gdq_delblks, ip->i_d.di_nblocks, 1, |
1875 | flags | blkflags | prjflags))) | 1882 | flags | blkflags | prjflags); |
1876 | return (error); | 1883 | if (error) |
1884 | return error; | ||
1877 | 1885 | ||
1878 | /* | 1886 | /* |
1879 | * Do the delayed blks reservations/unreservations now. Since, these | 1887 | * Do the delayed blks reservations/unreservations now. Since, these |
@@ -1885,14 +1893,15 @@ xfs_qm_vop_chown_reserve( | |||
1885 | /* | 1893 | /* |
1886 | * Do the reservations first. Unreservation can't fail. | 1894 | * Do the reservations first. Unreservation can't fail. |
1887 | */ | 1895 | */ |
1888 | ASSERT(delblksudq || delblksgdq); | 1896 | ASSERT(udq_delblks || gdq_delblks); |
1889 | ASSERT(unresudq || unresgdq); | 1897 | ASSERT(udq_unres || gdq_unres); |
1890 | if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, | 1898 | error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, |
1891 | delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0, | 1899 | udq_delblks, gdq_delblks, (xfs_qcnt_t)delblks, 0, |
1892 | flags | blkflags | prjflags))) | 1900 | flags | blkflags | prjflags); |
1893 | return (error); | 1901 | if (error) |
1902 | return error; | ||
1894 | xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, | 1903 | xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, |
1895 | unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0, | 1904 | udq_unres, gdq_unres, -((xfs_qcnt_t)delblks), 0, |
1896 | blkflags); | 1905 | blkflags); |
1897 | } | 1906 | } |
1898 | 1907 | ||
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h index 5d16a6e6900f..bdb4f8b95207 100644 --- a/fs/xfs/xfs_qm.h +++ b/fs/xfs/xfs_qm.h | |||
@@ -69,30 +69,62 @@ typedef struct xfs_quotainfo { | |||
69 | struct shrinker qi_shrinker; | 69 | struct shrinker qi_shrinker; |
70 | } xfs_quotainfo_t; | 70 | } xfs_quotainfo_t; |
71 | 71 | ||
72 | #define XFS_DQUOT_TREE(qi, type) \ | 72 | static inline struct radix_tree_root * |
73 | ((type & XFS_DQ_USER) ? \ | 73 | xfs_dquot_tree( |
74 | &((qi)->qi_uquota_tree) : \ | 74 | struct xfs_quotainfo *qi, |
75 | &((qi)->qi_gquota_tree)) | 75 | int type) |
76 | { | ||
77 | switch (type) { | ||
78 | case XFS_DQ_USER: | ||
79 | return &qi->qi_uquota_tree; | ||
80 | case XFS_DQ_GROUP: | ||
81 | case XFS_DQ_PROJ: | ||
82 | return &qi->qi_gquota_tree; | ||
83 | default: | ||
84 | ASSERT(0); | ||
85 | } | ||
86 | return NULL; | ||
87 | } | ||
76 | 88 | ||
89 | static inline struct xfs_inode * | ||
90 | xfs_dq_to_quota_inode(struct xfs_dquot *dqp) | ||
91 | { | ||
92 | switch (dqp->dq_flags & XFS_DQ_ALLTYPES) { | ||
93 | case XFS_DQ_USER: | ||
94 | return dqp->q_mount->m_quotainfo->qi_uquotaip; | ||
95 | case XFS_DQ_GROUP: | ||
96 | case XFS_DQ_PROJ: | ||
97 | return dqp->q_mount->m_quotainfo->qi_gquotaip; | ||
98 | default: | ||
99 | ASSERT(0); | ||
100 | } | ||
101 | return NULL; | ||
102 | } | ||
77 | 103 | ||
78 | extern int xfs_qm_calc_dquots_per_chunk(struct xfs_mount *mp, | 104 | extern int xfs_qm_calc_dquots_per_chunk(struct xfs_mount *mp, |
79 | unsigned int nbblks); | 105 | unsigned int nbblks); |
80 | extern void xfs_trans_mod_dquot(xfs_trans_t *, xfs_dquot_t *, uint, long); | 106 | extern void xfs_trans_mod_dquot(struct xfs_trans *, |
81 | extern int xfs_trans_reserve_quota_bydquots(xfs_trans_t *, xfs_mount_t *, | 107 | struct xfs_dquot *, uint, long); |
82 | xfs_dquot_t *, xfs_dquot_t *, long, long, uint); | 108 | extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *, |
83 | extern void xfs_trans_dqjoin(xfs_trans_t *, xfs_dquot_t *); | 109 | struct xfs_mount *, struct xfs_dquot *, |
84 | extern void xfs_trans_log_dquot(xfs_trans_t *, xfs_dquot_t *); | 110 | struct xfs_dquot *, long, long, uint); |
111 | extern void xfs_trans_dqjoin(struct xfs_trans *, struct xfs_dquot *); | ||
112 | extern void xfs_trans_log_dquot(struct xfs_trans *, struct xfs_dquot *); | ||
85 | 113 | ||
86 | /* | 114 | /* |
87 | * We keep the usr and grp dquots separately so that locking will be easier | 115 | * We keep the usr and grp dquots separately so that locking will be easier |
88 | * to do at commit time. All transactions that we know of at this point | 116 | * to do at commit time. All transactions that we know of at this point |
89 | * affect no more than two dquots of one type. Hence, the TRANS_MAXDQS value. | 117 | * affect no more than two dquots of one type. Hence, the TRANS_MAXDQS value. |
90 | */ | 118 | */ |
119 | enum { | ||
120 | XFS_QM_TRANS_USR = 0, | ||
121 | XFS_QM_TRANS_GRP, | ||
122 | XFS_QM_TRANS_DQTYPES | ||
123 | }; | ||
91 | #define XFS_QM_TRANS_MAXDQS 2 | 124 | #define XFS_QM_TRANS_MAXDQS 2 |
92 | typedef struct xfs_dquot_acct { | 125 | struct xfs_dquot_acct { |
93 | xfs_dqtrx_t dqa_usrdquots[XFS_QM_TRANS_MAXDQS]; | 126 | struct xfs_dqtrx dqs[XFS_QM_TRANS_DQTYPES][XFS_QM_TRANS_MAXDQS]; |
94 | xfs_dqtrx_t dqa_grpdquots[XFS_QM_TRANS_MAXDQS]; | 127 | }; |
95 | } xfs_dquot_acct_t; | ||
96 | 128 | ||
97 | /* | 129 | /* |
98 | * Users are allowed to have a usage exceeding their softlimit for | 130 | * Users are allowed to have a usage exceeding their softlimit for |
@@ -106,22 +138,23 @@ typedef struct xfs_dquot_acct { | |||
106 | #define XFS_QM_IWARNLIMIT 5 | 138 | #define XFS_QM_IWARNLIMIT 5 |
107 | #define XFS_QM_RTBWARNLIMIT 5 | 139 | #define XFS_QM_RTBWARNLIMIT 5 |
108 | 140 | ||
109 | extern void xfs_qm_destroy_quotainfo(xfs_mount_t *); | 141 | extern void xfs_qm_destroy_quotainfo(struct xfs_mount *); |
110 | extern int xfs_qm_quotacheck(xfs_mount_t *); | 142 | extern int xfs_qm_quotacheck(struct xfs_mount *); |
111 | extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t); | 143 | extern int xfs_qm_write_sb_changes(struct xfs_mount *, __int64_t); |
112 | 144 | ||
113 | /* dquot stuff */ | 145 | /* dquot stuff */ |
114 | extern void xfs_qm_dqpurge_all(xfs_mount_t *, uint); | 146 | extern void xfs_qm_dqpurge_all(struct xfs_mount *, uint); |
115 | extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint); | 147 | extern void xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint); |
116 | 148 | ||
117 | /* quota ops */ | 149 | /* quota ops */ |
118 | extern int xfs_qm_scall_trunc_qfiles(xfs_mount_t *, uint); | 150 | extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint); |
119 | extern int xfs_qm_scall_getquota(xfs_mount_t *, xfs_dqid_t, uint, | 151 | extern int xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t, |
120 | fs_disk_quota_t *); | 152 | uint, struct fs_disk_quota *); |
121 | extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint, | 153 | extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint, |
122 | fs_disk_quota_t *); | 154 | struct fs_disk_quota *); |
123 | extern int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *); | 155 | extern int xfs_qm_scall_getqstat(struct xfs_mount *, |
124 | extern int xfs_qm_scall_quotaon(xfs_mount_t *, uint); | 156 | struct fs_quota_stat *); |
125 | extern int xfs_qm_scall_quotaoff(xfs_mount_t *, uint); | 157 | extern int xfs_qm_scall_quotaon(struct xfs_mount *, uint); |
158 | extern int xfs_qm_scall_quotaoff(struct xfs_mount *, uint); | ||
126 | 159 | ||
127 | #endif /* __XFS_QM_H__ */ | 160 | #endif /* __XFS_QM_H__ */ |
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c index 6cdf6ffc36a1..a08801ae24e2 100644 --- a/fs/xfs/xfs_qm_syscalls.c +++ b/fs/xfs/xfs_qm_syscalls.c | |||
@@ -117,11 +117,11 @@ xfs_qm_scall_quotaoff( | |||
117 | } | 117 | } |
118 | if (flags & XFS_GQUOTA_ACCT) { | 118 | if (flags & XFS_GQUOTA_ACCT) { |
119 | dqtype |= XFS_QMOPT_GQUOTA; | 119 | dqtype |= XFS_QMOPT_GQUOTA; |
120 | flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD); | 120 | flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD); |
121 | inactivate_flags |= XFS_GQUOTA_ACTIVE; | 121 | inactivate_flags |= XFS_GQUOTA_ACTIVE; |
122 | } else if (flags & XFS_PQUOTA_ACCT) { | 122 | } else if (flags & XFS_PQUOTA_ACCT) { |
123 | dqtype |= XFS_QMOPT_PQUOTA; | 123 | dqtype |= XFS_QMOPT_PQUOTA; |
124 | flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD); | 124 | flags |= (XFS_PQUOTA_CHKD | XFS_PQUOTA_ENFD); |
125 | inactivate_flags |= XFS_PQUOTA_ACTIVE; | 125 | inactivate_flags |= XFS_PQUOTA_ACTIVE; |
126 | } | 126 | } |
127 | 127 | ||
@@ -335,14 +335,14 @@ xfs_qm_scall_quotaon( | |||
335 | * quota acct on ondisk without m_qflags' knowing. | 335 | * quota acct on ondisk without m_qflags' knowing. |
336 | */ | 336 | */ |
337 | if (((flags & XFS_UQUOTA_ACCT) == 0 && | 337 | if (((flags & XFS_UQUOTA_ACCT) == 0 && |
338 | (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 && | 338 | (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 && |
339 | (flags & XFS_UQUOTA_ENFD)) | 339 | (flags & XFS_UQUOTA_ENFD)) || |
340 | || | 340 | ((flags & XFS_GQUOTA_ACCT) == 0 && |
341 | (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 && | ||
342 | (flags & XFS_GQUOTA_ENFD)) || | ||
341 | ((flags & XFS_PQUOTA_ACCT) == 0 && | 343 | ((flags & XFS_PQUOTA_ACCT) == 0 && |
342 | (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 && | 344 | (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 && |
343 | (flags & XFS_GQUOTA_ACCT) == 0 && | 345 | (flags & XFS_PQUOTA_ENFD))) { |
344 | (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 && | ||
345 | (flags & XFS_OQUOTA_ENFD))) { | ||
346 | xfs_debug(mp, | 346 | xfs_debug(mp, |
347 | "%s: Can't enforce without acct, flags=%x sbflags=%x\n", | 347 | "%s: Can't enforce without acct, flags=%x sbflags=%x\n", |
348 | __func__, flags, mp->m_sb.sb_qflags); | 348 | __func__, flags, mp->m_sb.sb_qflags); |
@@ -407,11 +407,11 @@ xfs_qm_scall_getqstat( | |||
407 | struct fs_quota_stat *out) | 407 | struct fs_quota_stat *out) |
408 | { | 408 | { |
409 | struct xfs_quotainfo *q = mp->m_quotainfo; | 409 | struct xfs_quotainfo *q = mp->m_quotainfo; |
410 | struct xfs_inode *uip, *gip; | 410 | struct xfs_inode *uip = NULL; |
411 | bool tempuqip, tempgqip; | 411 | struct xfs_inode *gip = NULL; |
412 | bool tempuqip = false; | ||
413 | bool tempgqip = false; | ||
412 | 414 | ||
413 | uip = gip = NULL; | ||
414 | tempuqip = tempgqip = false; | ||
415 | memset(out, 0, sizeof(fs_quota_stat_t)); | 415 | memset(out, 0, sizeof(fs_quota_stat_t)); |
416 | 416 | ||
417 | out->qs_version = FS_QSTAT_VERSION; | 417 | out->qs_version = FS_QSTAT_VERSION; |
@@ -776,9 +776,12 @@ xfs_qm_scall_getquota( | |||
776 | * gets turned off. No need to confuse the user level code, | 776 | * gets turned off. No need to confuse the user level code, |
777 | * so return zeroes in that case. | 777 | * so return zeroes in that case. |
778 | */ | 778 | */ |
779 | if ((!XFS_IS_UQUOTA_ENFORCED(mp) && dqp->q_core.d_flags == XFS_DQ_USER) || | 779 | if ((!XFS_IS_UQUOTA_ENFORCED(mp) && |
780 | (!XFS_IS_OQUOTA_ENFORCED(mp) && | 780 | dqp->q_core.d_flags == XFS_DQ_USER) || |
781 | (dqp->q_core.d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) { | 781 | (!XFS_IS_GQUOTA_ENFORCED(mp) && |
782 | dqp->q_core.d_flags == XFS_DQ_GROUP) || | ||
783 | (!XFS_IS_PQUOTA_ENFORCED(mp) && | ||
784 | dqp->q_core.d_flags == XFS_DQ_PROJ)) { | ||
782 | dst->d_btimer = 0; | 785 | dst->d_btimer = 0; |
783 | dst->d_itimer = 0; | 786 | dst->d_itimer = 0; |
784 | dst->d_rtbtimer = 0; | 787 | dst->d_rtbtimer = 0; |
@@ -786,8 +789,8 @@ xfs_qm_scall_getquota( | |||
786 | 789 | ||
787 | #ifdef DEBUG | 790 | #ifdef DEBUG |
788 | if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) || | 791 | if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) || |
789 | (XFS_IS_OQUOTA_ENFORCED(mp) && | 792 | (XFS_IS_GQUOTA_ENFORCED(mp) && dst->d_flags == FS_GROUP_QUOTA) || |
790 | (dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) && | 793 | (XFS_IS_PQUOTA_ENFORCED(mp) && dst->d_flags == FS_PROJ_QUOTA)) && |
791 | dst->d_id != 0) { | 794 | dst->d_id != 0) { |
792 | if ((dst->d_bcount > dst->d_blk_softlimit) && | 795 | if ((dst->d_bcount > dst->d_blk_softlimit) && |
793 | (dst->d_blk_softlimit > 0)) { | 796 | (dst->d_blk_softlimit > 0)) { |
@@ -833,16 +836,16 @@ xfs_qm_export_flags( | |||
833 | uflags = 0; | 836 | uflags = 0; |
834 | if (flags & XFS_UQUOTA_ACCT) | 837 | if (flags & XFS_UQUOTA_ACCT) |
835 | uflags |= FS_QUOTA_UDQ_ACCT; | 838 | uflags |= FS_QUOTA_UDQ_ACCT; |
836 | if (flags & XFS_PQUOTA_ACCT) | ||
837 | uflags |= FS_QUOTA_PDQ_ACCT; | ||
838 | if (flags & XFS_GQUOTA_ACCT) | 839 | if (flags & XFS_GQUOTA_ACCT) |
839 | uflags |= FS_QUOTA_GDQ_ACCT; | 840 | uflags |= FS_QUOTA_GDQ_ACCT; |
841 | if (flags & XFS_PQUOTA_ACCT) | ||
842 | uflags |= FS_QUOTA_PDQ_ACCT; | ||
840 | if (flags & XFS_UQUOTA_ENFD) | 843 | if (flags & XFS_UQUOTA_ENFD) |
841 | uflags |= FS_QUOTA_UDQ_ENFD; | 844 | uflags |= FS_QUOTA_UDQ_ENFD; |
842 | if (flags & (XFS_OQUOTA_ENFD)) { | 845 | if (flags & XFS_GQUOTA_ENFD) |
843 | uflags |= (flags & XFS_GQUOTA_ACCT) ? | 846 | uflags |= FS_QUOTA_GDQ_ENFD; |
844 | FS_QUOTA_GDQ_ENFD : FS_QUOTA_PDQ_ENFD; | 847 | if (flags & XFS_PQUOTA_ENFD) |
845 | } | 848 | uflags |= FS_QUOTA_PDQ_ENFD; |
846 | return (uflags); | 849 | return (uflags); |
847 | } | 850 | } |
848 | 851 | ||
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h index c38068f26c55..c3483bab9cde 100644 --- a/fs/xfs/xfs_quota.h +++ b/fs/xfs/xfs_quota.h | |||
@@ -161,30 +161,42 @@ typedef struct xfs_qoff_logformat { | |||
161 | #define XFS_GQUOTA_ACCT 0x0040 /* group quota accounting ON */ | 161 | #define XFS_GQUOTA_ACCT 0x0040 /* group quota accounting ON */ |
162 | 162 | ||
163 | /* | 163 | /* |
164 | * Conversion to and from the combined OQUOTA flag (if necessary) | ||
165 | * is done only in xfs_sb_qflags_to_disk() and xfs_sb_qflags_from_disk() | ||
166 | */ | ||
167 | #define XFS_GQUOTA_ENFD 0x0080 /* group quota limits enforced */ | ||
168 | #define XFS_GQUOTA_CHKD 0x0100 /* quotacheck run on group quotas */ | ||
169 | #define XFS_PQUOTA_ENFD 0x0200 /* project quota limits enforced */ | ||
170 | #define XFS_PQUOTA_CHKD 0x0400 /* quotacheck run on project quotas */ | ||
171 | |||
172 | /* | ||
164 | * Quota Accounting/Enforcement flags | 173 | * Quota Accounting/Enforcement flags |
165 | */ | 174 | */ |
166 | #define XFS_ALL_QUOTA_ACCT \ | 175 | #define XFS_ALL_QUOTA_ACCT \ |
167 | (XFS_UQUOTA_ACCT | XFS_GQUOTA_ACCT | XFS_PQUOTA_ACCT) | 176 | (XFS_UQUOTA_ACCT | XFS_GQUOTA_ACCT | XFS_PQUOTA_ACCT) |
168 | #define XFS_ALL_QUOTA_ENFD (XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD) | 177 | #define XFS_ALL_QUOTA_ENFD \ |
169 | #define XFS_ALL_QUOTA_CHKD (XFS_UQUOTA_CHKD | XFS_OQUOTA_CHKD) | 178 | (XFS_UQUOTA_ENFD | XFS_GQUOTA_ENFD | XFS_PQUOTA_ENFD) |
179 | #define XFS_ALL_QUOTA_CHKD \ | ||
180 | (XFS_UQUOTA_CHKD | XFS_GQUOTA_CHKD | XFS_PQUOTA_CHKD) | ||
170 | 181 | ||
171 | #define XFS_IS_QUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ACCT) | 182 | #define XFS_IS_QUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ACCT) |
172 | #define XFS_IS_UQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_UQUOTA_ACCT) | 183 | #define XFS_IS_UQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_UQUOTA_ACCT) |
173 | #define XFS_IS_PQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_PQUOTA_ACCT) | 184 | #define XFS_IS_PQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_PQUOTA_ACCT) |
174 | #define XFS_IS_GQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_GQUOTA_ACCT) | 185 | #define XFS_IS_GQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_GQUOTA_ACCT) |
175 | #define XFS_IS_UQUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_UQUOTA_ENFD) | 186 | #define XFS_IS_UQUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_UQUOTA_ENFD) |
176 | #define XFS_IS_OQUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_OQUOTA_ENFD) | 187 | #define XFS_IS_GQUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_GQUOTA_ENFD) |
188 | #define XFS_IS_PQUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_PQUOTA_ENFD) | ||
177 | 189 | ||
178 | /* | 190 | /* |
179 | * Incore only flags for quotaoff - these bits get cleared when quota(s) | 191 | * Incore only flags for quotaoff - these bits get cleared when quota(s) |
180 | * are in the process of getting turned off. These flags are in m_qflags but | 192 | * are in the process of getting turned off. These flags are in m_qflags but |
181 | * never in sb_qflags. | 193 | * never in sb_qflags. |
182 | */ | 194 | */ |
183 | #define XFS_UQUOTA_ACTIVE 0x0100 /* uquotas are being turned off */ | 195 | #define XFS_UQUOTA_ACTIVE 0x1000 /* uquotas are being turned off */ |
184 | #define XFS_PQUOTA_ACTIVE 0x0200 /* pquotas are being turned off */ | 196 | #define XFS_GQUOTA_ACTIVE 0x2000 /* gquotas are being turned off */ |
185 | #define XFS_GQUOTA_ACTIVE 0x0400 /* gquotas are being turned off */ | 197 | #define XFS_PQUOTA_ACTIVE 0x4000 /* pquotas are being turned off */ |
186 | #define XFS_ALL_QUOTA_ACTIVE \ | 198 | #define XFS_ALL_QUOTA_ACTIVE \ |
187 | (XFS_UQUOTA_ACTIVE | XFS_PQUOTA_ACTIVE | XFS_GQUOTA_ACTIVE) | 199 | (XFS_UQUOTA_ACTIVE | XFS_GQUOTA_ACTIVE | XFS_PQUOTA_ACTIVE) |
188 | 200 | ||
189 | /* | 201 | /* |
190 | * Checking XFS_IS_*QUOTA_ON() while holding any inode lock guarantees | 202 | * Checking XFS_IS_*QUOTA_ON() while holding any inode lock guarantees |
@@ -268,24 +280,23 @@ typedef struct xfs_qoff_logformat { | |||
268 | ((XFS_IS_UQUOTA_ON(mp) && \ | 280 | ((XFS_IS_UQUOTA_ON(mp) && \ |
269 | (mp->m_sb.sb_qflags & XFS_UQUOTA_CHKD) == 0) || \ | 281 | (mp->m_sb.sb_qflags & XFS_UQUOTA_CHKD) == 0) || \ |
270 | (XFS_IS_GQUOTA_ON(mp) && \ | 282 | (XFS_IS_GQUOTA_ON(mp) && \ |
271 | ((mp->m_sb.sb_qflags & XFS_OQUOTA_CHKD) == 0 || \ | 283 | (mp->m_sb.sb_qflags & XFS_GQUOTA_CHKD) == 0) || \ |
272 | (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT))) || \ | ||
273 | (XFS_IS_PQUOTA_ON(mp) && \ | 284 | (XFS_IS_PQUOTA_ON(mp) && \ |
274 | ((mp->m_sb.sb_qflags & XFS_OQUOTA_CHKD) == 0 || \ | 285 | (mp->m_sb.sb_qflags & XFS_PQUOTA_CHKD) == 0)) |
275 | (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT)))) | ||
276 | 286 | ||
277 | #define XFS_MOUNT_QUOTA_SET1 (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\ | 287 | #define XFS_MOUNT_QUOTA_SET1 (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\ |
278 | XFS_UQUOTA_CHKD|XFS_PQUOTA_ACCT|\ | 288 | XFS_UQUOTA_CHKD|XFS_GQUOTA_ACCT|\ |
279 | XFS_OQUOTA_ENFD|XFS_OQUOTA_CHKD) | 289 | XFS_GQUOTA_ENFD|XFS_GQUOTA_CHKD) |
280 | 290 | ||
281 | #define XFS_MOUNT_QUOTA_SET2 (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\ | 291 | #define XFS_MOUNT_QUOTA_SET2 (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\ |
282 | XFS_UQUOTA_CHKD|XFS_GQUOTA_ACCT|\ | 292 | XFS_UQUOTA_CHKD|XFS_PQUOTA_ACCT|\ |
283 | XFS_OQUOTA_ENFD|XFS_OQUOTA_CHKD) | 293 | XFS_PQUOTA_ENFD|XFS_PQUOTA_CHKD) |
284 | 294 | ||
285 | #define XFS_MOUNT_QUOTA_ALL (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\ | 295 | #define XFS_MOUNT_QUOTA_ALL (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\ |
286 | XFS_UQUOTA_CHKD|XFS_PQUOTA_ACCT|\ | 296 | XFS_UQUOTA_CHKD|XFS_GQUOTA_ACCT|\ |
287 | XFS_OQUOTA_ENFD|XFS_OQUOTA_CHKD|\ | 297 | XFS_GQUOTA_ENFD|XFS_GQUOTA_CHKD|\ |
288 | XFS_GQUOTA_ACCT) | 298 | XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD|\ |
299 | XFS_PQUOTA_CHKD) | ||
289 | 300 | ||
290 | 301 | ||
291 | /* | 302 | /* |
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c index 71926d630527..20e30f93b0c7 100644 --- a/fs/xfs/xfs_quotaops.c +++ b/fs/xfs/xfs_quotaops.c | |||
@@ -75,8 +75,10 @@ xfs_fs_set_xstate( | |||
75 | flags |= XFS_GQUOTA_ACCT; | 75 | flags |= XFS_GQUOTA_ACCT; |
76 | if (uflags & FS_QUOTA_UDQ_ENFD) | 76 | if (uflags & FS_QUOTA_UDQ_ENFD) |
77 | flags |= XFS_UQUOTA_ENFD; | 77 | flags |= XFS_UQUOTA_ENFD; |
78 | if (uflags & (FS_QUOTA_PDQ_ENFD|FS_QUOTA_GDQ_ENFD)) | 78 | if (uflags & FS_QUOTA_GDQ_ENFD) |
79 | flags |= XFS_OQUOTA_ENFD; | 79 | flags |= XFS_GQUOTA_ENFD; |
80 | if (uflags & FS_QUOTA_PDQ_ENFD) | ||
81 | flags |= XFS_PQUOTA_ENFD; | ||
80 | 82 | ||
81 | switch (op) { | 83 | switch (op) { |
82 | case Q_XQUOTAON: | 84 | case Q_XQUOTAON: |
diff --git a/fs/xfs/xfs_sb.h b/fs/xfs/xfs_sb.h index 2de58a85833c..78f9e70b80c7 100644 --- a/fs/xfs/xfs_sb.h +++ b/fs/xfs/xfs_sb.h | |||
@@ -618,6 +618,12 @@ xfs_sb_has_incompat_log_feature( | |||
618 | return (sbp->sb_features_log_incompat & feature) != 0; | 618 | return (sbp->sb_features_log_incompat & feature) != 0; |
619 | } | 619 | } |
620 | 620 | ||
621 | static inline bool | ||
622 | xfs_is_quota_inode(struct xfs_sb *sbp, xfs_ino_t ino) | ||
623 | { | ||
624 | return (ino == sbp->sb_uquotino || ino == sbp->sb_gquotino); | ||
625 | } | ||
626 | |||
621 | /* | 627 | /* |
622 | * end of superblock version macros | 628 | * end of superblock version macros |
623 | */ | 629 | */ |
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 3033ba5e9762..1d68ffcdeaa7 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include "xfs_inode_item.h" | 51 | #include "xfs_inode_item.h" |
52 | #include "xfs_icache.h" | 52 | #include "xfs_icache.h" |
53 | #include "xfs_trace.h" | 53 | #include "xfs_trace.h" |
54 | #include "xfs_icreate_item.h" | ||
54 | 55 | ||
55 | #include <linux/namei.h> | 56 | #include <linux/namei.h> |
56 | #include <linux/init.h> | 57 | #include <linux/init.h> |
@@ -359,17 +360,17 @@ xfs_parseargs( | |||
359 | } else if (!strcmp(this_char, MNTOPT_PQUOTA) || | 360 | } else if (!strcmp(this_char, MNTOPT_PQUOTA) || |
360 | !strcmp(this_char, MNTOPT_PRJQUOTA)) { | 361 | !strcmp(this_char, MNTOPT_PRJQUOTA)) { |
361 | mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE | | 362 | mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE | |
362 | XFS_OQUOTA_ENFD); | 363 | XFS_PQUOTA_ENFD); |
363 | } else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) { | 364 | } else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) { |
364 | mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE); | 365 | mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE); |
365 | mp->m_qflags &= ~XFS_OQUOTA_ENFD; | 366 | mp->m_qflags &= ~XFS_PQUOTA_ENFD; |
366 | } else if (!strcmp(this_char, MNTOPT_GQUOTA) || | 367 | } else if (!strcmp(this_char, MNTOPT_GQUOTA) || |
367 | !strcmp(this_char, MNTOPT_GRPQUOTA)) { | 368 | !strcmp(this_char, MNTOPT_GRPQUOTA)) { |
368 | mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE | | 369 | mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE | |
369 | XFS_OQUOTA_ENFD); | 370 | XFS_GQUOTA_ENFD); |
370 | } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) { | 371 | } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) { |
371 | mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); | 372 | mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); |
372 | mp->m_qflags &= ~XFS_OQUOTA_ENFD; | 373 | mp->m_qflags &= ~XFS_GQUOTA_ENFD; |
373 | } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) { | 374 | } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) { |
374 | xfs_warn(mp, | 375 | xfs_warn(mp, |
375 | "delaylog is the default now, option is deprecated."); | 376 | "delaylog is the default now, option is deprecated."); |
@@ -439,20 +440,15 @@ xfs_parseargs( | |||
439 | } | 440 | } |
440 | 441 | ||
441 | done: | 442 | done: |
442 | if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) { | 443 | if (dsunit && !(mp->m_flags & XFS_MOUNT_NOALIGN)) { |
443 | /* | 444 | /* |
444 | * At this point the superblock has not been read | 445 | * At this point the superblock has not been read |
445 | * in, therefore we do not know the block size. | 446 | * in, therefore we do not know the block size. |
446 | * Before the mount call ends we will convert | 447 | * Before the mount call ends we will convert |
447 | * these to FSBs. | 448 | * these to FSBs. |
448 | */ | 449 | */ |
449 | if (dsunit) { | 450 | mp->m_dalign = dsunit; |
450 | mp->m_dalign = dsunit; | 451 | mp->m_swidth = dswidth; |
451 | mp->m_flags |= XFS_MOUNT_RETERR; | ||
452 | } | ||
453 | |||
454 | if (dswidth) | ||
455 | mp->m_swidth = dswidth; | ||
456 | } | 452 | } |
457 | 453 | ||
458 | if (mp->m_logbufs != -1 && | 454 | if (mp->m_logbufs != -1 && |
@@ -563,12 +559,12 @@ xfs_showargs( | |||
563 | /* Either project or group quotas can be active, not both */ | 559 | /* Either project or group quotas can be active, not both */ |
564 | 560 | ||
565 | if (mp->m_qflags & XFS_PQUOTA_ACCT) { | 561 | if (mp->m_qflags & XFS_PQUOTA_ACCT) { |
566 | if (mp->m_qflags & XFS_OQUOTA_ENFD) | 562 | if (mp->m_qflags & XFS_PQUOTA_ENFD) |
567 | seq_puts(m, "," MNTOPT_PRJQUOTA); | 563 | seq_puts(m, "," MNTOPT_PRJQUOTA); |
568 | else | 564 | else |
569 | seq_puts(m, "," MNTOPT_PQUOTANOENF); | 565 | seq_puts(m, "," MNTOPT_PQUOTANOENF); |
570 | } else if (mp->m_qflags & XFS_GQUOTA_ACCT) { | 566 | } else if (mp->m_qflags & XFS_GQUOTA_ACCT) { |
571 | if (mp->m_qflags & XFS_OQUOTA_ENFD) | 567 | if (mp->m_qflags & XFS_GQUOTA_ENFD) |
572 | seq_puts(m, "," MNTOPT_GRPQUOTA); | 568 | seq_puts(m, "," MNTOPT_GRPQUOTA); |
573 | else | 569 | else |
574 | seq_puts(m, "," MNTOPT_GQUOTANOENF); | 570 | seq_puts(m, "," MNTOPT_GQUOTANOENF); |
@@ -1136,8 +1132,8 @@ xfs_fs_statfs( | |||
1136 | spin_unlock(&mp->m_sb_lock); | 1132 | spin_unlock(&mp->m_sb_lock); |
1137 | 1133 | ||
1138 | if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && | 1134 | if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && |
1139 | ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) == | 1135 | ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) == |
1140 | (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD)) | 1136 | (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD)) |
1141 | xfs_qm_statvfs(ip, statp); | 1137 | xfs_qm_statvfs(ip, statp); |
1142 | return 0; | 1138 | return 0; |
1143 | } | 1139 | } |
@@ -1481,6 +1477,10 @@ xfs_fs_fill_super( | |||
1481 | sb->s_time_gran = 1; | 1477 | sb->s_time_gran = 1; |
1482 | set_posix_acl_flag(sb); | 1478 | set_posix_acl_flag(sb); |
1483 | 1479 | ||
1480 | /* version 5 superblocks support inode version counters. */ | ||
1481 | if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5) | ||
1482 | sb->s_flags |= MS_I_VERSION; | ||
1483 | |||
1484 | error = xfs_mountfs(mp); | 1484 | error = xfs_mountfs(mp); |
1485 | if (error) | 1485 | if (error) |
1486 | goto out_filestream_unmount; | 1486 | goto out_filestream_unmount; |
@@ -1655,9 +1655,15 @@ xfs_init_zones(void) | |||
1655 | KM_ZONE_SPREAD, NULL); | 1655 | KM_ZONE_SPREAD, NULL); |
1656 | if (!xfs_ili_zone) | 1656 | if (!xfs_ili_zone) |
1657 | goto out_destroy_inode_zone; | 1657 | goto out_destroy_inode_zone; |
1658 | xfs_icreate_zone = kmem_zone_init(sizeof(struct xfs_icreate_item), | ||
1659 | "xfs_icr"); | ||
1660 | if (!xfs_icreate_zone) | ||
1661 | goto out_destroy_ili_zone; | ||
1658 | 1662 | ||
1659 | return 0; | 1663 | return 0; |
1660 | 1664 | ||
1665 | out_destroy_ili_zone: | ||
1666 | kmem_zone_destroy(xfs_ili_zone); | ||
1661 | out_destroy_inode_zone: | 1667 | out_destroy_inode_zone: |
1662 | kmem_zone_destroy(xfs_inode_zone); | 1668 | kmem_zone_destroy(xfs_inode_zone); |
1663 | out_destroy_efi_zone: | 1669 | out_destroy_efi_zone: |
@@ -1696,6 +1702,7 @@ xfs_destroy_zones(void) | |||
1696 | * destroy caches. | 1702 | * destroy caches. |
1697 | */ | 1703 | */ |
1698 | rcu_barrier(); | 1704 | rcu_barrier(); |
1705 | kmem_zone_destroy(xfs_icreate_zone); | ||
1699 | kmem_zone_destroy(xfs_ili_zone); | 1706 | kmem_zone_destroy(xfs_ili_zone); |
1700 | kmem_zone_destroy(xfs_inode_zone); | 1707 | kmem_zone_destroy(xfs_inode_zone); |
1701 | kmem_zone_destroy(xfs_efi_zone); | 1708 | kmem_zone_destroy(xfs_efi_zone); |
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c index 195a403e1522..e830fb56e27f 100644 --- a/fs/xfs/xfs_symlink.c +++ b/fs/xfs/xfs_symlink.c | |||
@@ -358,7 +358,8 @@ xfs_symlink( | |||
358 | int n; | 358 | int n; |
359 | xfs_buf_t *bp; | 359 | xfs_buf_t *bp; |
360 | prid_t prid; | 360 | prid_t prid; |
361 | struct xfs_dquot *udqp, *gdqp; | 361 | struct xfs_dquot *udqp = NULL; |
362 | struct xfs_dquot *gdqp = NULL; | ||
362 | uint resblks; | 363 | uint resblks; |
363 | 364 | ||
364 | *ipp = NULL; | 365 | *ipp = NULL; |
@@ -585,7 +586,7 @@ xfs_symlink( | |||
585 | /* | 586 | /* |
586 | * Free a symlink that has blocks associated with it. | 587 | * Free a symlink that has blocks associated with it. |
587 | */ | 588 | */ |
588 | int | 589 | STATIC int |
589 | xfs_inactive_symlink_rmt( | 590 | xfs_inactive_symlink_rmt( |
590 | xfs_inode_t *ip, | 591 | xfs_inode_t *ip, |
591 | xfs_trans_t **tpp) | 592 | xfs_trans_t **tpp) |
@@ -606,7 +607,7 @@ xfs_inactive_symlink_rmt( | |||
606 | 607 | ||
607 | tp = *tpp; | 608 | tp = *tpp; |
608 | mp = ip->i_mount; | 609 | mp = ip->i_mount; |
609 | ASSERT(ip->i_d.di_size > XFS_IFORK_DSIZE(ip)); | 610 | ASSERT(ip->i_df.if_flags & XFS_IFEXTENTS); |
610 | /* | 611 | /* |
611 | * We're freeing a symlink that has some | 612 | * We're freeing a symlink that has some |
612 | * blocks allocated to it. Free the | 613 | * blocks allocated to it. Free the |
@@ -720,3 +721,47 @@ xfs_inactive_symlink_rmt( | |||
720 | error0: | 721 | error0: |
721 | return error; | 722 | return error; |
722 | } | 723 | } |
724 | |||
725 | /* | ||
726 | * xfs_inactive_symlink - free a symlink | ||
727 | */ | ||
728 | int | ||
729 | xfs_inactive_symlink( | ||
730 | struct xfs_inode *ip, | ||
731 | struct xfs_trans **tp) | ||
732 | { | ||
733 | struct xfs_mount *mp = ip->i_mount; | ||
734 | int pathlen; | ||
735 | |||
736 | trace_xfs_inactive_symlink(ip); | ||
737 | |||
738 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | ||
739 | |||
740 | if (XFS_FORCED_SHUTDOWN(mp)) | ||
741 | return XFS_ERROR(EIO); | ||
742 | |||
743 | /* | ||
744 | * Zero length symlinks _can_ exist. | ||
745 | */ | ||
746 | pathlen = (int)ip->i_d.di_size; | ||
747 | if (!pathlen) | ||
748 | return 0; | ||
749 | |||
750 | if (pathlen < 0 || pathlen > MAXPATHLEN) { | ||
751 | xfs_alert(mp, "%s: inode (0x%llx) bad symlink length (%d)", | ||
752 | __func__, (unsigned long long)ip->i_ino, pathlen); | ||
753 | ASSERT(0); | ||
754 | return XFS_ERROR(EFSCORRUPTED); | ||
755 | } | ||
756 | |||
757 | if (ip->i_df.if_flags & XFS_IFINLINE) { | ||
758 | if (ip->i_df.if_bytes > 0) | ||
759 | xfs_idata_realloc(ip, -(ip->i_df.if_bytes), | ||
760 | XFS_DATA_FORK); | ||
761 | ASSERT(ip->i_df.if_bytes == 0); | ||
762 | return 0; | ||
763 | } | ||
764 | |||
765 | /* remove the remote symlink */ | ||
766 | return xfs_inactive_symlink_rmt(ip, tp); | ||
767 | } | ||
diff --git a/fs/xfs/xfs_symlink.h b/fs/xfs/xfs_symlink.h index b39398d2097c..374394880c01 100644 --- a/fs/xfs/xfs_symlink.h +++ b/fs/xfs/xfs_symlink.h | |||
@@ -60,7 +60,7 @@ extern const struct xfs_buf_ops xfs_symlink_buf_ops; | |||
60 | int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name, | 60 | int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name, |
61 | const char *target_path, umode_t mode, struct xfs_inode **ipp); | 61 | const char *target_path, umode_t mode, struct xfs_inode **ipp); |
62 | int xfs_readlink(struct xfs_inode *ip, char *link); | 62 | int xfs_readlink(struct xfs_inode *ip, char *link); |
63 | int xfs_inactive_symlink_rmt(struct xfs_inode *ip, struct xfs_trans **tpp); | 63 | int xfs_inactive_symlink(struct xfs_inode *ip, struct xfs_trans **tpp); |
64 | 64 | ||
65 | #endif /* __KERNEL__ */ | 65 | #endif /* __KERNEL__ */ |
66 | #endif /* __XFS_SYMLINK_H */ | 66 | #endif /* __XFS_SYMLINK_H */ |
diff --git a/fs/xfs/xfs_sysctl.c b/fs/xfs/xfs_sysctl.c index 2801b5ce6cdb..1743b9f8e23d 100644 --- a/fs/xfs/xfs_sysctl.c +++ b/fs/xfs/xfs_sysctl.c | |||
@@ -25,11 +25,11 @@ static struct ctl_table_header *xfs_table_header; | |||
25 | #ifdef CONFIG_PROC_FS | 25 | #ifdef CONFIG_PROC_FS |
26 | STATIC int | 26 | STATIC int |
27 | xfs_stats_clear_proc_handler( | 27 | xfs_stats_clear_proc_handler( |
28 | ctl_table *ctl, | 28 | struct ctl_table *ctl, |
29 | int write, | 29 | int write, |
30 | void __user *buffer, | 30 | void __user *buffer, |
31 | size_t *lenp, | 31 | size_t *lenp, |
32 | loff_t *ppos) | 32 | loff_t *ppos) |
33 | { | 33 | { |
34 | int c, ret, *valp = ctl->data; | 34 | int c, ret, *valp = ctl->data; |
35 | __uint32_t vn_active; | 35 | __uint32_t vn_active; |
@@ -55,11 +55,11 @@ xfs_stats_clear_proc_handler( | |||
55 | 55 | ||
56 | STATIC int | 56 | STATIC int |
57 | xfs_panic_mask_proc_handler( | 57 | xfs_panic_mask_proc_handler( |
58 | ctl_table *ctl, | 58 | struct ctl_table *ctl, |
59 | int write, | 59 | int write, |
60 | void __user *buffer, | 60 | void __user *buffer, |
61 | size_t *lenp, | 61 | size_t *lenp, |
62 | loff_t *ppos) | 62 | loff_t *ppos) |
63 | { | 63 | { |
64 | int ret, *valp = ctl->data; | 64 | int ret, *valp = ctl->data; |
65 | 65 | ||
@@ -74,7 +74,7 @@ xfs_panic_mask_proc_handler( | |||
74 | } | 74 | } |
75 | #endif /* CONFIG_PROC_FS */ | 75 | #endif /* CONFIG_PROC_FS */ |
76 | 76 | ||
77 | static ctl_table xfs_table[] = { | 77 | static struct ctl_table xfs_table[] = { |
78 | { | 78 | { |
79 | .procname = "irix_sgid_inherit", | 79 | .procname = "irix_sgid_inherit", |
80 | .data = &xfs_params.sgid_inherit.val, | 80 | .data = &xfs_params.sgid_inherit.val, |
@@ -227,7 +227,7 @@ static ctl_table xfs_table[] = { | |||
227 | {} | 227 | {} |
228 | }; | 228 | }; |
229 | 229 | ||
230 | static ctl_table xfs_dir_table[] = { | 230 | static struct ctl_table xfs_dir_table[] = { |
231 | { | 231 | { |
232 | .procname = "xfs", | 232 | .procname = "xfs", |
233 | .mode = 0555, | 233 | .mode = 0555, |
@@ -236,7 +236,7 @@ static ctl_table xfs_dir_table[] = { | |||
236 | {} | 236 | {} |
237 | }; | 237 | }; |
238 | 238 | ||
239 | static ctl_table xfs_root_table[] = { | 239 | static struct ctl_table xfs_root_table[] = { |
240 | { | 240 | { |
241 | .procname = "fs", | 241 | .procname = "fs", |
242 | .mode = 0555, | 242 | .mode = 0555, |
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index a04701de6bbd..47910e638c18 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h | |||
@@ -486,9 +486,12 @@ DEFINE_EVENT(xfs_buf_item_class, name, \ | |||
486 | TP_PROTO(struct xfs_buf_log_item *bip), \ | 486 | TP_PROTO(struct xfs_buf_log_item *bip), \ |
487 | TP_ARGS(bip)) | 487 | TP_ARGS(bip)) |
488 | DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size); | 488 | DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size); |
489 | DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_ordered); | ||
489 | DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale); | 490 | DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale); |
490 | DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format); | 491 | DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format); |
492 | DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_ordered); | ||
491 | DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale); | 493 | DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale); |
494 | DEFINE_BUF_ITEM_EVENT(xfs_buf_item_ordered); | ||
492 | DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin); | 495 | DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin); |
493 | DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin); | 496 | DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin); |
494 | DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale); | 497 | DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale); |
@@ -508,6 +511,7 @@ DEFINE_BUF_ITEM_EVENT(xfs_trans_bjoin); | |||
508 | DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold); | 511 | DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold); |
509 | DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release); | 512 | DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release); |
510 | DEFINE_BUF_ITEM_EVENT(xfs_trans_binval); | 513 | DEFINE_BUF_ITEM_EVENT(xfs_trans_binval); |
514 | DEFINE_BUF_ITEM_EVENT(xfs_trans_buf_ordered); | ||
511 | 515 | ||
512 | DECLARE_EVENT_CLASS(xfs_lock_class, | 516 | DECLARE_EVENT_CLASS(xfs_lock_class, |
513 | TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, | 517 | TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, |
@@ -571,6 +575,7 @@ DEFINE_INODE_EVENT(xfs_iget_miss); | |||
571 | DEFINE_INODE_EVENT(xfs_getattr); | 575 | DEFINE_INODE_EVENT(xfs_getattr); |
572 | DEFINE_INODE_EVENT(xfs_setattr); | 576 | DEFINE_INODE_EVENT(xfs_setattr); |
573 | DEFINE_INODE_EVENT(xfs_readlink); | 577 | DEFINE_INODE_EVENT(xfs_readlink); |
578 | DEFINE_INODE_EVENT(xfs_inactive_symlink); | ||
574 | DEFINE_INODE_EVENT(xfs_alloc_file_space); | 579 | DEFINE_INODE_EVENT(xfs_alloc_file_space); |
575 | DEFINE_INODE_EVENT(xfs_free_file_space); | 580 | DEFINE_INODE_EVENT(xfs_free_file_space); |
576 | DEFINE_INODE_EVENT(xfs_readdir); | 581 | DEFINE_INODE_EVENT(xfs_readdir); |
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 2fd7c1ff1d21..35a229981354 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c | |||
@@ -234,71 +234,93 @@ xfs_calc_remove_reservation( | |||
234 | } | 234 | } |
235 | 235 | ||
236 | /* | 236 | /* |
237 | * For symlink we can modify: | 237 | * For create, break it into the two cases that the transaction |
238 | * covers. We start with the modify case - allocation done by modification | ||
239 | * of the state of existing inodes - and the allocation case. | ||
240 | */ | ||
241 | |||
242 | /* | ||
243 | * For create we can modify: | ||
238 | * the parent directory inode: inode size | 244 | * the parent directory inode: inode size |
239 | * the new inode: inode size | 245 | * the new inode: inode size |
240 | * the inode btree entry: 1 block | 246 | * the inode btree entry: block size |
247 | * the superblock for the nlink flag: sector size | ||
241 | * the directory btree: (max depth + v2) * dir block size | 248 | * the directory btree: (max depth + v2) * dir block size |
242 | * the directory inode's bmap btree: (max depth + v2) * block size | 249 | * the directory inode's bmap btree: (max depth + v2) * block size |
243 | * the blocks for the symlink: 1 kB | 250 | */ |
244 | * Or in the first xact we allocate some inodes giving: | 251 | STATIC uint |
252 | xfs_calc_create_resv_modify( | ||
253 | struct xfs_mount *mp) | ||
254 | { | ||
255 | return xfs_calc_buf_res(2, mp->m_sb.sb_inodesize) + | ||
256 | xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) + | ||
257 | (uint)XFS_FSB_TO_B(mp, 1) + | ||
258 | xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1)); | ||
259 | } | ||
260 | |||
261 | /* | ||
262 | * For create we can allocate some inodes giving: | ||
245 | * the agi and agf of the ag getting the new inodes: 2 * sectorsize | 263 | * the agi and agf of the ag getting the new inodes: 2 * sectorsize |
264 | * the superblock for the nlink flag: sector size | ||
246 | * the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize | 265 | * the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize |
247 | * the inode btree: max depth * blocksize | 266 | * the inode btree: max depth * blocksize |
248 | * the allocation btrees: 2 trees * (2 * max depth - 1) * block size | 267 | * the allocation btrees: 2 trees * (max depth - 1) * block size |
249 | */ | 268 | */ |
250 | STATIC uint | 269 | STATIC uint |
251 | xfs_calc_symlink_reservation( | 270 | xfs_calc_create_resv_alloc( |
271 | struct xfs_mount *mp) | ||
272 | { | ||
273 | return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) + | ||
274 | mp->m_sb.sb_sectsize + | ||
275 | xfs_calc_buf_res(XFS_IALLOC_BLOCKS(mp), XFS_FSB_TO_B(mp, 1)) + | ||
276 | xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) + | ||
277 | xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1), | ||
278 | XFS_FSB_TO_B(mp, 1)); | ||
279 | } | ||
280 | |||
281 | STATIC uint | ||
282 | __xfs_calc_create_reservation( | ||
252 | struct xfs_mount *mp) | 283 | struct xfs_mount *mp) |
253 | { | 284 | { |
254 | return XFS_DQUOT_LOGRES(mp) + | 285 | return XFS_DQUOT_LOGRES(mp) + |
255 | MAX((xfs_calc_buf_res(2, mp->m_sb.sb_inodesize) + | 286 | MAX(xfs_calc_create_resv_alloc(mp), |
256 | xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, 1)) + | 287 | xfs_calc_create_resv_modify(mp)); |
257 | xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), | ||
258 | XFS_FSB_TO_B(mp, 1)) + | ||
259 | xfs_calc_buf_res(1, 1024)), | ||
260 | (xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) + | ||
261 | xfs_calc_buf_res(XFS_IALLOC_BLOCKS(mp), | ||
262 | XFS_FSB_TO_B(mp, 1)) + | ||
263 | xfs_calc_buf_res(mp->m_in_maxlevels, | ||
264 | XFS_FSB_TO_B(mp, 1)) + | ||
265 | xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1), | ||
266 | XFS_FSB_TO_B(mp, 1)))); | ||
267 | } | 288 | } |
268 | 289 | ||
269 | /* | 290 | /* |
270 | * For create we can modify: | 291 | * For icreate we can allocate some inodes giving: |
271 | * the parent directory inode: inode size | ||
272 | * the new inode: inode size | ||
273 | * the inode btree entry: block size | ||
274 | * the superblock for the nlink flag: sector size | ||
275 | * the directory btree: (max depth + v2) * dir block size | ||
276 | * the directory inode's bmap btree: (max depth + v2) * block size | ||
277 | * Or in the first xact we allocate some inodes giving: | ||
278 | * the agi and agf of the ag getting the new inodes: 2 * sectorsize | 292 | * the agi and agf of the ag getting the new inodes: 2 * sectorsize |
279 | * the superblock for the nlink flag: sector size | 293 | * the superblock for the nlink flag: sector size |
280 | * the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize | ||
281 | * the inode btree: max depth * blocksize | 294 | * the inode btree: max depth * blocksize |
282 | * the allocation btrees: 2 trees * (max depth - 1) * block size | 295 | * the allocation btrees: 2 trees * (max depth - 1) * block size |
283 | */ | 296 | */ |
284 | STATIC uint | 297 | STATIC uint |
285 | xfs_calc_create_reservation( | 298 | xfs_calc_icreate_resv_alloc( |
286 | struct xfs_mount *mp) | 299 | struct xfs_mount *mp) |
287 | { | 300 | { |
301 | return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) + | ||
302 | mp->m_sb.sb_sectsize + | ||
303 | xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) + | ||
304 | xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1), | ||
305 | XFS_FSB_TO_B(mp, 1)); | ||
306 | } | ||
307 | |||
308 | STATIC uint | ||
309 | xfs_calc_icreate_reservation(xfs_mount_t *mp) | ||
310 | { | ||
288 | return XFS_DQUOT_LOGRES(mp) + | 311 | return XFS_DQUOT_LOGRES(mp) + |
289 | MAX((xfs_calc_buf_res(2, mp->m_sb.sb_inodesize) + | 312 | MAX(xfs_calc_icreate_resv_alloc(mp), |
290 | xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) + | 313 | xfs_calc_create_resv_modify(mp)); |
291 | (uint)XFS_FSB_TO_B(mp, 1) + | 314 | } |
292 | xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), | 315 | |
293 | XFS_FSB_TO_B(mp, 1))), | 316 | STATIC uint |
294 | (xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) + | 317 | xfs_calc_create_reservation( |
295 | mp->m_sb.sb_sectsize + | 318 | struct xfs_mount *mp) |
296 | xfs_calc_buf_res(XFS_IALLOC_BLOCKS(mp), | 319 | { |
297 | XFS_FSB_TO_B(mp, 1)) + | 320 | if (xfs_sb_version_hascrc(&mp->m_sb)) |
298 | xfs_calc_buf_res(mp->m_in_maxlevels, | 321 | return xfs_calc_icreate_reservation(mp); |
299 | XFS_FSB_TO_B(mp, 1)) + | 322 | return __xfs_calc_create_reservation(mp); |
300 | xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1), | 323 | |
301 | XFS_FSB_TO_B(mp, 1)))); | ||
302 | } | 324 | } |
303 | 325 | ||
304 | /* | 326 | /* |
@@ -311,6 +333,20 @@ xfs_calc_mkdir_reservation( | |||
311 | return xfs_calc_create_reservation(mp); | 333 | return xfs_calc_create_reservation(mp); |
312 | } | 334 | } |
313 | 335 | ||
336 | |||
337 | /* | ||
338 | * Making a new symlink is the same as creating a new file, but | ||
339 | * with the added blocks for remote symlink data which can be up to 1kB in | ||
340 | * length (MAXPATHLEN). | ||
341 | */ | ||
342 | STATIC uint | ||
343 | xfs_calc_symlink_reservation( | ||
344 | struct xfs_mount *mp) | ||
345 | { | ||
346 | return xfs_calc_create_reservation(mp) + | ||
347 | xfs_calc_buf_res(1, MAXPATHLEN); | ||
348 | } | ||
349 | |||
314 | /* | 350 | /* |
315 | * In freeing an inode we can modify: | 351 | * In freeing an inode we can modify: |
316 | * the inode being freed: inode size | 352 | * the inode being freed: inode size |
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index a44dba5b2cdb..2b4946393e30 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h | |||
@@ -48,6 +48,7 @@ typedef struct xfs_trans_header { | |||
48 | #define XFS_LI_BUF 0x123c /* v2 bufs, variable sized inode bufs */ | 48 | #define XFS_LI_BUF 0x123c /* v2 bufs, variable sized inode bufs */ |
49 | #define XFS_LI_DQUOT 0x123d | 49 | #define XFS_LI_DQUOT 0x123d |
50 | #define XFS_LI_QUOTAOFF 0x123e | 50 | #define XFS_LI_QUOTAOFF 0x123e |
51 | #define XFS_LI_ICREATE 0x123f | ||
51 | 52 | ||
52 | #define XFS_LI_TYPE_DESC \ | 53 | #define XFS_LI_TYPE_DESC \ |
53 | { XFS_LI_EFI, "XFS_LI_EFI" }, \ | 54 | { XFS_LI_EFI, "XFS_LI_EFI" }, \ |
@@ -107,7 +108,8 @@ typedef struct xfs_trans_header { | |||
107 | #define XFS_TRANS_SWAPEXT 40 | 108 | #define XFS_TRANS_SWAPEXT 40 |
108 | #define XFS_TRANS_SB_COUNT 41 | 109 | #define XFS_TRANS_SB_COUNT 41 |
109 | #define XFS_TRANS_CHECKPOINT 42 | 110 | #define XFS_TRANS_CHECKPOINT 42 |
110 | #define XFS_TRANS_TYPE_MAX 42 | 111 | #define XFS_TRANS_ICREATE 43 |
112 | #define XFS_TRANS_TYPE_MAX 43 | ||
111 | /* new transaction types need to be reflected in xfs_logprint(8) */ | 113 | /* new transaction types need to be reflected in xfs_logprint(8) */ |
112 | 114 | ||
113 | #define XFS_TRANS_TYPES \ | 115 | #define XFS_TRANS_TYPES \ |
@@ -210,23 +212,18 @@ struct xfs_log_item_desc { | |||
210 | /* | 212 | /* |
211 | * Per-extent log reservation for the allocation btree changes | 213 | * Per-extent log reservation for the allocation btree changes |
212 | * involved in freeing or allocating an extent. | 214 | * involved in freeing or allocating an extent. |
213 | * 2 trees * (2 blocks/level * max depth - 1) * block size | 215 | * 2 trees * (2 blocks/level * max depth - 1) |
214 | */ | 216 | */ |
215 | #define XFS_ALLOCFREE_LOG_RES(mp,nx) \ | ||
216 | ((nx) * (2 * XFS_FSB_TO_B((mp), 2 * XFS_AG_MAXLEVELS(mp) - 1))) | ||
217 | #define XFS_ALLOCFREE_LOG_COUNT(mp,nx) \ | 217 | #define XFS_ALLOCFREE_LOG_COUNT(mp,nx) \ |
218 | ((nx) * (2 * (2 * XFS_AG_MAXLEVELS(mp) - 1))) | 218 | ((nx) * (2 * (2 * XFS_AG_MAXLEVELS(mp) - 1))) |
219 | 219 | ||
220 | /* | 220 | /* |
221 | * Per-directory log reservation for any directory change. | 221 | * Per-directory log reservation for any directory change. |
222 | * dir blocks: (1 btree block per level + data block + free block) * dblock size | 222 | * dir blocks: (1 btree block per level + data block + free block) |
223 | * bmap btree: (levels + 2) * max depth * block size | 223 | * bmap btree: (levels + 2) * max depth |
224 | * v2 directory blocks can be fragmented below the dirblksize down to the fsb | 224 | * v2 directory blocks can be fragmented below the dirblksize down to the fsb |
225 | * size, so account for that in the DAENTER macros. | 225 | * size, so account for that in the DAENTER macros. |
226 | */ | 226 | */ |
227 | #define XFS_DIROP_LOG_RES(mp) \ | ||
228 | (XFS_FSB_TO_B(mp, XFS_DAENTER_BLOCKS(mp, XFS_DATA_FORK)) + \ | ||
229 | (XFS_FSB_TO_B(mp, XFS_DAENTER_BMAPS(mp, XFS_DATA_FORK) + 1))) | ||
230 | #define XFS_DIROP_LOG_COUNT(mp) \ | 227 | #define XFS_DIROP_LOG_COUNT(mp) \ |
231 | (XFS_DAENTER_BLOCKS(mp, XFS_DATA_FORK) + \ | 228 | (XFS_DAENTER_BLOCKS(mp, XFS_DATA_FORK) + \ |
232 | XFS_DAENTER_BMAPS(mp, XFS_DATA_FORK) + 1) | 229 | XFS_DAENTER_BMAPS(mp, XFS_DATA_FORK) + 1) |
@@ -503,6 +500,7 @@ void xfs_trans_bhold_release(xfs_trans_t *, struct xfs_buf *); | |||
503 | void xfs_trans_binval(xfs_trans_t *, struct xfs_buf *); | 500 | void xfs_trans_binval(xfs_trans_t *, struct xfs_buf *); |
504 | void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *); | 501 | void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *); |
505 | void xfs_trans_stale_inode_buf(xfs_trans_t *, struct xfs_buf *); | 502 | void xfs_trans_stale_inode_buf(xfs_trans_t *, struct xfs_buf *); |
503 | void xfs_trans_ordered_buf(xfs_trans_t *, struct xfs_buf *); | ||
506 | void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint); | 504 | void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint); |
507 | void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *); | 505 | void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *); |
508 | void xfs_trans_ichgtime(struct xfs_trans *, struct xfs_inode *, int); | 506 | void xfs_trans_ichgtime(struct xfs_trans *, struct xfs_inode *, int); |
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index 73a5fa457e16..aa5a04b844d6 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c | |||
@@ -397,7 +397,6 @@ shutdown_abort: | |||
397 | return XFS_ERROR(EIO); | 397 | return XFS_ERROR(EIO); |
398 | } | 398 | } |
399 | 399 | ||
400 | |||
401 | /* | 400 | /* |
402 | * Release the buffer bp which was previously acquired with one of the | 401 | * Release the buffer bp which was previously acquired with one of the |
403 | * xfs_trans_... buffer allocation routines if the buffer has not | 402 | * xfs_trans_... buffer allocation routines if the buffer has not |
@@ -603,8 +602,14 @@ xfs_trans_log_buf(xfs_trans_t *tp, | |||
603 | 602 | ||
604 | tp->t_flags |= XFS_TRANS_DIRTY; | 603 | tp->t_flags |= XFS_TRANS_DIRTY; |
605 | bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY; | 604 | bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY; |
606 | bip->bli_flags |= XFS_BLI_LOGGED; | 605 | |
607 | xfs_buf_item_log(bip, first, last); | 606 | /* |
607 | * If we have an ordered buffer we are not logging any dirty range but | ||
608 | * it still needs to be marked dirty and that it has been logged. | ||
609 | */ | ||
610 | bip->bli_flags |= XFS_BLI_DIRTY | XFS_BLI_LOGGED; | ||
611 | if (!(bip->bli_flags & XFS_BLI_ORDERED)) | ||
612 | xfs_buf_item_log(bip, first, last); | ||
608 | } | 613 | } |
609 | 614 | ||
610 | 615 | ||
@@ -757,6 +762,29 @@ xfs_trans_inode_alloc_buf( | |||
757 | } | 762 | } |
758 | 763 | ||
759 | /* | 764 | /* |
765 | * Mark the buffer as ordered for this transaction. This means | ||
766 | * that the contents of the buffer are not recorded in the transaction | ||
767 | * but it is tracked in the AIL as though it was. This allows us | ||
768 | * to record logical changes in transactions rather than the physical | ||
769 | * changes we make to the buffer without changing writeback ordering | ||
770 | * constraints of metadata buffers. | ||
771 | */ | ||
772 | void | ||
773 | xfs_trans_ordered_buf( | ||
774 | struct xfs_trans *tp, | ||
775 | struct xfs_buf *bp) | ||
776 | { | ||
777 | struct xfs_buf_log_item *bip = bp->b_fspriv; | ||
778 | |||
779 | ASSERT(bp->b_transp == tp); | ||
780 | ASSERT(bip != NULL); | ||
781 | ASSERT(atomic_read(&bip->bli_refcount) > 0); | ||
782 | |||
783 | bip->bli_flags |= XFS_BLI_ORDERED; | ||
784 | trace_xfs_buf_item_ordered(bip); | ||
785 | } | ||
786 | |||
787 | /* | ||
760 | * Set the type of the buffer for log recovery so that it can correctly identify | 788 | * Set the type of the buffer for log recovery so that it can correctly identify |
761 | * and hence attach the correct buffer ops to the buffer after replay. | 789 | * and hence attach the correct buffer ops to the buffer after replay. |
762 | */ | 790 | */ |
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c index fec75d023703..3ba64d540168 100644 --- a/fs/xfs/xfs_trans_dquot.c +++ b/fs/xfs/xfs_trans_dquot.c | |||
@@ -103,8 +103,6 @@ xfs_trans_dup_dqinfo( | |||
103 | return; | 103 | return; |
104 | 104 | ||
105 | xfs_trans_alloc_dqinfo(ntp); | 105 | xfs_trans_alloc_dqinfo(ntp); |
106 | oqa = otp->t_dqinfo->dqa_usrdquots; | ||
107 | nqa = ntp->t_dqinfo->dqa_usrdquots; | ||
108 | 106 | ||
109 | /* | 107 | /* |
110 | * Because the quota blk reservation is carried forward, | 108 | * Because the quota blk reservation is carried forward, |
@@ -113,7 +111,9 @@ xfs_trans_dup_dqinfo( | |||
113 | if(otp->t_flags & XFS_TRANS_DQ_DIRTY) | 111 | if(otp->t_flags & XFS_TRANS_DQ_DIRTY) |
114 | ntp->t_flags |= XFS_TRANS_DQ_DIRTY; | 112 | ntp->t_flags |= XFS_TRANS_DQ_DIRTY; |
115 | 113 | ||
116 | for (j = 0; j < 2; j++) { | 114 | for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) { |
115 | oqa = otp->t_dqinfo->dqs[j]; | ||
116 | nqa = ntp->t_dqinfo->dqs[j]; | ||
117 | for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { | 117 | for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { |
118 | if (oqa[i].qt_dquot == NULL) | 118 | if (oqa[i].qt_dquot == NULL) |
119 | break; | 119 | break; |
@@ -138,8 +138,6 @@ xfs_trans_dup_dqinfo( | |||
138 | oq->qt_ino_res = oq->qt_ino_res_used; | 138 | oq->qt_ino_res = oq->qt_ino_res_used; |
139 | 139 | ||
140 | } | 140 | } |
141 | oqa = otp->t_dqinfo->dqa_grpdquots; | ||
142 | nqa = ntp->t_dqinfo->dqa_grpdquots; | ||
143 | } | 141 | } |
144 | } | 142 | } |
145 | 143 | ||
@@ -157,8 +155,7 @@ xfs_trans_mod_dquot_byino( | |||
157 | 155 | ||
158 | if (!XFS_IS_QUOTA_RUNNING(mp) || | 156 | if (!XFS_IS_QUOTA_RUNNING(mp) || |
159 | !XFS_IS_QUOTA_ON(mp) || | 157 | !XFS_IS_QUOTA_ON(mp) || |
160 | ip->i_ino == mp->m_sb.sb_uquotino || | 158 | xfs_is_quota_inode(&mp->m_sb, ip->i_ino)) |
161 | ip->i_ino == mp->m_sb.sb_gquotino) | ||
162 | return; | 159 | return; |
163 | 160 | ||
164 | if (tp->t_dqinfo == NULL) | 161 | if (tp->t_dqinfo == NULL) |
@@ -170,16 +167,18 @@ xfs_trans_mod_dquot_byino( | |||
170 | (void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta); | 167 | (void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta); |
171 | } | 168 | } |
172 | 169 | ||
173 | STATIC xfs_dqtrx_t * | 170 | STATIC struct xfs_dqtrx * |
174 | xfs_trans_get_dqtrx( | 171 | xfs_trans_get_dqtrx( |
175 | xfs_trans_t *tp, | 172 | struct xfs_trans *tp, |
176 | xfs_dquot_t *dqp) | 173 | struct xfs_dquot *dqp) |
177 | { | 174 | { |
178 | int i; | 175 | int i; |
179 | xfs_dqtrx_t *qa; | 176 | struct xfs_dqtrx *qa; |
180 | 177 | ||
181 | qa = XFS_QM_ISUDQ(dqp) ? | 178 | if (XFS_QM_ISUDQ(dqp)) |
182 | tp->t_dqinfo->dqa_usrdquots : tp->t_dqinfo->dqa_grpdquots; | 179 | qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR]; |
180 | else | ||
181 | qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP]; | ||
183 | 182 | ||
184 | for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { | 183 | for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { |
185 | if (qa[i].qt_dquot == NULL || | 184 | if (qa[i].qt_dquot == NULL || |
@@ -339,12 +338,10 @@ xfs_trans_apply_dquot_deltas( | |||
339 | return; | 338 | return; |
340 | 339 | ||
341 | ASSERT(tp->t_dqinfo); | 340 | ASSERT(tp->t_dqinfo); |
342 | qa = tp->t_dqinfo->dqa_usrdquots; | 341 | for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) { |
343 | for (j = 0; j < 2; j++) { | 342 | qa = tp->t_dqinfo->dqs[j]; |
344 | if (qa[0].qt_dquot == NULL) { | 343 | if (qa[0].qt_dquot == NULL) |
345 | qa = tp->t_dqinfo->dqa_grpdquots; | ||
346 | continue; | 344 | continue; |
347 | } | ||
348 | 345 | ||
349 | /* | 346 | /* |
350 | * Lock all of the dquots and join them to the transaction. | 347 | * Lock all of the dquots and join them to the transaction. |
@@ -495,10 +492,6 @@ xfs_trans_apply_dquot_deltas( | |||
495 | ASSERT(dqp->q_res_rtbcount >= | 492 | ASSERT(dqp->q_res_rtbcount >= |
496 | be64_to_cpu(dqp->q_core.d_rtbcount)); | 493 | be64_to_cpu(dqp->q_core.d_rtbcount)); |
497 | } | 494 | } |
498 | /* | ||
499 | * Do the group quotas next | ||
500 | */ | ||
501 | qa = tp->t_dqinfo->dqa_grpdquots; | ||
502 | } | 495 | } |
503 | } | 496 | } |
504 | 497 | ||
@@ -521,9 +514,9 @@ xfs_trans_unreserve_and_mod_dquots( | |||
521 | if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY)) | 514 | if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY)) |
522 | return; | 515 | return; |
523 | 516 | ||
524 | qa = tp->t_dqinfo->dqa_usrdquots; | 517 | for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) { |
518 | qa = tp->t_dqinfo->dqs[j]; | ||
525 | 519 | ||
526 | for (j = 0; j < 2; j++) { | ||
527 | for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { | 520 | for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { |
528 | qtrx = &qa[i]; | 521 | qtrx = &qa[i]; |
529 | /* | 522 | /* |
@@ -565,7 +558,6 @@ xfs_trans_unreserve_and_mod_dquots( | |||
565 | xfs_dqunlock(dqp); | 558 | xfs_dqunlock(dqp); |
566 | 559 | ||
567 | } | 560 | } |
568 | qa = tp->t_dqinfo->dqa_grpdquots; | ||
569 | } | 561 | } |
570 | } | 562 | } |
571 | 563 | ||
@@ -640,8 +632,8 @@ xfs_trans_dqresv( | |||
640 | if ((flags & XFS_QMOPT_FORCE_RES) == 0 && | 632 | if ((flags & XFS_QMOPT_FORCE_RES) == 0 && |
641 | dqp->q_core.d_id && | 633 | dqp->q_core.d_id && |
642 | ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) || | 634 | ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) || |
643 | (XFS_IS_OQUOTA_ENFORCED(dqp->q_mount) && | 635 | (XFS_IS_GQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISGDQ(dqp)) || |
644 | (XFS_QM_ISPDQ(dqp) || XFS_QM_ISGDQ(dqp))))) { | 636 | (XFS_IS_PQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISPDQ(dqp)))) { |
645 | if (nblks > 0) { | 637 | if (nblks > 0) { |
646 | /* | 638 | /* |
647 | * dquot is locked already. See if we'd go over the | 639 | * dquot is locked already. See if we'd go over the |
@@ -748,15 +740,15 @@ error_return: | |||
748 | */ | 740 | */ |
749 | int | 741 | int |
750 | xfs_trans_reserve_quota_bydquots( | 742 | xfs_trans_reserve_quota_bydquots( |
751 | xfs_trans_t *tp, | 743 | struct xfs_trans *tp, |
752 | xfs_mount_t *mp, | 744 | struct xfs_mount *mp, |
753 | xfs_dquot_t *udqp, | 745 | struct xfs_dquot *udqp, |
754 | xfs_dquot_t *gdqp, | 746 | struct xfs_dquot *gdqp, |
755 | long nblks, | 747 | long nblks, |
756 | long ninos, | 748 | long ninos, |
757 | uint flags) | 749 | uint flags) |
758 | { | 750 | { |
759 | int resvd = 0, error; | 751 | int error; |
760 | 752 | ||
761 | if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) | 753 | if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) |
762 | return 0; | 754 | return 0; |
@@ -771,28 +763,24 @@ xfs_trans_reserve_quota_bydquots( | |||
771 | (flags & ~XFS_QMOPT_ENOSPC)); | 763 | (flags & ~XFS_QMOPT_ENOSPC)); |
772 | if (error) | 764 | if (error) |
773 | return error; | 765 | return error; |
774 | resvd = 1; | ||
775 | } | 766 | } |
776 | 767 | ||
777 | if (gdqp) { | 768 | if (gdqp) { |
778 | error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags); | 769 | error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags); |
779 | if (error) { | 770 | if (error) |
780 | /* | 771 | goto unwind_usr; |
781 | * can't do it, so backout previous reservation | ||
782 | */ | ||
783 | if (resvd) { | ||
784 | flags |= XFS_QMOPT_FORCE_RES; | ||
785 | xfs_trans_dqresv(tp, mp, udqp, | ||
786 | -nblks, -ninos, flags); | ||
787 | } | ||
788 | return error; | ||
789 | } | ||
790 | } | 772 | } |
791 | 773 | ||
792 | /* | 774 | /* |
793 | * Didn't change anything critical, so, no need to log | 775 | * Didn't change anything critical, so, no need to log |
794 | */ | 776 | */ |
795 | return 0; | 777 | return 0; |
778 | |||
779 | unwind_usr: | ||
780 | flags |= XFS_QMOPT_FORCE_RES; | ||
781 | if (udqp) | ||
782 | xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags); | ||
783 | return error; | ||
796 | } | 784 | } |
797 | 785 | ||
798 | 786 | ||
@@ -816,8 +804,7 @@ xfs_trans_reserve_quota_nblks( | |||
816 | if (XFS_IS_PQUOTA_ON(mp)) | 804 | if (XFS_IS_PQUOTA_ON(mp)) |
817 | flags |= XFS_QMOPT_ENOSPC; | 805 | flags |= XFS_QMOPT_ENOSPC; |
818 | 806 | ||
819 | ASSERT(ip->i_ino != mp->m_sb.sb_uquotino); | 807 | ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino)); |
820 | ASSERT(ip->i_ino != mp->m_sb.sb_gquotino); | ||
821 | 808 | ||
822 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 809 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
823 | ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) == | 810 | ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) == |
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c index ac6d567704db..53dfe46f3680 100644 --- a/fs/xfs/xfs_trans_inode.c +++ b/fs/xfs/xfs_trans_inode.c | |||
@@ -112,6 +112,17 @@ xfs_trans_log_inode( | |||
112 | ASSERT(ip->i_itemp != NULL); | 112 | ASSERT(ip->i_itemp != NULL); |
113 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 113 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
114 | 114 | ||
115 | /* | ||
116 | * First time we log the inode in a transaction, bump the inode change | ||
117 | * counter if it is configured for this to occur. | ||
118 | */ | ||
119 | if (!(ip->i_itemp->ili_item.li_desc->lid_flags & XFS_LID_DIRTY) && | ||
120 | IS_I_VERSION(VFS_I(ip))) { | ||
121 | inode_inc_iversion(VFS_I(ip)); | ||
122 | ip->i_d.di_changecount = VFS_I(ip)->i_version; | ||
123 | flags |= XFS_ILOG_CORE; | ||
124 | } | ||
125 | |||
115 | tp->t_flags |= XFS_TRANS_DIRTY; | 126 | tp->t_flags |= XFS_TRANS_DIRTY; |
116 | ip->i_itemp->ili_item.li_desc->lid_flags |= XFS_LID_DIRTY; | 127 | ip->i_itemp->ili_item.li_desc->lid_flags |= XFS_LID_DIRTY; |
117 | 128 | ||
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 0176bb21f09a..42c0ef288aeb 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
@@ -322,18 +322,9 @@ xfs_inactive( | |||
322 | xfs_trans_ijoin(tp, ip, 0); | 322 | xfs_trans_ijoin(tp, ip, 0); |
323 | 323 | ||
324 | if (S_ISLNK(ip->i_d.di_mode)) { | 324 | if (S_ISLNK(ip->i_d.di_mode)) { |
325 | /* | 325 | error = xfs_inactive_symlink(ip, &tp); |
326 | * Zero length symlinks _can_ exist. | 326 | if (error) |
327 | */ | 327 | goto out_cancel; |
328 | if (ip->i_d.di_size > XFS_IFORK_DSIZE(ip)) { | ||
329 | error = xfs_inactive_symlink_rmt(ip, &tp); | ||
330 | if (error) | ||
331 | goto out_cancel; | ||
332 | } else if (ip->i_df.if_bytes > 0) { | ||
333 | xfs_idata_realloc(ip, -(ip->i_df.if_bytes), | ||
334 | XFS_DATA_FORK); | ||
335 | ASSERT(ip->i_df.if_bytes == 0); | ||
336 | } | ||
337 | } else if (truncate) { | 328 | } else if (truncate) { |
338 | ip->i_d.di_size = 0; | 329 | ip->i_d.di_size = 0; |
339 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 330 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |