Diffstat (limited to 'fs/xfs')
37 files changed, 604 insertions, 710 deletions
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 71c8c9d2b882..a26739451b53 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1217,7 +1217,7 @@ __xfs_get_blocks( | |||
1217 | lockmode = XFS_ILOCK_EXCL; | 1217 | lockmode = XFS_ILOCK_EXCL; |
1218 | xfs_ilock(ip, lockmode); | 1218 | xfs_ilock(ip, lockmode); |
1219 | } else { | 1219 | } else { |
1220 | lockmode = xfs_ilock_map_shared(ip); | 1220 | lockmode = xfs_ilock_data_map_shared(ip); |
1221 | } | 1221 | } |
1222 | 1222 | ||
1223 | ASSERT(offset <= mp->m_super->s_maxbytes); | 1223 | ASSERT(offset <= mp->m_super->s_maxbytes); |
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index b86127072ac3..01b6a0102fbd 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -164,6 +164,7 @@ xfs_attr_get( | |||
164 | { | 164 | { |
165 | int error; | 165 | int error; |
166 | struct xfs_name xname; | 166 | struct xfs_name xname; |
167 | uint lock_mode; | ||
167 | 168 | ||
168 | XFS_STATS_INC(xs_attr_get); | 169 | XFS_STATS_INC(xs_attr_get); |
169 | 170 | ||
@@ -174,9 +175,9 @@ xfs_attr_get( | |||
174 | if (error) | 175 | if (error) |
175 | return error; | 176 | return error; |
176 | 177 | ||
177 | xfs_ilock(ip, XFS_ILOCK_SHARED); | 178 | lock_mode = xfs_ilock_attr_map_shared(ip); |
178 | error = xfs_attr_get_int(ip, &xname, value, valuelenp, flags); | 179 | error = xfs_attr_get_int(ip, &xname, value, valuelenp, flags); |
179 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 180 | xfs_iunlock(ip, lock_mode); |
180 | return(error); | 181 | return(error); |
181 | } | 182 | } |
182 | 183 | ||
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 2d174b128153..01db96f60cf0 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -507,17 +507,17 @@ xfs_attr_list_int( | |||
507 | { | 507 | { |
508 | int error; | 508 | int error; |
509 | xfs_inode_t *dp = context->dp; | 509 | xfs_inode_t *dp = context->dp; |
510 | uint lock_mode; | ||
510 | 511 | ||
511 | XFS_STATS_INC(xs_attr_list); | 512 | XFS_STATS_INC(xs_attr_list); |
512 | 513 | ||
513 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) | 514 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) |
514 | return EIO; | 515 | return EIO; |
515 | 516 | ||
516 | xfs_ilock(dp, XFS_ILOCK_SHARED); | ||
517 | |||
518 | /* | 517 | /* |
519 | * Decide on what work routines to call based on the inode size. | 518 | * Decide on what work routines to call based on the inode size. |
520 | */ | 519 | */ |
520 | lock_mode = xfs_ilock_attr_map_shared(dp); | ||
521 | if (!xfs_inode_hasattr(dp)) { | 521 | if (!xfs_inode_hasattr(dp)) { |
522 | error = 0; | 522 | error = 0; |
523 | } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { | 523 | } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { |
@@ -527,9 +527,7 @@ xfs_attr_list_int( | |||
527 | } else { | 527 | } else { |
528 | error = xfs_attr_node_list(context); | 528 | error = xfs_attr_node_list(context); |
529 | } | 529 | } |
530 | 530 | xfs_iunlock(dp, lock_mode); | |
531 | xfs_iunlock(dp, XFS_ILOCK_SHARED); | ||
532 | |||
533 | return error; | 531 | return error; |
534 | } | 532 | } |
535 | 533 | ||
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 3b2c14b6f0fb..152543c4ca70 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -4013,6 +4013,7 @@ xfs_bmapi_read( | |||
4013 | ASSERT(*nmap >= 1); | 4013 | ASSERT(*nmap >= 1); |
4014 | ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE| | 4014 | ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE| |
4015 | XFS_BMAPI_IGSTATE))); | 4015 | XFS_BMAPI_IGSTATE))); |
4016 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)); | ||
4016 | 4017 | ||
4017 | if (unlikely(XFS_TEST_ERROR( | 4018 | if (unlikely(XFS_TEST_ERROR( |
4018 | (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && | 4019 | (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && |
@@ -4207,6 +4208,7 @@ xfs_bmapi_delay( | |||
4207 | ASSERT(*nmap >= 1); | 4208 | ASSERT(*nmap >= 1); |
4208 | ASSERT(*nmap <= XFS_BMAP_MAX_NMAP); | 4209 | ASSERT(*nmap <= XFS_BMAP_MAX_NMAP); |
4209 | ASSERT(!(flags & ~XFS_BMAPI_ENTIRE)); | 4210 | ASSERT(!(flags & ~XFS_BMAPI_ENTIRE)); |
4211 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | ||
4210 | 4212 | ||
4211 | if (unlikely(XFS_TEST_ERROR( | 4213 | if (unlikely(XFS_TEST_ERROR( |
4212 | (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS && | 4214 | (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS && |
@@ -4500,6 +4502,7 @@ xfs_bmapi_write( | |||
4500 | ASSERT(tp != NULL); | 4502 | ASSERT(tp != NULL); |
4501 | ASSERT(len > 0); | 4503 | ASSERT(len > 0); |
4502 | ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL); | 4504 | ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL); |
4505 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | ||
4503 | 4506 | ||
4504 | if (unlikely(XFS_TEST_ERROR( | 4507 | if (unlikely(XFS_TEST_ERROR( |
4505 | (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && | 4508 | (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && |
@@ -5051,6 +5054,7 @@ xfs_bunmapi( | |||
5051 | if (XFS_FORCED_SHUTDOWN(mp)) | 5054 | if (XFS_FORCED_SHUTDOWN(mp)) |
5052 | return XFS_ERROR(EIO); | 5055 | return XFS_ERROR(EIO); |
5053 | 5056 | ||
5057 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | ||
5054 | ASSERT(len > 0); | 5058 | ASSERT(len > 0); |
5055 | ASSERT(nexts >= 0); | 5059 | ASSERT(nexts >= 0); |
5056 | 5060 | ||
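xfs_bmapi_read(), xfs_bmapi_delay(), xfs_bmapi_write() and xfs_bunmapi() now assert that the inode lock is held, so every caller has to take it explicitly. A minimal sketch of the expected read-side pattern follows; the helper name is hypothetical, while the locking itself mirrors the xfs_zero_remaining_bytes() hunk further down in this diff.

/*
 * Sketch only, not part of this commit: take the data-fork mapping
 * lock around xfs_bmapi_read() so the new ASSERT is satisfied.
 * xfs_read_one_mapping() is a made-up name for illustration.
 */
STATIC int
xfs_read_one_mapping(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	struct xfs_bmbt_irec	*imap)
{
	int			nimaps = 1;
	uint			lock_mode;
	int			error;

	lock_mode = xfs_ilock_data_map_shared(ip);
	error = xfs_bmapi_read(ip, offset_fsb, 1, imap, &nimaps, 0);
	xfs_iunlock(ip, lock_mode);
	return error;
}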
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 82e0dab46ee5..f264616080ca 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -618,22 +618,27 @@ xfs_getbmap( | |||
618 | return XFS_ERROR(ENOMEM); | 618 | return XFS_ERROR(ENOMEM); |
619 | 619 | ||
620 | xfs_ilock(ip, XFS_IOLOCK_SHARED); | 620 | xfs_ilock(ip, XFS_IOLOCK_SHARED); |
621 | if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) { | 621 | if (whichfork == XFS_DATA_FORK) { |
622 | if (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size) { | 622 | if (!(iflags & BMV_IF_DELALLOC) && |
623 | (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) { | ||
623 | error = -filemap_write_and_wait(VFS_I(ip)->i_mapping); | 624 | error = -filemap_write_and_wait(VFS_I(ip)->i_mapping); |
624 | if (error) | 625 | if (error) |
625 | goto out_unlock_iolock; | 626 | goto out_unlock_iolock; |
627 | |||
628 | /* | ||
629 | * Even after flushing the inode, there can still be | ||
630 | * delalloc blocks on the inode beyond EOF due to | ||
631 | * speculative preallocation. These are not removed | ||
632 | * until the release function is called or the inode | ||
633 | * is inactivated. Hence we cannot assert here that | ||
634 | * ip->i_delayed_blks == 0. | ||
635 | */ | ||
626 | } | 636 | } |
627 | /* | ||
628 | * even after flushing the inode, there can still be delalloc | ||
629 | * blocks on the inode beyond EOF due to speculative | ||
630 | * preallocation. These are not removed until the release | ||
631 | * function is called or the inode is inactivated. Hence we | ||
632 | * cannot assert here that ip->i_delayed_blks == 0. | ||
633 | */ | ||
634 | } | ||
635 | 637 | ||
636 | lock = xfs_ilock_map_shared(ip); | 638 | lock = xfs_ilock_data_map_shared(ip); |
639 | } else { | ||
640 | lock = xfs_ilock_attr_map_shared(ip); | ||
641 | } | ||
637 | 642 | ||
638 | /* | 643 | /* |
639 | * Don't let nex be bigger than the number of extents | 644 | * Don't let nex be bigger than the number of extents |
@@ -738,7 +743,7 @@ xfs_getbmap( | |||
738 | out_free_map: | 743 | out_free_map: |
739 | kmem_free(map); | 744 | kmem_free(map); |
740 | out_unlock_ilock: | 745 | out_unlock_ilock: |
741 | xfs_iunlock_map_shared(ip, lock); | 746 | xfs_iunlock(ip, lock); |
742 | out_unlock_iolock: | 747 | out_unlock_iolock: |
743 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); | 748 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); |
744 | 749 | ||
@@ -1169,9 +1174,15 @@ xfs_zero_remaining_bytes( | |||
1169 | xfs_buf_unlock(bp); | 1174 | xfs_buf_unlock(bp); |
1170 | 1175 | ||
1171 | for (offset = startoff; offset <= endoff; offset = lastoffset + 1) { | 1176 | for (offset = startoff; offset <= endoff; offset = lastoffset + 1) { |
1177 | uint lock_mode; | ||
1178 | |||
1172 | offset_fsb = XFS_B_TO_FSBT(mp, offset); | 1179 | offset_fsb = XFS_B_TO_FSBT(mp, offset); |
1173 | nimap = 1; | 1180 | nimap = 1; |
1181 | |||
1182 | lock_mode = xfs_ilock_data_map_shared(ip); | ||
1174 | error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0); | 1183 | error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0); |
1184 | xfs_iunlock(ip, lock_mode); | ||
1185 | |||
1175 | if (error || nimap < 1) | 1186 | if (error || nimap < 1) |
1176 | break; | 1187 | break; |
1177 | ASSERT(imap.br_blockcount >= 1); | 1188 | ASSERT(imap.br_blockcount >= 1); |
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index afe7645e4b2b..9fccfb594291 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1593,12 +1593,11 @@ xfs_free_buftarg( | |||
1593 | kmem_free(btp); | 1593 | kmem_free(btp); |
1594 | } | 1594 | } |
1595 | 1595 | ||
1596 | STATIC int | 1596 | int |
1597 | xfs_setsize_buftarg_flags( | 1597 | xfs_setsize_buftarg( |
1598 | xfs_buftarg_t *btp, | 1598 | xfs_buftarg_t *btp, |
1599 | unsigned int blocksize, | 1599 | unsigned int blocksize, |
1600 | unsigned int sectorsize, | 1600 | unsigned int sectorsize) |
1601 | int verbose) | ||
1602 | { | 1601 | { |
1603 | btp->bt_bsize = blocksize; | 1602 | btp->bt_bsize = blocksize; |
1604 | btp->bt_sshift = ffs(sectorsize) - 1; | 1603 | btp->bt_sshift = ffs(sectorsize) - 1; |
@@ -1619,26 +1618,17 @@ xfs_setsize_buftarg_flags( | |||
1619 | } | 1618 | } |
1620 | 1619 | ||
1621 | /* | 1620 | /* |
1622 | * When allocating the initial buffer target we have not yet | 1621 | * When allocating the initial buffer target we have not yet |
1623 | * read in the superblock, so don't know what sized sectors | 1622 | * read in the superblock, so don't know what sized sectors |
1624 | * are being used at this early stage. Play safe. | 1623 | * are being used at this early stage. Play safe. |
1625 | */ | 1624 | */ |
1626 | STATIC int | 1625 | STATIC int |
1627 | xfs_setsize_buftarg_early( | 1626 | xfs_setsize_buftarg_early( |
1628 | xfs_buftarg_t *btp, | 1627 | xfs_buftarg_t *btp, |
1629 | struct block_device *bdev) | 1628 | struct block_device *bdev) |
1630 | { | 1629 | { |
1631 | return xfs_setsize_buftarg_flags(btp, | 1630 | return xfs_setsize_buftarg(btp, PAGE_SIZE, |
1632 | PAGE_SIZE, bdev_logical_block_size(bdev), 0); | 1631 | bdev_logical_block_size(bdev)); |
1633 | } | ||
1634 | |||
1635 | int | ||
1636 | xfs_setsize_buftarg( | ||
1637 | xfs_buftarg_t *btp, | ||
1638 | unsigned int blocksize, | ||
1639 | unsigned int sectorsize) | ||
1640 | { | ||
1641 | return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1); | ||
1642 | } | 1632 | } |
1643 | 1633 | ||
1644 | xfs_buftarg_t * | 1634 | xfs_buftarg_t * |
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 2227b9b050bb..33149113e333 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -182,21 +182,47 @@ xfs_buf_item_size( | |||
182 | trace_xfs_buf_item_size(bip); | 182 | trace_xfs_buf_item_size(bip); |
183 | } | 183 | } |
184 | 184 | ||
185 | static struct xfs_log_iovec * | 185 | static inline void |
186 | xfs_buf_item_copy_iovec( | ||
187 | struct xfs_log_vec *lv, | ||
188 | struct xfs_log_iovec **vecp, | ||
189 | struct xfs_buf *bp, | ||
190 | uint offset, | ||
191 | int first_bit, | ||
192 | uint nbits) | ||
193 | { | ||
194 | offset += first_bit * XFS_BLF_CHUNK; | ||
195 | xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK, | ||
196 | xfs_buf_offset(bp, offset), | ||
197 | nbits * XFS_BLF_CHUNK); | ||
198 | } | ||
199 | |||
200 | static inline bool | ||
201 | xfs_buf_item_straddle( | ||
202 | struct xfs_buf *bp, | ||
203 | uint offset, | ||
204 | int next_bit, | ||
205 | int last_bit) | ||
206 | { | ||
207 | return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) != | ||
208 | (xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) + | ||
209 | XFS_BLF_CHUNK); | ||
210 | } | ||
211 | |||
212 | static void | ||
186 | xfs_buf_item_format_segment( | 213 | xfs_buf_item_format_segment( |
187 | struct xfs_buf_log_item *bip, | 214 | struct xfs_buf_log_item *bip, |
188 | struct xfs_log_iovec *vecp, | 215 | struct xfs_log_vec *lv, |
216 | struct xfs_log_iovec **vecp, | ||
189 | uint offset, | 217 | uint offset, |
190 | struct xfs_buf_log_format *blfp) | 218 | struct xfs_buf_log_format *blfp) |
191 | { | 219 | { |
192 | struct xfs_buf *bp = bip->bli_buf; | 220 | struct xfs_buf *bp = bip->bli_buf; |
193 | uint base_size; | 221 | uint base_size; |
194 | uint nvecs; | ||
195 | int first_bit; | 222 | int first_bit; |
196 | int last_bit; | 223 | int last_bit; |
197 | int next_bit; | 224 | int next_bit; |
198 | uint nbits; | 225 | uint nbits; |
199 | uint buffer_offset; | ||
200 | 226 | ||
201 | /* copy the flags across from the base format item */ | 227 | /* copy the flags across from the base format item */ |
202 | blfp->blf_flags = bip->__bli_format.blf_flags; | 228 | blfp->blf_flags = bip->__bli_format.blf_flags; |
@@ -208,21 +234,17 @@ xfs_buf_item_format_segment( | |||
208 | */ | 234 | */ |
209 | base_size = xfs_buf_log_format_size(blfp); | 235 | base_size = xfs_buf_log_format_size(blfp); |
210 | 236 | ||
211 | nvecs = 0; | ||
212 | first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0); | 237 | first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0); |
213 | if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) { | 238 | if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) { |
214 | /* | 239 | /* |
215 | * If the map is not be dirty in the transaction, mark | 240 | * If the map is not be dirty in the transaction, mark |
216 | * the size as zero and do not advance the vector pointer. | 241 | * the size as zero and do not advance the vector pointer. |
217 | */ | 242 | */ |
218 | goto out; | 243 | return; |
219 | } | 244 | } |
220 | 245 | ||
221 | vecp->i_addr = blfp; | 246 | blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size); |
222 | vecp->i_len = base_size; | 247 | blfp->blf_size = 1; |
223 | vecp->i_type = XLOG_REG_TYPE_BFORMAT; | ||
224 | vecp++; | ||
225 | nvecs = 1; | ||
226 | 248 | ||
227 | if (bip->bli_flags & XFS_BLI_STALE) { | 249 | if (bip->bli_flags & XFS_BLI_STALE) { |
228 | /* | 250 | /* |
@@ -232,14 +254,13 @@ xfs_buf_item_format_segment( | |||
232 | */ | 254 | */ |
233 | trace_xfs_buf_item_format_stale(bip); | 255 | trace_xfs_buf_item_format_stale(bip); |
234 | ASSERT(blfp->blf_flags & XFS_BLF_CANCEL); | 256 | ASSERT(blfp->blf_flags & XFS_BLF_CANCEL); |
235 | goto out; | 257 | return; |
236 | } | 258 | } |
237 | 259 | ||
238 | 260 | ||
239 | /* | 261 | /* |
240 | * Fill in an iovec for each set of contiguous chunks. | 262 | * Fill in an iovec for each set of contiguous chunks. |
241 | */ | 263 | */ |
242 | |||
243 | last_bit = first_bit; | 264 | last_bit = first_bit; |
244 | nbits = 1; | 265 | nbits = 1; |
245 | for (;;) { | 266 | for (;;) { |
@@ -252,42 +273,22 @@ xfs_buf_item_format_segment( | |||
252 | next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, | 273 | next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, |
253 | (uint)last_bit + 1); | 274 | (uint)last_bit + 1); |
254 | /* | 275 | /* |
255 | * If we run out of bits fill in the last iovec and get | 276 | * If we run out of bits fill in the last iovec and get out of |
256 | * out of the loop. | 277 | * the loop. Else if we start a new set of bits then fill in |
257 | * Else if we start a new set of bits then fill in the | 278 | * the iovec for the series we were looking at and start |
258 | * iovec for the series we were looking at and start | 279 | * counting the bits in the new one. Else we're still in the |
259 | * counting the bits in the new one. | 280 | * same set of bits so just keep counting and scanning. |
260 | * Else we're still in the same set of bits so just | ||
261 | * keep counting and scanning. | ||
262 | */ | 281 | */ |
263 | if (next_bit == -1) { | 282 | if (next_bit == -1) { |
264 | buffer_offset = offset + first_bit * XFS_BLF_CHUNK; | 283 | xfs_buf_item_copy_iovec(lv, vecp, bp, offset, |
265 | vecp->i_addr = xfs_buf_offset(bp, buffer_offset); | 284 | first_bit, nbits); |
266 | vecp->i_len = nbits * XFS_BLF_CHUNK; | 285 | blfp->blf_size++; |
267 | vecp->i_type = XLOG_REG_TYPE_BCHUNK; | ||
268 | nvecs++; | ||
269 | break; | 286 | break; |
270 | } else if (next_bit != last_bit + 1) { | 287 | } else if (next_bit != last_bit + 1 || |
271 | buffer_offset = offset + first_bit * XFS_BLF_CHUNK; | 288 | xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) { |
272 | vecp->i_addr = xfs_buf_offset(bp, buffer_offset); | 289 | xfs_buf_item_copy_iovec(lv, vecp, bp, offset, |
273 | vecp->i_len = nbits * XFS_BLF_CHUNK; | 290 | first_bit, nbits); |
274 | vecp->i_type = XLOG_REG_TYPE_BCHUNK; | 291 | blfp->blf_size++; |
275 | nvecs++; | ||
276 | vecp++; | ||
277 | first_bit = next_bit; | ||
278 | last_bit = next_bit; | ||
279 | nbits = 1; | ||
280 | } else if (xfs_buf_offset(bp, offset + | ||
281 | (next_bit << XFS_BLF_SHIFT)) != | ||
282 | (xfs_buf_offset(bp, offset + | ||
283 | (last_bit << XFS_BLF_SHIFT)) + | ||
284 | XFS_BLF_CHUNK)) { | ||
285 | buffer_offset = offset + first_bit * XFS_BLF_CHUNK; | ||
286 | vecp->i_addr = xfs_buf_offset(bp, buffer_offset); | ||
287 | vecp->i_len = nbits * XFS_BLF_CHUNK; | ||
288 | vecp->i_type = XLOG_REG_TYPE_BCHUNK; | ||
289 | nvecs++; | ||
290 | vecp++; | ||
291 | first_bit = next_bit; | 292 | first_bit = next_bit; |
292 | last_bit = next_bit; | 293 | last_bit = next_bit; |
293 | nbits = 1; | 294 | nbits = 1; |
@@ -296,9 +297,6 @@ xfs_buf_item_format_segment( | |||
296 | nbits++; | 297 | nbits++; |
297 | } | 298 | } |
298 | } | 299 | } |
299 | out: | ||
300 | blfp->blf_size = nvecs; | ||
301 | return vecp; | ||
302 | } | 300 | } |
303 | 301 | ||
304 | /* | 302 | /* |
@@ -310,10 +308,11 @@ out: | |||
310 | STATIC void | 308 | STATIC void |
311 | xfs_buf_item_format( | 309 | xfs_buf_item_format( |
312 | struct xfs_log_item *lip, | 310 | struct xfs_log_item *lip, |
313 | struct xfs_log_iovec *vecp) | 311 | struct xfs_log_vec *lv) |
314 | { | 312 | { |
315 | struct xfs_buf_log_item *bip = BUF_ITEM(lip); | 313 | struct xfs_buf_log_item *bip = BUF_ITEM(lip); |
316 | struct xfs_buf *bp = bip->bli_buf; | 314 | struct xfs_buf *bp = bip->bli_buf; |
315 | struct xfs_log_iovec *vecp = NULL; | ||
317 | uint offset = 0; | 316 | uint offset = 0; |
318 | int i; | 317 | int i; |
319 | 318 | ||
@@ -354,8 +353,8 @@ xfs_buf_item_format( | |||
354 | } | 353 | } |
355 | 354 | ||
356 | for (i = 0; i < bip->bli_format_count; i++) { | 355 | for (i = 0; i < bip->bli_format_count; i++) { |
357 | vecp = xfs_buf_item_format_segment(bip, vecp, offset, | 356 | xfs_buf_item_format_segment(bip, lv, &vecp, offset, |
358 | &bip->bli_formats[i]); | 357 | &bip->bli_formats[i]); |
359 | offset += bp->b_maps[i].bm_len; | 358 | offset += bp->b_maps[i].bm_len; |
360 | } | 359 | } |
361 | 360 | ||
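The ->iop_format() method now receives a struct xfs_log_vec and builds its iovecs through xlog_copy_iovec() (and xlog_prepare_iovec()/xlog_finish_iovec()) instead of filling a caller-supplied iovec array. A minimal sketch of the new shape, using a hypothetical "foo" item rather than anything added by this commit:

/*
 * Sketch of the new ->iop_format() calling convention used throughout
 * this series.  "foo" is a made-up log item; the real conversions are
 * the buf, dquot, quotaoff, EFI/EFD and icreate items in this diff.
 */
STATIC void
xfs_foo_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_foo_log_item	*foop = FOO_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_FOO_FORMAT,
			&foop->foo_format, sizeof(foop->foo_format));
}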
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index c4e50c6ed584..aead369e1c30 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -674,6 +674,7 @@ xfs_readdir( | |||
674 | { | 674 | { |
675 | int rval; /* return value */ | 675 | int rval; /* return value */ |
676 | int v; /* type-checking value */ | 676 | int v; /* type-checking value */ |
677 | uint lock_mode; | ||
677 | 678 | ||
678 | trace_xfs_readdir(dp); | 679 | trace_xfs_readdir(dp); |
679 | 680 | ||
@@ -683,6 +684,7 @@ xfs_readdir( | |||
683 | ASSERT(S_ISDIR(dp->i_d.di_mode)); | 684 | ASSERT(S_ISDIR(dp->i_d.di_mode)); |
684 | XFS_STATS_INC(xs_dir_getdents); | 685 | XFS_STATS_INC(xs_dir_getdents); |
685 | 686 | ||
687 | lock_mode = xfs_ilock_data_map_shared(dp); | ||
686 | if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) | 688 | if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) |
687 | rval = xfs_dir2_sf_getdents(dp, ctx); | 689 | rval = xfs_dir2_sf_getdents(dp, ctx); |
688 | else if ((rval = xfs_dir2_isblock(NULL, dp, &v))) | 690 | else if ((rval = xfs_dir2_isblock(NULL, dp, &v))) |
@@ -691,5 +693,7 @@ xfs_readdir( | |||
691 | rval = xfs_dir2_block_getdents(dp, ctx); | 693 | rval = xfs_dir2_block_getdents(dp, ctx); |
692 | else | 694 | else |
693 | rval = xfs_dir2_leaf_getdents(dp, ctx, bufsize); | 695 | rval = xfs_dir2_leaf_getdents(dp, ctx, bufsize); |
696 | xfs_iunlock(dp, lock_mode); | ||
697 | |||
694 | return rval; | 698 | return rval; |
695 | } | 699 | } |
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index aafc6e46cb58..3725fb1b902b 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -170,6 +170,7 @@ xfs_dir2_block_to_sf( | |||
170 | char *ptr; /* current data pointer */ | 170 | char *ptr; /* current data pointer */ |
171 | xfs_dir2_sf_entry_t *sfep; /* shortform entry */ | 171 | xfs_dir2_sf_entry_t *sfep; /* shortform entry */ |
172 | xfs_dir2_sf_hdr_t *sfp; /* shortform directory header */ | 172 | xfs_dir2_sf_hdr_t *sfp; /* shortform directory header */ |
173 | xfs_dir2_sf_hdr_t *dst; /* temporary data buffer */ | ||
173 | 174 | ||
174 | trace_xfs_dir2_block_to_sf(args); | 175 | trace_xfs_dir2_block_to_sf(args); |
175 | 176 | ||
@@ -177,35 +178,20 @@ xfs_dir2_block_to_sf( | |||
177 | mp = dp->i_mount; | 178 | mp = dp->i_mount; |
178 | 179 | ||
179 | /* | 180 | /* |
180 | * Make a copy of the block data, so we can shrink the inode | 181 | * allocate a temporary destination buffer the size of the inode |
181 | * and add local data. | 182 | * to format the data into. Once we have formatted the data, we |
183 | * can free the block and copy the formatted data into the inode literal | ||
184 | * area. | ||
182 | */ | 185 | */ |
183 | hdr = kmem_alloc(mp->m_dirblksize, KM_SLEEP); | 186 | dst = kmem_alloc(mp->m_sb.sb_inodesize, KM_SLEEP); |
184 | memcpy(hdr, bp->b_addr, mp->m_dirblksize); | 187 | hdr = bp->b_addr; |
185 | logflags = XFS_ILOG_CORE; | ||
186 | if ((error = xfs_dir2_shrink_inode(args, mp->m_dirdatablk, bp))) { | ||
187 | ASSERT(error != ENOSPC); | ||
188 | goto out; | ||
189 | } | ||
190 | 188 | ||
191 | /* | 189 | /* |
192 | * The buffer is now unconditionally gone, whether | ||
193 | * xfs_dir2_shrink_inode worked or not. | ||
194 | * | ||
195 | * Convert the inode to local format. | ||
196 | */ | ||
197 | dp->i_df.if_flags &= ~XFS_IFEXTENTS; | ||
198 | dp->i_df.if_flags |= XFS_IFINLINE; | ||
199 | dp->i_d.di_format = XFS_DINODE_FMT_LOCAL; | ||
200 | ASSERT(dp->i_df.if_bytes == 0); | ||
201 | xfs_idata_realloc(dp, size, XFS_DATA_FORK); | ||
202 | logflags |= XFS_ILOG_DDATA; | ||
203 | /* | ||
204 | * Copy the header into the newly allocate local space. | 190 | * Copy the header into the newly allocate local space. |
205 | */ | 191 | */ |
206 | sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; | 192 | sfp = (xfs_dir2_sf_hdr_t *)dst; |
207 | memcpy(sfp, sfhp, xfs_dir2_sf_hdr_size(sfhp->i8count)); | 193 | memcpy(sfp, sfhp, xfs_dir2_sf_hdr_size(sfhp->i8count)); |
208 | dp->i_d.di_size = size; | 194 | |
209 | /* | 195 | /* |
210 | * Set up to loop over the block's entries. | 196 | * Set up to loop over the block's entries. |
211 | */ | 197 | */ |
@@ -258,10 +244,34 @@ xfs_dir2_block_to_sf( | |||
258 | ptr += dp->d_ops->data_entsize(dep->namelen); | 244 | ptr += dp->d_ops->data_entsize(dep->namelen); |
259 | } | 245 | } |
260 | ASSERT((char *)sfep - (char *)sfp == size); | 246 | ASSERT((char *)sfep - (char *)sfp == size); |
247 | |||
248 | /* now we are done with the block, we can shrink the inode */ | ||
249 | logflags = XFS_ILOG_CORE; | ||
250 | error = xfs_dir2_shrink_inode(args, mp->m_dirdatablk, bp); | ||
251 | if (error) { | ||
252 | ASSERT(error != ENOSPC); | ||
253 | goto out; | ||
254 | } | ||
255 | |||
256 | /* | ||
257 | * The buffer is now unconditionally gone, whether | ||
258 | * xfs_dir2_shrink_inode worked or not. | ||
259 | * | ||
260 | * Convert the inode to local format and copy the data in. | ||
261 | */ | ||
262 | dp->i_df.if_flags &= ~XFS_IFEXTENTS; | ||
263 | dp->i_df.if_flags |= XFS_IFINLINE; | ||
264 | dp->i_d.di_format = XFS_DINODE_FMT_LOCAL; | ||
265 | ASSERT(dp->i_df.if_bytes == 0); | ||
266 | xfs_idata_realloc(dp, size, XFS_DATA_FORK); | ||
267 | |||
268 | logflags |= XFS_ILOG_DDATA; | ||
269 | memcpy(dp->i_df.if_u1.if_data, dst, size); | ||
270 | dp->i_d.di_size = size; | ||
261 | xfs_dir2_sf_check(args); | 271 | xfs_dir2_sf_check(args); |
262 | out: | 272 | out: |
263 | xfs_trans_log_inode(args->trans, dp, logflags); | 273 | xfs_trans_log_inode(args->trans, dp, logflags); |
264 | kmem_free(hdr); | 274 | kmem_free(dst); |
265 | return error; | 275 | return error; |
266 | } | 276 | } |
267 | 277 | ||
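For reference, the block-to-shortform conversion above now formats into a temporary buffer sized to the inode before freeing the directory block. A condensed sketch of the resulting order of operations, abridged from the hunks above with error handling trimmed:

/*
 * Condensed ordering after this change: format into a temporary buffer
 * first, only then free the block and switch the fork to local format.
 */
dst = kmem_alloc(mp->m_sb.sb_inodesize, KM_SLEEP);
/* ... walk the block entries and format the shortform dir into dst ... */
error = xfs_dir2_shrink_inode(args, mp->m_dirdatablk, bp);
if (!error) {
	dp->i_df.if_flags &= ~XFS_IFEXTENTS;
	dp->i_df.if_flags |= XFS_IFINLINE;
	dp->i_d.di_format = XFS_DINODE_FMT_LOCAL;
	xfs_idata_realloc(dp, size, XFS_DATA_FORK);
	memcpy(dp->i_df.if_u1.if_data, dst, size);
	dp->i_d.di_size = size;
}
kmem_free(dst);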
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 6b1e695caf0e..7aeb4c895b32 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -469,16 +469,17 @@ xfs_qm_dqtobp( | |||
469 | struct xfs_mount *mp = dqp->q_mount; | 469 | struct xfs_mount *mp = dqp->q_mount; |
470 | xfs_dqid_t id = be32_to_cpu(dqp->q_core.d_id); | 470 | xfs_dqid_t id = be32_to_cpu(dqp->q_core.d_id); |
471 | struct xfs_trans *tp = (tpp ? *tpp : NULL); | 471 | struct xfs_trans *tp = (tpp ? *tpp : NULL); |
472 | uint lock_mode; | ||
472 | 473 | ||
473 | dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk; | 474 | dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk; |
474 | 475 | ||
475 | xfs_ilock(quotip, XFS_ILOCK_SHARED); | 476 | lock_mode = xfs_ilock_data_map_shared(quotip); |
476 | if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) { | 477 | if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) { |
477 | /* | 478 | /* |
478 | * Return if this type of quotas is turned off while we | 479 | * Return if this type of quotas is turned off while we |
479 | * didn't have the quota inode lock. | 480 | * didn't have the quota inode lock. |
480 | */ | 481 | */ |
481 | xfs_iunlock(quotip, XFS_ILOCK_SHARED); | 482 | xfs_iunlock(quotip, lock_mode); |
482 | return ESRCH; | 483 | return ESRCH; |
483 | } | 484 | } |
484 | 485 | ||
@@ -488,7 +489,7 @@ xfs_qm_dqtobp( | |||
488 | error = xfs_bmapi_read(quotip, dqp->q_fileoffset, | 489 | error = xfs_bmapi_read(quotip, dqp->q_fileoffset, |
489 | XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0); | 490 | XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0); |
490 | 491 | ||
491 | xfs_iunlock(quotip, XFS_ILOCK_SHARED); | 492 | xfs_iunlock(quotip, lock_mode); |
492 | if (error) | 493 | if (error) |
493 | return error; | 494 | return error; |
494 | 495 | ||
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 92e5f62eefc6..f33fbaaa4d8a 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -57,20 +57,24 @@ xfs_qm_dquot_logitem_size( | |||
57 | STATIC void | 57 | STATIC void |
58 | xfs_qm_dquot_logitem_format( | 58 | xfs_qm_dquot_logitem_format( |
59 | struct xfs_log_item *lip, | 59 | struct xfs_log_item *lip, |
60 | struct xfs_log_iovec *logvec) | 60 | struct xfs_log_vec *lv) |
61 | { | 61 | { |
62 | struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip); | 62 | struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip); |
63 | 63 | struct xfs_log_iovec *vecp = NULL; | |
64 | logvec->i_addr = &qlip->qli_format; | 64 | struct xfs_dq_logformat *qlf; |
65 | logvec->i_len = sizeof(xfs_dq_logformat_t); | 65 | |
66 | logvec->i_type = XLOG_REG_TYPE_QFORMAT; | 66 | qlf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_QFORMAT); |
67 | logvec++; | 67 | qlf->qlf_type = XFS_LI_DQUOT; |
68 | logvec->i_addr = &qlip->qli_dquot->q_core; | 68 | qlf->qlf_size = 2; |
69 | logvec->i_len = sizeof(xfs_disk_dquot_t); | 69 | qlf->qlf_id = be32_to_cpu(qlip->qli_dquot->q_core.d_id); |
70 | logvec->i_type = XLOG_REG_TYPE_DQUOT; | 70 | qlf->qlf_blkno = qlip->qli_dquot->q_blkno; |
71 | 71 | qlf->qlf_len = 1; | |
72 | qlip->qli_format.qlf_size = 2; | 72 | qlf->qlf_boffset = qlip->qli_dquot->q_bufoffset; |
73 | 73 | xlog_finish_iovec(lv, vecp, sizeof(struct xfs_dq_logformat)); | |
74 | |||
75 | xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_DQUOT, | ||
76 | &qlip->qli_dquot->q_core, | ||
77 | sizeof(struct xfs_disk_dquot)); | ||
74 | } | 78 | } |
75 | 79 | ||
76 | /* | 80 | /* |
@@ -257,18 +261,6 @@ xfs_qm_dquot_logitem_init( | |||
257 | xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT, | 261 | xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT, |
258 | &xfs_dquot_item_ops); | 262 | &xfs_dquot_item_ops); |
259 | lp->qli_dquot = dqp; | 263 | lp->qli_dquot = dqp; |
260 | lp->qli_format.qlf_type = XFS_LI_DQUOT; | ||
261 | lp->qli_format.qlf_id = be32_to_cpu(dqp->q_core.d_id); | ||
262 | lp->qli_format.qlf_blkno = dqp->q_blkno; | ||
263 | lp->qli_format.qlf_len = 1; | ||
264 | /* | ||
265 | * This is just the offset of this dquot within its buffer | ||
266 | * (which is currently 1 FSB and probably won't change). | ||
267 | * Hence 32 bits for this offset should be just fine. | ||
268 | * Alternatively, we can store (bufoffset / sizeof(xfs_dqblk_t)) | ||
269 | * here, and recompute it at recovery time. | ||
270 | */ | ||
271 | lp->qli_format.qlf_boffset = (__uint32_t)dqp->q_bufoffset; | ||
272 | } | 264 | } |
273 | 265 | ||
274 | /*------------------ QUOTAOFF LOG ITEMS -------------------*/ | 266 | /*------------------ QUOTAOFF LOG ITEMS -------------------*/ |
@@ -294,26 +286,20 @@ xfs_qm_qoff_logitem_size( | |||
294 | *nbytes += sizeof(struct xfs_qoff_logitem); | 286 | *nbytes += sizeof(struct xfs_qoff_logitem); |
295 | } | 287 | } |
296 | 288 | ||
297 | /* | ||
298 | * This is called to fill in the vector of log iovecs for the | ||
299 | * given quotaoff log item. We use only 1 iovec, and we point that | ||
300 | * at the quotaoff_log_format structure embedded in the quotaoff item. | ||
301 | * It is at this point that we assert that all of the extent | ||
302 | * slots in the quotaoff item have been filled. | ||
303 | */ | ||
304 | STATIC void | 289 | STATIC void |
305 | xfs_qm_qoff_logitem_format( | 290 | xfs_qm_qoff_logitem_format( |
306 | struct xfs_log_item *lip, | 291 | struct xfs_log_item *lip, |
307 | struct xfs_log_iovec *log_vector) | 292 | struct xfs_log_vec *lv) |
308 | { | 293 | { |
309 | struct xfs_qoff_logitem *qflip = QOFF_ITEM(lip); | 294 | struct xfs_qoff_logitem *qflip = QOFF_ITEM(lip); |
310 | 295 | struct xfs_log_iovec *vecp = NULL; | |
311 | ASSERT(qflip->qql_format.qf_type == XFS_LI_QUOTAOFF); | 296 | struct xfs_qoff_logformat *qlf; |
312 | 297 | ||
313 | log_vector->i_addr = &qflip->qql_format; | 298 | qlf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_QUOTAOFF); |
314 | log_vector->i_len = sizeof(xfs_qoff_logitem_t); | 299 | qlf->qf_type = XFS_LI_QUOTAOFF; |
315 | log_vector->i_type = XLOG_REG_TYPE_QUOTAOFF; | 300 | qlf->qf_size = 1; |
316 | qflip->qql_format.qf_size = 1; | 301 | qlf->qf_flags = qflip->qql_flags; |
302 | xlog_finish_iovec(lv, vecp, sizeof(struct xfs_qoff_logitem)); | ||
317 | } | 303 | } |
318 | 304 | ||
319 | /* | 305 | /* |
@@ -453,8 +439,7 @@ xfs_qm_qoff_logitem_init( | |||
453 | xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ? | 439 | xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ? |
454 | &xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops); | 440 | &xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops); |
455 | qf->qql_item.li_mountp = mp; | 441 | qf->qql_item.li_mountp = mp; |
456 | qf->qql_format.qf_type = XFS_LI_QUOTAOFF; | ||
457 | qf->qql_format.qf_flags = flags; | ||
458 | qf->qql_start_lip = start; | 442 | qf->qql_start_lip = start; |
443 | qf->qql_flags = flags; | ||
459 | return qf; | 444 | return qf; |
460 | } | 445 | } |
diff --git a/fs/xfs/xfs_dquot_item.h b/fs/xfs/xfs_dquot_item.h
index 5acae2ada70b..502e9464634a 100644
--- a/fs/xfs/xfs_dquot_item.h
+++ b/fs/xfs/xfs_dquot_item.h
@@ -27,13 +27,12 @@ typedef struct xfs_dq_logitem { | |||
27 | xfs_log_item_t qli_item; /* common portion */ | 27 | xfs_log_item_t qli_item; /* common portion */ |
28 | struct xfs_dquot *qli_dquot; /* dquot ptr */ | 28 | struct xfs_dquot *qli_dquot; /* dquot ptr */ |
29 | xfs_lsn_t qli_flush_lsn; /* lsn at last flush */ | 29 | xfs_lsn_t qli_flush_lsn; /* lsn at last flush */ |
30 | xfs_dq_logformat_t qli_format; /* logged structure */ | ||
31 | } xfs_dq_logitem_t; | 30 | } xfs_dq_logitem_t; |
32 | 31 | ||
33 | typedef struct xfs_qoff_logitem { | 32 | typedef struct xfs_qoff_logitem { |
34 | xfs_log_item_t qql_item; /* common portion */ | 33 | xfs_log_item_t qql_item; /* common portion */ |
35 | struct xfs_qoff_logitem *qql_start_lip; /* qoff-start logitem, if any */ | 34 | struct xfs_qoff_logitem *qql_start_lip; /* qoff-start logitem, if any */ |
36 | xfs_qoff_logformat_t qql_format; /* logged structure */ | 35 | unsigned int qql_flags; |
37 | } xfs_qoff_logitem_t; | 36 | } xfs_qoff_logitem_t; |
38 | 37 | ||
39 | 38 | ||
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 3680d04f973f..fb7a4c1ce1c5 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -26,6 +26,7 @@ | |||
26 | #include "xfs_trans_priv.h" | 26 | #include "xfs_trans_priv.h" |
27 | #include "xfs_buf_item.h" | 27 | #include "xfs_buf_item.h" |
28 | #include "xfs_extfree_item.h" | 28 | #include "xfs_extfree_item.h" |
29 | #include "xfs_log.h" | ||
29 | 30 | ||
30 | 31 | ||
31 | kmem_zone_t *xfs_efi_zone; | 32 | kmem_zone_t *xfs_efi_zone; |
@@ -101,9 +102,10 @@ xfs_efi_item_size( | |||
101 | STATIC void | 102 | STATIC void |
102 | xfs_efi_item_format( | 103 | xfs_efi_item_format( |
103 | struct xfs_log_item *lip, | 104 | struct xfs_log_item *lip, |
104 | struct xfs_log_iovec *log_vector) | 105 | struct xfs_log_vec *lv) |
105 | { | 106 | { |
106 | struct xfs_efi_log_item *efip = EFI_ITEM(lip); | 107 | struct xfs_efi_log_item *efip = EFI_ITEM(lip); |
108 | struct xfs_log_iovec *vecp = NULL; | ||
107 | 109 | ||
108 | ASSERT(atomic_read(&efip->efi_next_extent) == | 110 | ASSERT(atomic_read(&efip->efi_next_extent) == |
109 | efip->efi_format.efi_nextents); | 111 | efip->efi_format.efi_nextents); |
@@ -111,10 +113,9 @@ xfs_efi_item_format( | |||
111 | efip->efi_format.efi_type = XFS_LI_EFI; | 113 | efip->efi_format.efi_type = XFS_LI_EFI; |
112 | efip->efi_format.efi_size = 1; | 114 | efip->efi_format.efi_size = 1; |
113 | 115 | ||
114 | log_vector->i_addr = &efip->efi_format; | 116 | xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_EFI_FORMAT, |
115 | log_vector->i_len = xfs_efi_item_sizeof(efip); | 117 | &efip->efi_format, |
116 | log_vector->i_type = XLOG_REG_TYPE_EFI_FORMAT; | 118 | xfs_efi_item_sizeof(efip)); |
117 | ASSERT(log_vector->i_len >= sizeof(xfs_efi_log_format_t)); | ||
118 | } | 119 | } |
119 | 120 | ||
120 | 121 | ||
@@ -368,19 +369,19 @@ xfs_efd_item_size( | |||
368 | STATIC void | 369 | STATIC void |
369 | xfs_efd_item_format( | 370 | xfs_efd_item_format( |
370 | struct xfs_log_item *lip, | 371 | struct xfs_log_item *lip, |
371 | struct xfs_log_iovec *log_vector) | 372 | struct xfs_log_vec *lv) |
372 | { | 373 | { |
373 | struct xfs_efd_log_item *efdp = EFD_ITEM(lip); | 374 | struct xfs_efd_log_item *efdp = EFD_ITEM(lip); |
375 | struct xfs_log_iovec *vecp = NULL; | ||
374 | 376 | ||
375 | ASSERT(efdp->efd_next_extent == efdp->efd_format.efd_nextents); | 377 | ASSERT(efdp->efd_next_extent == efdp->efd_format.efd_nextents); |
376 | 378 | ||
377 | efdp->efd_format.efd_type = XFS_LI_EFD; | 379 | efdp->efd_format.efd_type = XFS_LI_EFD; |
378 | efdp->efd_format.efd_size = 1; | 380 | efdp->efd_format.efd_size = 1; |
379 | 381 | ||
380 | log_vector->i_addr = &efdp->efd_format; | 382 | xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_EFD_FORMAT, |
381 | log_vector->i_len = xfs_efd_item_sizeof(efdp); | 383 | &efdp->efd_format, |
382 | log_vector->i_type = XLOG_REG_TYPE_EFD_FORMAT; | 384 | xfs_efd_item_sizeof(efdp)); |
383 | ASSERT(log_vector->i_len >= sizeof(xfs_efd_log_format_t)); | ||
384 | } | 385 | } |
385 | 386 | ||
386 | /* | 387 | /* |
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 52c91e143725..e00121592632 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -912,7 +912,7 @@ xfs_dir_open( | |||
912 | * If there are any blocks, read-ahead block 0 as we're almost | 912 | * If there are any blocks, read-ahead block 0 as we're almost |
913 | * certain to have the next operation be a read there. | 913 | * certain to have the next operation be a read there. |
914 | */ | 914 | */ |
915 | mode = xfs_ilock_map_shared(ip); | 915 | mode = xfs_ilock_data_map_shared(ip); |
916 | if (ip->i_d.di_nextents > 0) | 916 | if (ip->i_d.di_nextents > 0) |
917 | xfs_dir3_data_readahead(NULL, ip, 0, -1); | 917 | xfs_dir3_data_readahead(NULL, ip, 0, -1); |
918 | xfs_iunlock(ip, mode); | 918 | xfs_iunlock(ip, mode); |
@@ -1215,7 +1215,7 @@ xfs_seek_data( | |||
1215 | uint lock; | 1215 | uint lock; |
1216 | int error; | 1216 | int error; |
1217 | 1217 | ||
1218 | lock = xfs_ilock_map_shared(ip); | 1218 | lock = xfs_ilock_data_map_shared(ip); |
1219 | 1219 | ||
1220 | isize = i_size_read(inode); | 1220 | isize = i_size_read(inode); |
1221 | if (start >= isize) { | 1221 | if (start >= isize) { |
@@ -1294,7 +1294,7 @@ out: | |||
1294 | offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); | 1294 | offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); |
1295 | 1295 | ||
1296 | out_unlock: | 1296 | out_unlock: |
1297 | xfs_iunlock_map_shared(ip, lock); | 1297 | xfs_iunlock(ip, lock); |
1298 | 1298 | ||
1299 | if (error) | 1299 | if (error) |
1300 | return -error; | 1300 | return -error; |
@@ -1319,7 +1319,7 @@ xfs_seek_hole( | |||
1319 | if (XFS_FORCED_SHUTDOWN(mp)) | 1319 | if (XFS_FORCED_SHUTDOWN(mp)) |
1320 | return -XFS_ERROR(EIO); | 1320 | return -XFS_ERROR(EIO); |
1321 | 1321 | ||
1322 | lock = xfs_ilock_map_shared(ip); | 1322 | lock = xfs_ilock_data_map_shared(ip); |
1323 | 1323 | ||
1324 | isize = i_size_read(inode); | 1324 | isize = i_size_read(inode); |
1325 | if (start >= isize) { | 1325 | if (start >= isize) { |
@@ -1402,7 +1402,7 @@ out: | |||
1402 | offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); | 1402 | offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); |
1403 | 1403 | ||
1404 | out_unlock: | 1404 | out_unlock: |
1405 | xfs_iunlock_map_shared(ip, lock); | 1405 | xfs_iunlock(ip, lock); |
1406 | 1406 | ||
1407 | if (error) | 1407 | if (error) |
1408 | return -error; | 1408 | return -error; |
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index e87719c5bebe..5d7f105a1c82 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -52,7 +52,7 @@ xfs_ialloc_cluster_alignment( | |||
52 | { | 52 | { |
53 | if (xfs_sb_version_hasalign(&args->mp->m_sb) && | 53 | if (xfs_sb_version_hasalign(&args->mp->m_sb) && |
54 | args->mp->m_sb.sb_inoalignmt >= | 54 | args->mp->m_sb.sb_inoalignmt >= |
55 | XFS_B_TO_FSBT(args->mp, XFS_INODE_CLUSTER_SIZE(args->mp))) | 55 | XFS_B_TO_FSBT(args->mp, args->mp->m_inode_cluster_size)) |
56 | return args->mp->m_sb.sb_inoalignmt; | 56 | return args->mp->m_sb.sb_inoalignmt; |
57 | return 1; | 57 | return 1; |
58 | } | 58 | } |
@@ -170,27 +170,20 @@ xfs_ialloc_inode_init( | |||
170 | { | 170 | { |
171 | struct xfs_buf *fbuf; | 171 | struct xfs_buf *fbuf; |
172 | struct xfs_dinode *free; | 172 | struct xfs_dinode *free; |
173 | int blks_per_cluster, nbufs, ninodes; | 173 | int nbufs, blks_per_cluster, inodes_per_cluster; |
174 | int version; | 174 | int version; |
175 | int i, j; | 175 | int i, j; |
176 | xfs_daddr_t d; | 176 | xfs_daddr_t d; |
177 | xfs_ino_t ino = 0; | 177 | xfs_ino_t ino = 0; |
178 | 178 | ||
179 | /* | 179 | /* |
180 | * Loop over the new block(s), filling in the inodes. | 180 | * Loop over the new block(s), filling in the inodes. For small block |
181 | * For small block sizes, manipulate the inodes in buffers | 181 | * sizes, manipulate the inodes in buffers which are multiples of the |
182 | * which are multiples of the blocks size. | 182 | * blocks size. |
183 | */ | 183 | */ |
184 | if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) { | 184 | blks_per_cluster = xfs_icluster_size_fsb(mp); |
185 | blks_per_cluster = 1; | 185 | inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog; |
186 | nbufs = length; | 186 | nbufs = length / blks_per_cluster; |
187 | ninodes = mp->m_sb.sb_inopblock; | ||
188 | } else { | ||
189 | blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) / | ||
190 | mp->m_sb.sb_blocksize; | ||
191 | nbufs = length / blks_per_cluster; | ||
192 | ninodes = blks_per_cluster * mp->m_sb.sb_inopblock; | ||
193 | } | ||
194 | 187 | ||
195 | /* | 188 | /* |
196 | * Figure out what version number to use in the inodes we create. If | 189 | * Figure out what version number to use in the inodes we create. If |
@@ -225,7 +218,7 @@ xfs_ialloc_inode_init( | |||
225 | * they track in the AIL as if they were physically logged. | 218 | * they track in the AIL as if they were physically logged. |
226 | */ | 219 | */ |
227 | if (tp) | 220 | if (tp) |
228 | xfs_icreate_log(tp, agno, agbno, XFS_IALLOC_INODES(mp), | 221 | xfs_icreate_log(tp, agno, agbno, mp->m_ialloc_inos, |
229 | mp->m_sb.sb_inodesize, length, gen); | 222 | mp->m_sb.sb_inodesize, length, gen); |
230 | } else if (xfs_sb_version_hasnlink(&mp->m_sb)) | 223 | } else if (xfs_sb_version_hasnlink(&mp->m_sb)) |
231 | version = 2; | 224 | version = 2; |
@@ -246,7 +239,7 @@ xfs_ialloc_inode_init( | |||
246 | /* Initialize the inode buffers and log them appropriately. */ | 239 | /* Initialize the inode buffers and log them appropriately. */ |
247 | fbuf->b_ops = &xfs_inode_buf_ops; | 240 | fbuf->b_ops = &xfs_inode_buf_ops; |
248 | xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length)); | 241 | xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length)); |
249 | for (i = 0; i < ninodes; i++) { | 242 | for (i = 0; i < inodes_per_cluster; i++) { |
250 | int ioffset = i << mp->m_sb.sb_inodelog; | 243 | int ioffset = i << mp->m_sb.sb_inodelog; |
251 | uint isize = xfs_dinode_size(version); | 244 | uint isize = xfs_dinode_size(version); |
252 | 245 | ||
@@ -329,11 +322,11 @@ xfs_ialloc_ag_alloc( | |||
329 | * Locking will ensure that we don't have two callers in here | 322 | * Locking will ensure that we don't have two callers in here |
330 | * at one time. | 323 | * at one time. |
331 | */ | 324 | */ |
332 | newlen = XFS_IALLOC_INODES(args.mp); | 325 | newlen = args.mp->m_ialloc_inos; |
333 | if (args.mp->m_maxicount && | 326 | if (args.mp->m_maxicount && |
334 | args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount) | 327 | args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount) |
335 | return XFS_ERROR(ENOSPC); | 328 | return XFS_ERROR(ENOSPC); |
336 | args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp); | 329 | args.minlen = args.maxlen = args.mp->m_ialloc_blks; |
337 | /* | 330 | /* |
338 | * First try to allocate inodes contiguous with the last-allocated | 331 | * First try to allocate inodes contiguous with the last-allocated |
339 | * chunk of inodes. If the filesystem is striped, this will fill | 332 | * chunk of inodes. If the filesystem is striped, this will fill |
@@ -343,7 +336,7 @@ xfs_ialloc_ag_alloc( | |||
343 | newino = be32_to_cpu(agi->agi_newino); | 336 | newino = be32_to_cpu(agi->agi_newino); |
344 | agno = be32_to_cpu(agi->agi_seqno); | 337 | agno = be32_to_cpu(agi->agi_seqno); |
345 | args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) + | 338 | args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) + |
346 | XFS_IALLOC_BLOCKS(args.mp); | 339 | args.mp->m_ialloc_blks; |
347 | if (likely(newino != NULLAGINO && | 340 | if (likely(newino != NULLAGINO && |
348 | (args.agbno < be32_to_cpu(agi->agi_length)))) { | 341 | (args.agbno < be32_to_cpu(agi->agi_length)))) { |
349 | args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno); | 342 | args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno); |
@@ -585,7 +578,7 @@ xfs_ialloc_ag_select( | |||
585 | * Is there enough free space for the file plus a block of | 578 | * Is there enough free space for the file plus a block of |
586 | * inodes? (if we need to allocate some)? | 579 | * inodes? (if we need to allocate some)? |
587 | */ | 580 | */ |
588 | ineed = XFS_IALLOC_BLOCKS(mp); | 581 | ineed = mp->m_ialloc_blks; |
589 | longest = pag->pagf_longest; | 582 | longest = pag->pagf_longest; |
590 | if (!longest) | 583 | if (!longest) |
591 | longest = pag->pagf_flcount > 0; | 584 | longest = pag->pagf_flcount > 0; |
@@ -999,7 +992,7 @@ xfs_dialloc( | |||
999 | * inode. | 992 | * inode. |
1000 | */ | 993 | */ |
1001 | if (mp->m_maxicount && | 994 | if (mp->m_maxicount && |
1002 | mp->m_sb.sb_icount + XFS_IALLOC_INODES(mp) > mp->m_maxicount) { | 995 | mp->m_sb.sb_icount + mp->m_ialloc_inos > mp->m_maxicount) { |
1003 | noroom = 1; | 996 | noroom = 1; |
1004 | okalloc = 0; | 997 | okalloc = 0; |
1005 | } | 998 | } |
@@ -1202,7 +1195,7 @@ xfs_difree( | |||
1202 | * When an inode cluster is free, it becomes eligible for removal | 1195 | * When an inode cluster is free, it becomes eligible for removal |
1203 | */ | 1196 | */ |
1204 | if (!(mp->m_flags & XFS_MOUNT_IKEEP) && | 1197 | if (!(mp->m_flags & XFS_MOUNT_IKEEP) && |
1205 | (rec.ir_freecount == XFS_IALLOC_INODES(mp))) { | 1198 | (rec.ir_freecount == mp->m_ialloc_inos)) { |
1206 | 1199 | ||
1207 | *delete = 1; | 1200 | *delete = 1; |
1208 | *first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino); | 1201 | *first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino); |
@@ -1212,7 +1205,7 @@ xfs_difree( | |||
1212 | * AGI and Superblock inode counts, and mark the disk space | 1205 | * AGI and Superblock inode counts, and mark the disk space |
1213 | * to be freed when the transaction is committed. | 1206 | * to be freed when the transaction is committed. |
1214 | */ | 1207 | */ |
1215 | ilen = XFS_IALLOC_INODES(mp); | 1208 | ilen = mp->m_ialloc_inos; |
1216 | be32_add_cpu(&agi->agi_count, -ilen); | 1209 | be32_add_cpu(&agi->agi_count, -ilen); |
1217 | be32_add_cpu(&agi->agi_freecount, -(ilen - 1)); | 1210 | be32_add_cpu(&agi->agi_freecount, -(ilen - 1)); |
1218 | xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT); | 1211 | xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT); |
@@ -1228,9 +1221,9 @@ xfs_difree( | |||
1228 | goto error0; | 1221 | goto error0; |
1229 | } | 1222 | } |
1230 | 1223 | ||
1231 | xfs_bmap_add_free(XFS_AGB_TO_FSB(mp, | 1224 | xfs_bmap_add_free(XFS_AGB_TO_FSB(mp, agno, |
1232 | agno, XFS_INO_TO_AGBNO(mp,rec.ir_startino)), | 1225 | XFS_AGINO_TO_AGBNO(mp, rec.ir_startino)), |
1233 | XFS_IALLOC_BLOCKS(mp), flist, mp); | 1226 | mp->m_ialloc_blks, flist, mp); |
1234 | } else { | 1227 | } else { |
1235 | *delete = 0; | 1228 | *delete = 0; |
1236 | 1229 | ||
@@ -1311,7 +1304,7 @@ xfs_imap_lookup( | |||
1311 | 1304 | ||
1312 | /* check that the returned record contains the required inode */ | 1305 | /* check that the returned record contains the required inode */ |
1313 | if (rec.ir_startino > agino || | 1306 | if (rec.ir_startino > agino || |
1314 | rec.ir_startino + XFS_IALLOC_INODES(mp) <= agino) | 1307 | rec.ir_startino + mp->m_ialloc_inos <= agino) |
1315 | return EINVAL; | 1308 | return EINVAL; |
1316 | 1309 | ||
1317 | /* for untrusted inodes check it is allocated first */ | 1310 | /* for untrusted inodes check it is allocated first */ |
@@ -1384,7 +1377,7 @@ xfs_imap( | |||
1384 | return XFS_ERROR(EINVAL); | 1377 | return XFS_ERROR(EINVAL); |
1385 | } | 1378 | } |
1386 | 1379 | ||
1387 | blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog; | 1380 | blks_per_cluster = xfs_icluster_size_fsb(mp); |
1388 | 1381 | ||
1389 | /* | 1382 | /* |
1390 | * For bulkstat and handle lookups, we have an untrusted inode number | 1383 | * For bulkstat and handle lookups, we have an untrusted inode number |
@@ -1405,7 +1398,7 @@ xfs_imap( | |||
1405 | * If the inode cluster size is the same as the blocksize or | 1398 | * If the inode cluster size is the same as the blocksize or |
1406 | * smaller we get to the buffer by simple arithmetics. | 1399 | * smaller we get to the buffer by simple arithmetics. |
1407 | */ | 1400 | */ |
1408 | if (XFS_INODE_CLUSTER_SIZE(mp) <= mp->m_sb.sb_blocksize) { | 1401 | if (blks_per_cluster == 1) { |
1409 | offset = XFS_INO_TO_OFFSET(mp, ino); | 1402 | offset = XFS_INO_TO_OFFSET(mp, ino); |
1410 | ASSERT(offset < mp->m_sb.sb_inopblock); | 1403 | ASSERT(offset < mp->m_sb.sb_inopblock); |
1411 | 1404 | ||
diff --git a/fs/xfs/xfs_ialloc.h b/fs/xfs/xfs_ialloc.h
index a8f76a5ff418..812365d17e67 100644
--- a/fs/xfs/xfs_ialloc.h
+++ b/fs/xfs/xfs_ialloc.h
@@ -25,17 +25,18 @@ struct xfs_mount; | |||
25 | struct xfs_trans; | 25 | struct xfs_trans; |
26 | struct xfs_btree_cur; | 26 | struct xfs_btree_cur; |
27 | 27 | ||
28 | /* | 28 | /* Move inodes in clusters of this size */ |
29 | * Allocation parameters for inode allocation. | ||
30 | */ | ||
31 | #define XFS_IALLOC_INODES(mp) (mp)->m_ialloc_inos | ||
32 | #define XFS_IALLOC_BLOCKS(mp) (mp)->m_ialloc_blks | ||
33 | |||
34 | /* | ||
35 | * Move inodes in clusters of this size. | ||
36 | */ | ||
37 | #define XFS_INODE_BIG_CLUSTER_SIZE 8192 | 29 | #define XFS_INODE_BIG_CLUSTER_SIZE 8192 |
38 | #define XFS_INODE_CLUSTER_SIZE(mp) (mp)->m_inode_cluster_size | 30 | |
31 | /* Calculate and return the number of filesystem blocks per inode cluster */ | ||
32 | static inline int | ||
33 | xfs_icluster_size_fsb( | ||
34 | struct xfs_mount *mp) | ||
35 | { | ||
36 | if (mp->m_sb.sb_blocksize >= mp->m_inode_cluster_size) | ||
37 | return 1; | ||
38 | return mp->m_inode_cluster_size >> mp->m_sb.sb_blocklog; | ||
39 | } | ||
39 | 40 | ||
40 | /* | 41 | /* |
41 | * Make an inode pointer out of the buffer/offset. | 42 | * Make an inode pointer out of the buffer/offset. |
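A quick worked example of the new xfs_icluster_size_fsb() helper, using assumed filesystem geometry rather than values taken from this commit:

/*
 * Worked example with assumed geometry (4k blocks, 8k inode clusters,
 * 256-byte inodes, so sb_inopblog == 4); none of these numbers come
 * from this commit.
 */
blks_per_cluster = xfs_icluster_size_fsb(mp);			/* 8192 >> 12 = 2 */
inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;	/* 2 << 4 = 32 */
nbufs = mp->m_ialloc_blks / blks_per_cluster;			/* buffers per inode chunk */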
diff --git a/fs/xfs/xfs_icreate_item.c b/fs/xfs/xfs_icreate_item.c
index d2eaccfa73f4..7e4549233251 100644
--- a/fs/xfs/xfs_icreate_item.c
+++ b/fs/xfs/xfs_icreate_item.c
@@ -28,6 +28,7 @@ | |||
28 | #include "xfs_trans_priv.h" | 28 | #include "xfs_trans_priv.h" |
29 | #include "xfs_error.h" | 29 | #include "xfs_error.h" |
30 | #include "xfs_icreate_item.h" | 30 | #include "xfs_icreate_item.h" |
31 | #include "xfs_log.h" | ||
31 | 32 | ||
32 | kmem_zone_t *xfs_icreate_zone; /* inode create item zone */ | 33 | kmem_zone_t *xfs_icreate_zone; /* inode create item zone */ |
33 | 34 | ||
@@ -58,13 +59,14 @@ xfs_icreate_item_size( | |||
58 | STATIC void | 59 | STATIC void |
59 | xfs_icreate_item_format( | 60 | xfs_icreate_item_format( |
60 | struct xfs_log_item *lip, | 61 | struct xfs_log_item *lip, |
61 | struct xfs_log_iovec *log_vector) | 62 | struct xfs_log_vec *lv) |
62 | { | 63 | { |
63 | struct xfs_icreate_item *icp = ICR_ITEM(lip); | 64 | struct xfs_icreate_item *icp = ICR_ITEM(lip); |
65 | struct xfs_log_iovec *vecp = NULL; | ||
64 | 66 | ||
65 | log_vector->i_addr = (xfs_caddr_t)&icp->ic_format; | 67 | xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ICREATE, |
66 | log_vector->i_len = sizeof(struct xfs_icreate_log); | 68 | &icp->ic_format, |
67 | log_vector->i_type = XLOG_REG_TYPE_ICREATE; | 69 | sizeof(struct xfs_icreate_log)); |
68 | } | 70 | } |
69 | 71 | ||
70 | 72 | ||
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 001aa893ed59..3a137e9f9a7d 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -77,48 +77,44 @@ xfs_get_extsz_hint( | |||
77 | } | 77 | } |
78 | 78 | ||
79 | /* | 79 | /* |
80 | * This is a wrapper routine around the xfs_ilock() routine used to centralize | 80 | * These two are wrapper routines around the xfs_ilock() routine used to |
81 | * some grungy code. It is used in places that wish to lock the inode solely | 81 | * centralize some grungy code. They are used in places that wish to lock the |
82 | * for reading the extents. The reason these places can't just call | 82 | * inode solely for reading the extents. The reason these places can't just |
83 | * xfs_ilock(SHARED) is that the inode lock also guards to bringing in of the | 83 | * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards to |
84 | * extents from disk for a file in b-tree format. If the inode is in b-tree | 84 | * bringing in of the extents from disk for a file in b-tree format. If the |
85 | * format, then we need to lock the inode exclusively until the extents are read | 85 | * inode is in b-tree format, then we need to lock the inode exclusively until |
86 | * in. Locking it exclusively all the time would limit our parallelism | 86 | * the extents are read in. Locking it exclusively all the time would limit |
87 | * unnecessarily, though. What we do instead is check to see if the extents | 87 | * our parallelism unnecessarily, though. What we do instead is check to see |
88 | * have been read in yet, and only lock the inode exclusively if they have not. | 88 | * if the extents have been read in yet, and only lock the inode exclusively |
89 | * if they have not. | ||
89 | * | 90 | * |
90 | * The function returns a value which should be given to the corresponding | 91 | * The functions return a value which should be given to the corresponding |
91 | * xfs_iunlock_map_shared(). This value is the mode in which the lock was | 92 | * xfs_iunlock() call. |
92 | * actually taken. | ||
93 | */ | 93 | */ |
94 | uint | 94 | uint |
95 | xfs_ilock_map_shared( | 95 | xfs_ilock_data_map_shared( |
96 | xfs_inode_t *ip) | 96 | struct xfs_inode *ip) |
97 | { | 97 | { |
98 | uint lock_mode; | 98 | uint lock_mode = XFS_ILOCK_SHARED; |
99 | 99 | ||
100 | if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) && | 100 | if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE && |
101 | ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) { | 101 | (ip->i_df.if_flags & XFS_IFEXTENTS) == 0) |
102 | lock_mode = XFS_ILOCK_EXCL; | 102 | lock_mode = XFS_ILOCK_EXCL; |
103 | } else { | ||
104 | lock_mode = XFS_ILOCK_SHARED; | ||
105 | } | ||
106 | |||
107 | xfs_ilock(ip, lock_mode); | 103 | xfs_ilock(ip, lock_mode); |
108 | |||
109 | return lock_mode; | 104 | return lock_mode; |
110 | } | 105 | } |
111 | 106 | ||
112 | /* | 107 | uint |
113 | * This is simply the unlock routine to go with xfs_ilock_map_shared(). | 108 | xfs_ilock_attr_map_shared( |
114 | * All it does is call xfs_iunlock() with the given lock_mode. | 109 | struct xfs_inode *ip) |
115 | */ | ||
116 | void | ||
117 | xfs_iunlock_map_shared( | ||
118 | xfs_inode_t *ip, | ||
119 | unsigned int lock_mode) | ||
120 | { | 110 | { |
121 | xfs_iunlock(ip, lock_mode); | 111 | uint lock_mode = XFS_ILOCK_SHARED; |
112 | |||
113 | if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE && | ||
114 | (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0) | ||
115 | lock_mode = XFS_ILOCK_EXCL; | ||
116 | xfs_ilock(ip, lock_mode); | ||
117 | return lock_mode; | ||
122 | } | 118 | } |
123 | 119 | ||
124 | /* | 120 | /* |
@@ -588,9 +584,9 @@ xfs_lookup( | |||
588 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) | 584 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) |
589 | return XFS_ERROR(EIO); | 585 | return XFS_ERROR(EIO); |
590 | 586 | ||
591 | lock_mode = xfs_ilock_map_shared(dp); | 587 | lock_mode = xfs_ilock_data_map_shared(dp); |
592 | error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name); | 588 | error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name); |
593 | xfs_iunlock_map_shared(dp, lock_mode); | 589 | xfs_iunlock(dp, lock_mode); |
594 | 590 | ||
595 | if (error) | 591 | if (error) |
596 | goto out; | 592 | goto out; |
@@ -2141,8 +2137,8 @@ xfs_ifree_cluster( | |||
2141 | { | 2137 | { |
2142 | xfs_mount_t *mp = free_ip->i_mount; | 2138 | xfs_mount_t *mp = free_ip->i_mount; |
2143 | int blks_per_cluster; | 2139 | int blks_per_cluster; |
2140 | int inodes_per_cluster; | ||
2144 | int nbufs; | 2141 | int nbufs; |
2145 | int ninodes; | ||
2146 | int i, j; | 2142 | int i, j; |
2147 | xfs_daddr_t blkno; | 2143 | xfs_daddr_t blkno; |
2148 | xfs_buf_t *bp; | 2144 | xfs_buf_t *bp; |
@@ -2152,18 +2148,11 @@ xfs_ifree_cluster( | |||
2152 | struct xfs_perag *pag; | 2148 | struct xfs_perag *pag; |
2153 | 2149 | ||
2154 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum)); | 2150 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum)); |
2155 | if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) { | 2151 | blks_per_cluster = xfs_icluster_size_fsb(mp); |
2156 | blks_per_cluster = 1; | 2152 | inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog; |
2157 | ninodes = mp->m_sb.sb_inopblock; | 2153 | nbufs = mp->m_ialloc_blks / blks_per_cluster; |
2158 | nbufs = XFS_IALLOC_BLOCKS(mp); | ||
2159 | } else { | ||
2160 | blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) / | ||
2161 | mp->m_sb.sb_blocksize; | ||
2162 | ninodes = blks_per_cluster * mp->m_sb.sb_inopblock; | ||
2163 | nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster; | ||
2164 | } | ||
2165 | 2154 | ||
2166 | for (j = 0; j < nbufs; j++, inum += ninodes) { | 2155 | for (j = 0; j < nbufs; j++, inum += inodes_per_cluster) { |
2167 | blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum), | 2156 | blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum), |
2168 | XFS_INO_TO_AGBNO(mp, inum)); | 2157 | XFS_INO_TO_AGBNO(mp, inum)); |
2169 | 2158 | ||
@@ -2225,7 +2214,7 @@ xfs_ifree_cluster( | |||
2225 | * transaction stale above, which means there is no point in | 2214 | * transaction stale above, which means there is no point in |
2226 | * even trying to lock them. | 2215 | * even trying to lock them. |
2227 | */ | 2216 | */ |
2228 | for (i = 0; i < ninodes; i++) { | 2217 | for (i = 0; i < inodes_per_cluster; i++) { |
2229 | retry: | 2218 | retry: |
2230 | rcu_read_lock(); | 2219 | rcu_read_lock(); |
2231 | ip = radix_tree_lookup(&pag->pag_ici_root, | 2220 | ip = radix_tree_lookup(&pag->pag_ici_root, |
@@ -2906,13 +2895,13 @@ xfs_iflush_cluster( | |||
2906 | 2895 | ||
2907 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); | 2896 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); |
2908 | 2897 | ||
2909 | inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog; | 2898 | inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog; |
2910 | ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *); | 2899 | ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *); |
2911 | ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS); | 2900 | ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS); |
2912 | if (!ilist) | 2901 | if (!ilist) |
2913 | goto out_put; | 2902 | goto out_put; |
2914 | 2903 | ||
2915 | mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1); | 2904 | mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1); |
2916 | first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask; | 2905 | first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask; |
2917 | rcu_read_lock(); | 2906 | rcu_read_lock(); |
2918 | /* really need a gang lookup range call here */ | 2907 | /* really need a gang lookup range call here */ |
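The cluster-geometry rework in xfs_ifree_cluster() and xfs_iflush_cluster() replaces the open-coded XFS_INODE_CLUSTER_SIZE() arithmetic with the new mount fields and xfs_icluster_size_fsb(). As a worked example with hypothetical geometry — 4096-byte blocks, 256-byte inodes and an 8192-byte inode cluster:

	blks_per_cluster   = xfs_icluster_size_fsb(mp);			/* 8192 / 4096   = 2  */
	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;	/* 2 << log2(16) = 32 */
	nbufs              = mp->m_ialloc_blks / blks_per_cluster;	/* cluster buffers per inode chunk */

With this geometry the shift form used in xfs_ifree_cluster() and the divide-by-inode-size form kept in xfs_iflush_cluster() agree: 2 << 4 == 8192 >> 8 == 32 inodes per cluster buffer.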
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 9e6efccbae04..65e2350f449c 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h | |||
@@ -337,8 +337,8 @@ int xfs_ilock_nowait(xfs_inode_t *, uint); | |||
337 | void xfs_iunlock(xfs_inode_t *, uint); | 337 | void xfs_iunlock(xfs_inode_t *, uint); |
338 | void xfs_ilock_demote(xfs_inode_t *, uint); | 338 | void xfs_ilock_demote(xfs_inode_t *, uint); |
339 | int xfs_isilocked(xfs_inode_t *, uint); | 339 | int xfs_isilocked(xfs_inode_t *, uint); |
340 | uint xfs_ilock_map_shared(xfs_inode_t *); | 340 | uint xfs_ilock_data_map_shared(struct xfs_inode *); |
341 | void xfs_iunlock_map_shared(xfs_inode_t *, uint); | 341 | uint xfs_ilock_attr_map_shared(struct xfs_inode *); |
342 | int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, umode_t, | 342 | int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, umode_t, |
343 | xfs_nlink_t, xfs_dev_t, prid_t, int, | 343 | xfs_nlink_t, xfs_dev_t, prid_t, int, |
344 | struct xfs_buf **, xfs_inode_t **); | 344 | struct xfs_buf **, xfs_inode_t **); |
diff --git a/fs/xfs/xfs_inode_fork.c b/fs/xfs/xfs_inode_fork.c index cfee14a83cfe..73514c0486b7 100644 --- a/fs/xfs/xfs_inode_fork.c +++ b/fs/xfs/xfs_inode_fork.c | |||
@@ -431,6 +431,8 @@ xfs_iread_extents( | |||
431 | xfs_ifork_t *ifp; | 431 | xfs_ifork_t *ifp; |
432 | xfs_extnum_t nextents; | 432 | xfs_extnum_t nextents; |
433 | 433 | ||
434 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | ||
435 | |||
434 | if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { | 436 | if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { |
435 | XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW, | 437 | XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW, |
436 | ip->i_mount); | 438 | ip->i_mount); |
@@ -721,15 +723,16 @@ xfs_idestroy_fork( | |||
721 | } | 723 | } |
722 | 724 | ||
723 | /* | 725 | /* |
724 | * xfs_iextents_copy() | 726 | * Convert in-core extents to on-disk form |
725 | * | 727 | * |
726 | * This is called to copy the REAL extents (as opposed to the delayed | 728 | * For either the data or attr fork in extent format, we need to endian convert |
727 | * allocation extents) from the inode into the given buffer. It | 729 | * the in-core extent as we place them into the on-disk inode. |
728 | * returns the number of bytes copied into the buffer. | ||
729 | * | 730 | * |
730 | * If there are no delayed allocation extents, then we can just | 731 | * In the case of the data fork, the in-core and on-disk fork sizes can be |
731 | * memcpy() the extents into the buffer. Otherwise, we need to | 732 | * different due to delayed allocation extents. We only copy on-disk extents |
732 | * examine each extent in turn and skip those which are delayed. | 733 | * here, so callers must always use the physical fork size to determine the |
734 | * size of the buffer passed to this routine. We will return the size actually | ||
735 | * used. | ||
733 | */ | 736 | */ |
734 | int | 737 | int |
735 | xfs_iextents_copy( | 738 | xfs_iextents_copy( |
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 7c0d391f9a6e..686889b4a1e5 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include "xfs_trace.h" | 30 | #include "xfs_trace.h" |
31 | #include "xfs_trans_priv.h" | 31 | #include "xfs_trans_priv.h" |
32 | #include "xfs_dinode.h" | 32 | #include "xfs_dinode.h" |
33 | #include "xfs_log.h" | ||
33 | 34 | ||
34 | 35 | ||
35 | kmem_zone_t *xfs_ili_zone; /* inode log item zone */ | 36 | kmem_zone_t *xfs_ili_zone; /* inode log item zone */ |
@@ -39,27 +40,14 @@ static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip) | |||
39 | return container_of(lip, struct xfs_inode_log_item, ili_item); | 40 | return container_of(lip, struct xfs_inode_log_item, ili_item); |
40 | } | 41 | } |
41 | 42 | ||
42 | |||
43 | /* | ||
44 | * This returns the number of iovecs needed to log the given inode item. | ||
45 | * | ||
46 | * We need one iovec for the inode log format structure, one for the | ||
47 | * inode core, and possibly one for the inode data/extents/b-tree root | ||
48 | * and one for the inode attribute data/extents/b-tree root. | ||
49 | */ | ||
50 | STATIC void | 43 | STATIC void |
51 | xfs_inode_item_size( | 44 | xfs_inode_item_data_fork_size( |
52 | struct xfs_log_item *lip, | 45 | struct xfs_inode_log_item *iip, |
53 | int *nvecs, | 46 | int *nvecs, |
54 | int *nbytes) | 47 | int *nbytes) |
55 | { | 48 | { |
56 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); | ||
57 | struct xfs_inode *ip = iip->ili_inode; | 49 | struct xfs_inode *ip = iip->ili_inode; |
58 | 50 | ||
59 | *nvecs += 2; | ||
60 | *nbytes += sizeof(struct xfs_inode_log_format) + | ||
61 | xfs_icdinode_size(ip->i_d.di_version); | ||
62 | |||
63 | switch (ip->i_d.di_format) { | 51 | switch (ip->i_d.di_format) { |
64 | case XFS_DINODE_FMT_EXTENTS: | 52 | case XFS_DINODE_FMT_EXTENTS: |
65 | if ((iip->ili_fields & XFS_ILOG_DEXT) && | 53 | if ((iip->ili_fields & XFS_ILOG_DEXT) && |
@@ -70,7 +58,6 @@ xfs_inode_item_size( | |||
70 | *nvecs += 1; | 58 | *nvecs += 1; |
71 | } | 59 | } |
72 | break; | 60 | break; |
73 | |||
74 | case XFS_DINODE_FMT_BTREE: | 61 | case XFS_DINODE_FMT_BTREE: |
75 | if ((iip->ili_fields & XFS_ILOG_DBROOT) && | 62 | if ((iip->ili_fields & XFS_ILOG_DBROOT) && |
76 | ip->i_df.if_broot_bytes > 0) { | 63 | ip->i_df.if_broot_bytes > 0) { |
@@ -78,7 +65,6 @@ xfs_inode_item_size( | |||
78 | *nvecs += 1; | 65 | *nvecs += 1; |
79 | } | 66 | } |
80 | break; | 67 | break; |
81 | |||
82 | case XFS_DINODE_FMT_LOCAL: | 68 | case XFS_DINODE_FMT_LOCAL: |
83 | if ((iip->ili_fields & XFS_ILOG_DDATA) && | 69 | if ((iip->ili_fields & XFS_ILOG_DDATA) && |
84 | ip->i_df.if_bytes > 0) { | 70 | ip->i_df.if_bytes > 0) { |
@@ -90,19 +76,20 @@ xfs_inode_item_size( | |||
90 | case XFS_DINODE_FMT_DEV: | 76 | case XFS_DINODE_FMT_DEV: |
91 | case XFS_DINODE_FMT_UUID: | 77 | case XFS_DINODE_FMT_UUID: |
92 | break; | 78 | break; |
93 | |||
94 | default: | 79 | default: |
95 | ASSERT(0); | 80 | ASSERT(0); |
96 | break; | 81 | break; |
97 | } | 82 | } |
83 | } | ||
98 | 84 | ||
99 | if (!XFS_IFORK_Q(ip)) | 85 | STATIC void |
100 | return; | 86 | xfs_inode_item_attr_fork_size( |
101 | 87 | struct xfs_inode_log_item *iip, | |
88 | int *nvecs, | ||
89 | int *nbytes) | ||
90 | { | ||
91 | struct xfs_inode *ip = iip->ili_inode; | ||
102 | 92 | ||
103 | /* | ||
104 | * Log any necessary attribute data. | ||
105 | */ | ||
106 | switch (ip->i_d.di_aformat) { | 93 | switch (ip->i_d.di_aformat) { |
107 | case XFS_DINODE_FMT_EXTENTS: | 94 | case XFS_DINODE_FMT_EXTENTS: |
108 | if ((iip->ili_fields & XFS_ILOG_AEXT) && | 95 | if ((iip->ili_fields & XFS_ILOG_AEXT) && |
@@ -113,7 +100,6 @@ xfs_inode_item_size( | |||
113 | *nvecs += 1; | 100 | *nvecs += 1; |
114 | } | 101 | } |
115 | break; | 102 | break; |
116 | |||
117 | case XFS_DINODE_FMT_BTREE: | 103 | case XFS_DINODE_FMT_BTREE: |
118 | if ((iip->ili_fields & XFS_ILOG_ABROOT) && | 104 | if ((iip->ili_fields & XFS_ILOG_ABROOT) && |
119 | ip->i_afp->if_broot_bytes > 0) { | 105 | ip->i_afp->if_broot_bytes > 0) { |
@@ -121,7 +107,6 @@ xfs_inode_item_size( | |||
121 | *nvecs += 1; | 107 | *nvecs += 1; |
122 | } | 108 | } |
123 | break; | 109 | break; |
124 | |||
125 | case XFS_DINODE_FMT_LOCAL: | 110 | case XFS_DINODE_FMT_LOCAL: |
126 | if ((iip->ili_fields & XFS_ILOG_ADATA) && | 111 | if ((iip->ili_fields & XFS_ILOG_ADATA) && |
127 | ip->i_afp->if_bytes > 0) { | 112 | ip->i_afp->if_bytes > 0) { |
@@ -129,7 +114,6 @@ xfs_inode_item_size( | |||
129 | *nvecs += 1; | 114 | *nvecs += 1; |
130 | } | 115 | } |
131 | break; | 116 | break; |
132 | |||
133 | default: | 117 | default: |
134 | ASSERT(0); | 118 | ASSERT(0); |
135 | break; | 119 | break; |
@@ -137,98 +121,67 @@ xfs_inode_item_size( | |||
137 | } | 121 | } |
138 | 122 | ||
139 | /* | 123 | /* |
140 | * xfs_inode_item_format_extents - convert in-core extents to on-disk form | 124 | * This returns the number of iovecs needed to log the given inode item. |
141 | * | ||
142 | * For either the data or attr fork in extent format, we need to endian convert | ||
143 | * the in-core extent as we place them into the on-disk inode. In this case, we | ||
144 | * need to do this conversion before we write the extents into the log. Because | ||
145 | * we don't have the disk inode to write into here, we allocate a buffer and | ||
146 | * format the extents into it via xfs_iextents_copy(). We free the buffer in | ||
147 | * the unlock routine after the copy for the log has been made. | ||
148 | * | 125 | * |
149 | * In the case of the data fork, the in-core and on-disk fork sizes can be | 126 | * We need one iovec for the inode log format structure, one for the |
150 | * different due to delayed allocation extents. We only log on-disk extents | 127 | * inode core, and possibly one for the inode data/extents/b-tree root |
151 | * here, so always use the physical fork size to determine the size of the | 128 | * and one for the inode attribute data/extents/b-tree root. |
152 | * buffer we need to allocate. | ||
153 | */ | 129 | */ |
154 | STATIC void | 130 | STATIC void |
155 | xfs_inode_item_format_extents( | 131 | xfs_inode_item_size( |
156 | struct xfs_inode *ip, | 132 | struct xfs_log_item *lip, |
157 | struct xfs_log_iovec *vecp, | 133 | int *nvecs, |
158 | int whichfork, | 134 | int *nbytes) |
159 | int type) | ||
160 | { | 135 | { |
161 | xfs_bmbt_rec_t *ext_buffer; | 136 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); |
137 | struct xfs_inode *ip = iip->ili_inode; | ||
162 | 138 | ||
163 | ext_buffer = kmem_alloc(XFS_IFORK_SIZE(ip, whichfork), KM_SLEEP); | 139 | *nvecs += 2; |
164 | if (whichfork == XFS_DATA_FORK) | 140 | *nbytes += sizeof(struct xfs_inode_log_format) + |
165 | ip->i_itemp->ili_extents_buf = ext_buffer; | 141 | xfs_icdinode_size(ip->i_d.di_version); |
166 | else | ||
167 | ip->i_itemp->ili_aextents_buf = ext_buffer; | ||
168 | 142 | ||
169 | vecp->i_addr = ext_buffer; | 143 | xfs_inode_item_data_fork_size(iip, nvecs, nbytes); |
170 | vecp->i_len = xfs_iextents_copy(ip, ext_buffer, whichfork); | 144 | if (XFS_IFORK_Q(ip)) |
171 | vecp->i_type = type; | 145 | xfs_inode_item_attr_fork_size(iip, nvecs, nbytes); |
172 | } | 146 | } |
173 | 147 | ||
174 | /* | 148 | /* |
175 | * This is called to fill in the vector of log iovecs for the | 149 | * If this is a v1 format inode, then we need to log it as such. This means |
176 | * given inode log item. It fills the first item with an inode | 150 | * that we have to copy the link count from the new field to the old. We |
177 | * log format structure, the second with the on-disk inode structure, | 151 | * don't have to worry about the new fields, because nothing trusts them as |
178 | * and a possible third and/or fourth with the inode data/extents/b-tree | 152 | * long as the old inode version number is there. |
179 | * root and inode attributes data/extents/b-tree root. | ||
180 | */ | 153 | */ |
181 | STATIC void | 154 | STATIC void |
182 | xfs_inode_item_format( | 155 | xfs_inode_item_format_v1_inode( |
183 | struct xfs_log_item *lip, | 156 | struct xfs_inode *ip) |
184 | struct xfs_log_iovec *vecp) | 157 | { |
158 | if (!xfs_sb_version_hasnlink(&ip->i_mount->m_sb)) { | ||
159 | /* | ||
160 | * Convert it back. | ||
161 | */ | ||
162 | ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1); | ||
163 | ip->i_d.di_onlink = ip->i_d.di_nlink; | ||
164 | } else { | ||
165 | /* | ||
166 | * The superblock version has already been bumped, | ||
167 | * so just make the conversion to the new inode | ||
168 | * format permanent. | ||
169 | */ | ||
170 | ip->i_d.di_version = 2; | ||
171 | ip->i_d.di_onlink = 0; | ||
172 | memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); | ||
173 | } | ||
174 | } | ||
175 | |||
176 | STATIC void | ||
177 | xfs_inode_item_format_data_fork( | ||
178 | struct xfs_inode_log_item *iip, | ||
179 | struct xfs_inode_log_format *ilf, | ||
180 | struct xfs_log_vec *lv, | ||
181 | struct xfs_log_iovec **vecp) | ||
185 | { | 182 | { |
186 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); | ||
187 | struct xfs_inode *ip = iip->ili_inode; | 183 | struct xfs_inode *ip = iip->ili_inode; |
188 | uint nvecs; | ||
189 | size_t data_bytes; | 184 | size_t data_bytes; |
190 | xfs_mount_t *mp; | ||
191 | |||
192 | vecp->i_addr = &iip->ili_format; | ||
193 | vecp->i_len = sizeof(xfs_inode_log_format_t); | ||
194 | vecp->i_type = XLOG_REG_TYPE_IFORMAT; | ||
195 | vecp++; | ||
196 | nvecs = 1; | ||
197 | |||
198 | vecp->i_addr = &ip->i_d; | ||
199 | vecp->i_len = xfs_icdinode_size(ip->i_d.di_version); | ||
200 | vecp->i_type = XLOG_REG_TYPE_ICORE; | ||
201 | vecp++; | ||
202 | nvecs++; | ||
203 | |||
204 | /* | ||
205 | * If this is really an old format inode, then we need to | ||
206 | * log it as such. This means that we have to copy the link | ||
207 | * count from the new field to the old. We don't have to worry | ||
208 | * about the new fields, because nothing trusts them as long as | ||
209 | * the old inode version number is there. If the superblock already | ||
210 | * has a new version number, then we don't bother converting back. | ||
211 | */ | ||
212 | mp = ip->i_mount; | ||
213 | ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb)); | ||
214 | if (ip->i_d.di_version == 1) { | ||
215 | if (!xfs_sb_version_hasnlink(&mp->m_sb)) { | ||
216 | /* | ||
217 | * Convert it back. | ||
218 | */ | ||
219 | ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1); | ||
220 | ip->i_d.di_onlink = ip->i_d.di_nlink; | ||
221 | } else { | ||
222 | /* | ||
223 | * The superblock version has already been bumped, | ||
224 | * so just make the conversion to the new inode | ||
225 | * format permanent. | ||
226 | */ | ||
227 | ip->i_d.di_version = 2; | ||
228 | ip->i_d.di_onlink = 0; | ||
229 | memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); | ||
230 | } | ||
231 | } | ||
232 | 185 | ||
233 | switch (ip->i_d.di_format) { | 186 | switch (ip->i_d.di_format) { |
234 | case XFS_DINODE_FMT_EXTENTS: | 187 | case XFS_DINODE_FMT_EXTENTS: |
@@ -239,36 +192,23 @@ xfs_inode_item_format( | |||
239 | if ((iip->ili_fields & XFS_ILOG_DEXT) && | 192 | if ((iip->ili_fields & XFS_ILOG_DEXT) && |
240 | ip->i_d.di_nextents > 0 && | 193 | ip->i_d.di_nextents > 0 && |
241 | ip->i_df.if_bytes > 0) { | 194 | ip->i_df.if_bytes > 0) { |
195 | struct xfs_bmbt_rec *p; | ||
196 | |||
242 | ASSERT(ip->i_df.if_u1.if_extents != NULL); | 197 | ASSERT(ip->i_df.if_u1.if_extents != NULL); |
243 | ASSERT(ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) > 0); | 198 | ASSERT(ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) > 0); |
244 | ASSERT(iip->ili_extents_buf == NULL); | 199 | |
245 | 200 | p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IEXT); | |
246 | #ifdef XFS_NATIVE_HOST | 201 | data_bytes = xfs_iextents_copy(ip, p, XFS_DATA_FORK); |
247 | if (ip->i_d.di_nextents == ip->i_df.if_bytes / | 202 | xlog_finish_iovec(lv, *vecp, data_bytes); |
248 | (uint)sizeof(xfs_bmbt_rec_t)) { | 203 | |
249 | /* | 204 | ASSERT(data_bytes <= ip->i_df.if_bytes); |
250 | * There are no delayed allocation | 205 | |
251 | * extents, so just point to the | 206 | ilf->ilf_dsize = data_bytes; |
252 | * real extents array. | 207 | ilf->ilf_size++; |
253 | */ | ||
254 | vecp->i_addr = ip->i_df.if_u1.if_extents; | ||
255 | vecp->i_len = ip->i_df.if_bytes; | ||
256 | vecp->i_type = XLOG_REG_TYPE_IEXT; | ||
257 | } else | ||
258 | #endif | ||
259 | { | ||
260 | xfs_inode_item_format_extents(ip, vecp, | ||
261 | XFS_DATA_FORK, XLOG_REG_TYPE_IEXT); | ||
262 | } | ||
263 | ASSERT(vecp->i_len <= ip->i_df.if_bytes); | ||
264 | iip->ili_format.ilf_dsize = vecp->i_len; | ||
265 | vecp++; | ||
266 | nvecs++; | ||
267 | } else { | 208 | } else { |
268 | iip->ili_fields &= ~XFS_ILOG_DEXT; | 209 | iip->ili_fields &= ~XFS_ILOG_DEXT; |
269 | } | 210 | } |
270 | break; | 211 | break; |
271 | |||
272 | case XFS_DINODE_FMT_BTREE: | 212 | case XFS_DINODE_FMT_BTREE: |
273 | iip->ili_fields &= | 213 | iip->ili_fields &= |
274 | ~(XFS_ILOG_DDATA | XFS_ILOG_DEXT | | 214 | ~(XFS_ILOG_DDATA | XFS_ILOG_DEXT | |
@@ -277,80 +217,70 @@ xfs_inode_item_format( | |||
277 | if ((iip->ili_fields & XFS_ILOG_DBROOT) && | 217 | if ((iip->ili_fields & XFS_ILOG_DBROOT) && |
278 | ip->i_df.if_broot_bytes > 0) { | 218 | ip->i_df.if_broot_bytes > 0) { |
279 | ASSERT(ip->i_df.if_broot != NULL); | 219 | ASSERT(ip->i_df.if_broot != NULL); |
280 | vecp->i_addr = ip->i_df.if_broot; | 220 | xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IBROOT, |
281 | vecp->i_len = ip->i_df.if_broot_bytes; | 221 | ip->i_df.if_broot, |
282 | vecp->i_type = XLOG_REG_TYPE_IBROOT; | 222 | ip->i_df.if_broot_bytes); |
283 | vecp++; | 223 | ilf->ilf_dsize = ip->i_df.if_broot_bytes; |
284 | nvecs++; | 224 | ilf->ilf_size++; |
285 | iip->ili_format.ilf_dsize = ip->i_df.if_broot_bytes; | ||
286 | } else { | 225 | } else { |
287 | ASSERT(!(iip->ili_fields & | 226 | ASSERT(!(iip->ili_fields & |
288 | XFS_ILOG_DBROOT)); | 227 | XFS_ILOG_DBROOT)); |
289 | iip->ili_fields &= ~XFS_ILOG_DBROOT; | 228 | iip->ili_fields &= ~XFS_ILOG_DBROOT; |
290 | } | 229 | } |
291 | break; | 230 | break; |
292 | |||
293 | case XFS_DINODE_FMT_LOCAL: | 231 | case XFS_DINODE_FMT_LOCAL: |
294 | iip->ili_fields &= | 232 | iip->ili_fields &= |
295 | ~(XFS_ILOG_DEXT | XFS_ILOG_DBROOT | | 233 | ~(XFS_ILOG_DEXT | XFS_ILOG_DBROOT | |
296 | XFS_ILOG_DEV | XFS_ILOG_UUID); | 234 | XFS_ILOG_DEV | XFS_ILOG_UUID); |
297 | if ((iip->ili_fields & XFS_ILOG_DDATA) && | 235 | if ((iip->ili_fields & XFS_ILOG_DDATA) && |
298 | ip->i_df.if_bytes > 0) { | 236 | ip->i_df.if_bytes > 0) { |
299 | ASSERT(ip->i_df.if_u1.if_data != NULL); | ||
300 | ASSERT(ip->i_d.di_size > 0); | ||
301 | |||
302 | vecp->i_addr = ip->i_df.if_u1.if_data; | ||
303 | /* | 237 | /* |
304 | * Round i_bytes up to a word boundary. | 238 | * Round i_bytes up to a word boundary. |
305 | * The underlying memory is guaranteed to | 239 | * The underlying memory is guaranteed to |
306 | * to be there by xfs_idata_realloc(). | 240 | * to be there by xfs_idata_realloc(). |
307 | */ | 241 | */ |
308 | data_bytes = roundup(ip->i_df.if_bytes, 4); | 242 | data_bytes = roundup(ip->i_df.if_bytes, 4); |
309 | ASSERT((ip->i_df.if_real_bytes == 0) || | 243 | ASSERT(ip->i_df.if_real_bytes == 0 || |
310 | (ip->i_df.if_real_bytes == data_bytes)); | 244 | ip->i_df.if_real_bytes == data_bytes); |
311 | vecp->i_len = (int)data_bytes; | 245 | ASSERT(ip->i_df.if_u1.if_data != NULL); |
312 | vecp->i_type = XLOG_REG_TYPE_ILOCAL; | 246 | ASSERT(ip->i_d.di_size > 0); |
313 | vecp++; | 247 | xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_ILOCAL, |
314 | nvecs++; | 248 | ip->i_df.if_u1.if_data, data_bytes); |
315 | iip->ili_format.ilf_dsize = (unsigned)data_bytes; | 249 | ilf->ilf_dsize = (unsigned)data_bytes; |
250 | ilf->ilf_size++; | ||
316 | } else { | 251 | } else { |
317 | iip->ili_fields &= ~XFS_ILOG_DDATA; | 252 | iip->ili_fields &= ~XFS_ILOG_DDATA; |
318 | } | 253 | } |
319 | break; | 254 | break; |
320 | |||
321 | case XFS_DINODE_FMT_DEV: | 255 | case XFS_DINODE_FMT_DEV: |
322 | iip->ili_fields &= | 256 | iip->ili_fields &= |
323 | ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | | 257 | ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | |
324 | XFS_ILOG_DEXT | XFS_ILOG_UUID); | 258 | XFS_ILOG_DEXT | XFS_ILOG_UUID); |
325 | if (iip->ili_fields & XFS_ILOG_DEV) { | 259 | if (iip->ili_fields & XFS_ILOG_DEV) |
326 | iip->ili_format.ilf_u.ilfu_rdev = | 260 | ilf->ilf_u.ilfu_rdev = ip->i_df.if_u2.if_rdev; |
327 | ip->i_df.if_u2.if_rdev; | ||
328 | } | ||
329 | break; | 261 | break; |
330 | |||
331 | case XFS_DINODE_FMT_UUID: | 262 | case XFS_DINODE_FMT_UUID: |
332 | iip->ili_fields &= | 263 | iip->ili_fields &= |
333 | ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | | 264 | ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | |
334 | XFS_ILOG_DEXT | XFS_ILOG_DEV); | 265 | XFS_ILOG_DEXT | XFS_ILOG_DEV); |
335 | if (iip->ili_fields & XFS_ILOG_UUID) { | 266 | if (iip->ili_fields & XFS_ILOG_UUID) |
336 | iip->ili_format.ilf_u.ilfu_uuid = | 267 | ilf->ilf_u.ilfu_uuid = ip->i_df.if_u2.if_uuid; |
337 | ip->i_df.if_u2.if_uuid; | ||
338 | } | ||
339 | break; | 268 | break; |
340 | |||
341 | default: | 269 | default: |
342 | ASSERT(0); | 270 | ASSERT(0); |
343 | break; | 271 | break; |
344 | } | 272 | } |
273 | } | ||
345 | 274 | ||
346 | /* | 275 | STATIC void |
347 | * If there are no attributes associated with the file, then we're done. | 276 | xfs_inode_item_format_attr_fork( |
348 | */ | 277 | struct xfs_inode_log_item *iip, |
349 | if (!XFS_IFORK_Q(ip)) { | 278 | struct xfs_inode_log_format *ilf, |
350 | iip->ili_fields &= | 279 | struct xfs_log_vec *lv, |
351 | ~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT); | 280 | struct xfs_log_iovec **vecp) |
352 | goto out; | 281 | { |
353 | } | 282 | struct xfs_inode *ip = iip->ili_inode; |
283 | size_t data_bytes; | ||
354 | 284 | ||
355 | switch (ip->i_d.di_aformat) { | 285 | switch (ip->i_d.di_aformat) { |
356 | case XFS_DINODE_FMT_EXTENTS: | 286 | case XFS_DINODE_FMT_EXTENTS: |
@@ -360,30 +290,22 @@ xfs_inode_item_format( | |||
360 | if ((iip->ili_fields & XFS_ILOG_AEXT) && | 290 | if ((iip->ili_fields & XFS_ILOG_AEXT) && |
361 | ip->i_d.di_anextents > 0 && | 291 | ip->i_d.di_anextents > 0 && |
362 | ip->i_afp->if_bytes > 0) { | 292 | ip->i_afp->if_bytes > 0) { |
293 | struct xfs_bmbt_rec *p; | ||
294 | |||
363 | ASSERT(ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) == | 295 | ASSERT(ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) == |
364 | ip->i_d.di_anextents); | 296 | ip->i_d.di_anextents); |
365 | ASSERT(ip->i_afp->if_u1.if_extents != NULL); | 297 | ASSERT(ip->i_afp->if_u1.if_extents != NULL); |
366 | #ifdef XFS_NATIVE_HOST | 298 | |
367 | /* | 299 | p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_EXT); |
368 | * There are not delayed allocation extents | 300 | data_bytes = xfs_iextents_copy(ip, p, XFS_ATTR_FORK); |
369 | * for attributes, so just point at the array. | 301 | xlog_finish_iovec(lv, *vecp, data_bytes); |
370 | */ | 302 | |
371 | vecp->i_addr = ip->i_afp->if_u1.if_extents; | 303 | ilf->ilf_asize = data_bytes; |
372 | vecp->i_len = ip->i_afp->if_bytes; | 304 | ilf->ilf_size++; |
373 | vecp->i_type = XLOG_REG_TYPE_IATTR_EXT; | ||
374 | #else | ||
375 | ASSERT(iip->ili_aextents_buf == NULL); | ||
376 | xfs_inode_item_format_extents(ip, vecp, | ||
377 | XFS_ATTR_FORK, XLOG_REG_TYPE_IATTR_EXT); | ||
378 | #endif | ||
379 | iip->ili_format.ilf_asize = vecp->i_len; | ||
380 | vecp++; | ||
381 | nvecs++; | ||
382 | } else { | 305 | } else { |
383 | iip->ili_fields &= ~XFS_ILOG_AEXT; | 306 | iip->ili_fields &= ~XFS_ILOG_AEXT; |
384 | } | 307 | } |
385 | break; | 308 | break; |
386 | |||
387 | case XFS_DINODE_FMT_BTREE: | 309 | case XFS_DINODE_FMT_BTREE: |
388 | iip->ili_fields &= | 310 | iip->ili_fields &= |
389 | ~(XFS_ILOG_ADATA | XFS_ILOG_AEXT); | 311 | ~(XFS_ILOG_ADATA | XFS_ILOG_AEXT); |
@@ -392,61 +314,89 @@ xfs_inode_item_format( | |||
392 | ip->i_afp->if_broot_bytes > 0) { | 314 | ip->i_afp->if_broot_bytes > 0) { |
393 | ASSERT(ip->i_afp->if_broot != NULL); | 315 | ASSERT(ip->i_afp->if_broot != NULL); |
394 | 316 | ||
395 | vecp->i_addr = ip->i_afp->if_broot; | 317 | xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_BROOT, |
396 | vecp->i_len = ip->i_afp->if_broot_bytes; | 318 | ip->i_afp->if_broot, |
397 | vecp->i_type = XLOG_REG_TYPE_IATTR_BROOT; | 319 | ip->i_afp->if_broot_bytes); |
398 | vecp++; | 320 | ilf->ilf_asize = ip->i_afp->if_broot_bytes; |
399 | nvecs++; | 321 | ilf->ilf_size++; |
400 | iip->ili_format.ilf_asize = ip->i_afp->if_broot_bytes; | ||
401 | } else { | 322 | } else { |
402 | iip->ili_fields &= ~XFS_ILOG_ABROOT; | 323 | iip->ili_fields &= ~XFS_ILOG_ABROOT; |
403 | } | 324 | } |
404 | break; | 325 | break; |
405 | |||
406 | case XFS_DINODE_FMT_LOCAL: | 326 | case XFS_DINODE_FMT_LOCAL: |
407 | iip->ili_fields &= | 327 | iip->ili_fields &= |
408 | ~(XFS_ILOG_AEXT | XFS_ILOG_ABROOT); | 328 | ~(XFS_ILOG_AEXT | XFS_ILOG_ABROOT); |
409 | 329 | ||
410 | if ((iip->ili_fields & XFS_ILOG_ADATA) && | 330 | if ((iip->ili_fields & XFS_ILOG_ADATA) && |
411 | ip->i_afp->if_bytes > 0) { | 331 | ip->i_afp->if_bytes > 0) { |
412 | ASSERT(ip->i_afp->if_u1.if_data != NULL); | ||
413 | |||
414 | vecp->i_addr = ip->i_afp->if_u1.if_data; | ||
415 | /* | 332 | /* |
416 | * Round i_bytes up to a word boundary. | 333 | * Round i_bytes up to a word boundary. |
417 | * The underlying memory is guaranteed to | 334 | * The underlying memory is guaranteed to |
418 | * to be there by xfs_idata_realloc(). | 335 | * to be there by xfs_idata_realloc(). |
419 | */ | 336 | */ |
420 | data_bytes = roundup(ip->i_afp->if_bytes, 4); | 337 | data_bytes = roundup(ip->i_afp->if_bytes, 4); |
421 | ASSERT((ip->i_afp->if_real_bytes == 0) || | 338 | ASSERT(ip->i_afp->if_real_bytes == 0 || |
422 | (ip->i_afp->if_real_bytes == data_bytes)); | 339 | ip->i_afp->if_real_bytes == data_bytes); |
423 | vecp->i_len = (int)data_bytes; | 340 | ASSERT(ip->i_afp->if_u1.if_data != NULL); |
424 | vecp->i_type = XLOG_REG_TYPE_IATTR_LOCAL; | 341 | xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_LOCAL, |
425 | vecp++; | 342 | ip->i_afp->if_u1.if_data, |
426 | nvecs++; | 343 | data_bytes); |
427 | iip->ili_format.ilf_asize = (unsigned)data_bytes; | 344 | ilf->ilf_asize = (unsigned)data_bytes; |
345 | ilf->ilf_size++; | ||
428 | } else { | 346 | } else { |
429 | iip->ili_fields &= ~XFS_ILOG_ADATA; | 347 | iip->ili_fields &= ~XFS_ILOG_ADATA; |
430 | } | 348 | } |
431 | break; | 349 | break; |
432 | |||
433 | default: | 350 | default: |
434 | ASSERT(0); | 351 | ASSERT(0); |
435 | break; | 352 | break; |
436 | } | 353 | } |
437 | |||
438 | out: | ||
439 | /* | ||
440 | * Now update the log format that goes out to disk from the in-core | ||
441 | * values. We always write the inode core to make the arithmetic | ||
442 | * games in recovery easier, which isn't a big deal as just about any | ||
443 | * transaction would dirty it anyway. | ||
444 | */ | ||
445 | iip->ili_format.ilf_fields = XFS_ILOG_CORE | | ||
446 | (iip->ili_fields & ~XFS_ILOG_TIMESTAMP); | ||
447 | iip->ili_format.ilf_size = nvecs; | ||
448 | } | 354 | } |
449 | 355 | ||
356 | /* | ||
357 | * This is called to fill in the vector of log iovecs for the given inode | ||
358 | * log item. It fills the first item with an inode log format structure, | ||
359 | * the second with the on-disk inode structure, and a possible third and/or | ||
360 | * fourth with the inode data/extents/b-tree root and inode attributes | ||
361 | * data/extents/b-tree root. | ||
362 | */ | ||
363 | STATIC void | ||
364 | xfs_inode_item_format( | ||
365 | struct xfs_log_item *lip, | ||
366 | struct xfs_log_vec *lv) | ||
367 | { | ||
368 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); | ||
369 | struct xfs_inode *ip = iip->ili_inode; | ||
370 | struct xfs_inode_log_format *ilf; | ||
371 | struct xfs_log_iovec *vecp = NULL; | ||
372 | |||
373 | ilf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_IFORMAT); | ||
374 | ilf->ilf_type = XFS_LI_INODE; | ||
375 | ilf->ilf_ino = ip->i_ino; | ||
376 | ilf->ilf_blkno = ip->i_imap.im_blkno; | ||
377 | ilf->ilf_len = ip->i_imap.im_len; | ||
378 | ilf->ilf_boffset = ip->i_imap.im_boffset; | ||
379 | ilf->ilf_fields = XFS_ILOG_CORE; | ||
380 | ilf->ilf_size = 2; /* format + core */ | ||
381 | xlog_finish_iovec(lv, vecp, sizeof(struct xfs_inode_log_format)); | ||
382 | |||
383 | if (ip->i_d.di_version == 1) | ||
384 | xfs_inode_item_format_v1_inode(ip); | ||
385 | xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ICORE, | ||
386 | &ip->i_d, | ||
387 | xfs_icdinode_size(ip->i_d.di_version)); | ||
388 | |||
389 | xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp); | ||
390 | if (XFS_IFORK_Q(ip)) { | ||
391 | xfs_inode_item_format_attr_fork(iip, ilf, lv, &vecp); | ||
392 | } else { | ||
393 | iip->ili_fields &= | ||
394 | ~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT); | ||
395 | } | ||
396 | |||
397 | /* update the format with the exact fields we actually logged */ | ||
398 | ilf->ilf_fields |= (iip->ili_fields & ~XFS_ILOG_TIMESTAMP); | ||
399 | } | ||
450 | 400 | ||
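After this rewrite, ilf_size starts at 2 (format header plus inode core) and each fork helper only bumps it, and only sets ilf_dsize/ilf_asize, for the regions it actually emits. For instance, an inode with a dirty extent-format data fork and no attribute fork would end up with a vector laid out roughly as follows (a sketch of the intended layout, not literal output):

	/* iovec 0: XLOG_REG_TYPE_IFORMAT - struct xfs_inode_log_format            */
	/* iovec 1: XLOG_REG_TYPE_ICORE   - xfs_icdinode_size(di_version) bytes    */
	/* iovec 2: XLOG_REG_TYPE_IEXT    - extents written by xfs_iextents_copy()  */
	/* ilf_size == 3, ilf_fields == XFS_ILOG_CORE | XFS_ILOG_DEXT (timestamps masked off) */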
451 | /* | 401 | /* |
452 | * This is called to pin the inode associated with the inode log | 402 | * This is called to pin the inode associated with the inode log |
@@ -563,27 +513,6 @@ xfs_inode_item_unlock( | |||
563 | ASSERT(ip->i_itemp != NULL); | 513 | ASSERT(ip->i_itemp != NULL); |
564 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 514 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
565 | 515 | ||
566 | /* | ||
567 | * If the inode needed a separate buffer with which to log | ||
568 | * its extents, then free it now. | ||
569 | */ | ||
570 | if (iip->ili_extents_buf != NULL) { | ||
571 | ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS); | ||
572 | ASSERT(ip->i_d.di_nextents > 0); | ||
573 | ASSERT(iip->ili_fields & XFS_ILOG_DEXT); | ||
574 | ASSERT(ip->i_df.if_bytes > 0); | ||
575 | kmem_free(iip->ili_extents_buf); | ||
576 | iip->ili_extents_buf = NULL; | ||
577 | } | ||
578 | if (iip->ili_aextents_buf != NULL) { | ||
579 | ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS); | ||
580 | ASSERT(ip->i_d.di_anextents > 0); | ||
581 | ASSERT(iip->ili_fields & XFS_ILOG_AEXT); | ||
582 | ASSERT(ip->i_afp->if_bytes > 0); | ||
583 | kmem_free(iip->ili_aextents_buf); | ||
584 | iip->ili_aextents_buf = NULL; | ||
585 | } | ||
586 | |||
587 | lock_flags = iip->ili_lock_flags; | 516 | lock_flags = iip->ili_lock_flags; |
588 | iip->ili_lock_flags = 0; | 517 | iip->ili_lock_flags = 0; |
589 | if (lock_flags) | 518 | if (lock_flags) |
@@ -670,11 +599,6 @@ xfs_inode_item_init( | |||
670 | iip->ili_inode = ip; | 599 | iip->ili_inode = ip; |
671 | xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE, | 600 | xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE, |
672 | &xfs_inode_item_ops); | 601 | &xfs_inode_item_ops); |
673 | iip->ili_format.ilf_type = XFS_LI_INODE; | ||
674 | iip->ili_format.ilf_ino = ip->i_ino; | ||
675 | iip->ili_format.ilf_blkno = ip->i_imap.im_blkno; | ||
676 | iip->ili_format.ilf_len = ip->i_imap.im_len; | ||
677 | iip->ili_format.ilf_boffset = ip->i_imap.im_boffset; | ||
678 | } | 602 | } |
679 | 603 | ||
680 | /* | 604 | /* |
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h index dce4d656768c..488d81254e28 100644 --- a/fs/xfs/xfs_inode_item.h +++ b/fs/xfs/xfs_inode_item.h | |||
@@ -34,11 +34,6 @@ typedef struct xfs_inode_log_item { | |||
34 | unsigned short ili_logged; /* flushed logged data */ | 34 | unsigned short ili_logged; /* flushed logged data */ |
35 | unsigned int ili_last_fields; /* fields when flushed */ | 35 | unsigned int ili_last_fields; /* fields when flushed */ |
36 | unsigned int ili_fields; /* fields to be logged */ | 36 | unsigned int ili_fields; /* fields to be logged */ |
37 | struct xfs_bmbt_rec *ili_extents_buf; /* array of logged | ||
38 | data exts */ | ||
39 | struct xfs_bmbt_rec *ili_aextents_buf; /* array of logged | ||
40 | attr exts */ | ||
41 | xfs_inode_log_format_t ili_format; /* logged structure */ | ||
42 | } xfs_inode_log_item_t; | 37 | } xfs_inode_log_item_t; |
43 | 38 | ||
44 | static inline int xfs_inode_clean(xfs_inode_t *ip) | 39 | static inline int xfs_inode_clean(xfs_inode_t *ip) |
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 33ad9a77791f..518aa56b8f2e 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c | |||
@@ -112,15 +112,11 @@ xfs_find_handle( | |||
112 | memset(&handle.ha_fid, 0, sizeof(handle.ha_fid)); | 112 | memset(&handle.ha_fid, 0, sizeof(handle.ha_fid)); |
113 | hsize = sizeof(xfs_fsid_t); | 113 | hsize = sizeof(xfs_fsid_t); |
114 | } else { | 114 | } else { |
115 | int lock_mode; | ||
116 | |||
117 | lock_mode = xfs_ilock_map_shared(ip); | ||
118 | handle.ha_fid.fid_len = sizeof(xfs_fid_t) - | 115 | handle.ha_fid.fid_len = sizeof(xfs_fid_t) - |
119 | sizeof(handle.ha_fid.fid_len); | 116 | sizeof(handle.ha_fid.fid_len); |
120 | handle.ha_fid.fid_pad = 0; | 117 | handle.ha_fid.fid_pad = 0; |
121 | handle.ha_fid.fid_gen = ip->i_d.di_gen; | 118 | handle.ha_fid.fid_gen = ip->i_d.di_gen; |
122 | handle.ha_fid.fid_ino = ip->i_ino; | 119 | handle.ha_fid.fid_ino = ip->i_ino; |
123 | xfs_iunlock_map_shared(ip, lock_mode); | ||
124 | 120 | ||
125 | hsize = XFS_HSIZE(handle); | 121 | hsize = XFS_HSIZE(handle); |
126 | } | 122 | } |
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 104455b8046c..0ce1d759156e 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c | |||
@@ -459,14 +459,12 @@ xfs_vn_getattr( | |||
459 | 459 | ||
460 | static void | 460 | static void |
461 | xfs_setattr_mode( | 461 | xfs_setattr_mode( |
462 | struct xfs_trans *tp, | ||
463 | struct xfs_inode *ip, | 462 | struct xfs_inode *ip, |
464 | struct iattr *iattr) | 463 | struct iattr *iattr) |
465 | { | 464 | { |
466 | struct inode *inode = VFS_I(ip); | 465 | struct inode *inode = VFS_I(ip); |
467 | umode_t mode = iattr->ia_mode; | 466 | umode_t mode = iattr->ia_mode; |
468 | 467 | ||
469 | ASSERT(tp); | ||
470 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 468 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
471 | 469 | ||
472 | ip->i_d.di_mode &= S_IFMT; | 470 | ip->i_d.di_mode &= S_IFMT; |
@@ -476,6 +474,32 @@ xfs_setattr_mode( | |||
476 | inode->i_mode |= mode & ~S_IFMT; | 474 | inode->i_mode |= mode & ~S_IFMT; |
477 | } | 475 | } |
478 | 476 | ||
477 | static void | ||
478 | xfs_setattr_time( | ||
479 | struct xfs_inode *ip, | ||
480 | struct iattr *iattr) | ||
481 | { | ||
482 | struct inode *inode = VFS_I(ip); | ||
483 | |||
484 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | ||
485 | |||
486 | if (iattr->ia_valid & ATTR_ATIME) { | ||
487 | inode->i_atime = iattr->ia_atime; | ||
488 | ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec; | ||
489 | ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec; | ||
490 | } | ||
491 | if (iattr->ia_valid & ATTR_CTIME) { | ||
492 | inode->i_ctime = iattr->ia_ctime; | ||
493 | ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; | ||
494 | ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; | ||
495 | } | ||
496 | if (iattr->ia_valid & ATTR_MTIME) { | ||
497 | inode->i_mtime = iattr->ia_mtime; | ||
498 | ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; | ||
499 | ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; | ||
500 | } | ||
501 | } | ||
502 | |||
479 | int | 503 | int |
480 | xfs_setattr_nonsize( | 504 | xfs_setattr_nonsize( |
481 | struct xfs_inode *ip, | 505 | struct xfs_inode *ip, |
@@ -630,30 +654,10 @@ xfs_setattr_nonsize( | |||
630 | } | 654 | } |
631 | } | 655 | } |
632 | 656 | ||
633 | /* | ||
634 | * Change file access modes. | ||
635 | */ | ||
636 | if (mask & ATTR_MODE) | 657 | if (mask & ATTR_MODE) |
637 | xfs_setattr_mode(tp, ip, iattr); | 658 | xfs_setattr_mode(ip, iattr); |
638 | 659 | if (mask & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME)) | |
639 | /* | 660 | xfs_setattr_time(ip, iattr); |
640 | * Change file access or modified times. | ||
641 | */ | ||
642 | if (mask & ATTR_ATIME) { | ||
643 | inode->i_atime = iattr->ia_atime; | ||
644 | ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec; | ||
645 | ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec; | ||
646 | } | ||
647 | if (mask & ATTR_CTIME) { | ||
648 | inode->i_ctime = iattr->ia_ctime; | ||
649 | ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; | ||
650 | ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; | ||
651 | } | ||
652 | if (mask & ATTR_MTIME) { | ||
653 | inode->i_mtime = iattr->ia_mtime; | ||
654 | ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; | ||
655 | ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; | ||
656 | } | ||
657 | 661 | ||
658 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 662 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
659 | 663 | ||
@@ -868,22 +872,10 @@ xfs_setattr_size( | |||
868 | xfs_inode_clear_eofblocks_tag(ip); | 872 | xfs_inode_clear_eofblocks_tag(ip); |
869 | } | 873 | } |
870 | 874 | ||
871 | /* | ||
872 | * Change file access modes. | ||
873 | */ | ||
874 | if (mask & ATTR_MODE) | 875 | if (mask & ATTR_MODE) |
875 | xfs_setattr_mode(tp, ip, iattr); | 876 | xfs_setattr_mode(ip, iattr); |
876 | 877 | if (mask & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME)) | |
877 | if (mask & ATTR_CTIME) { | 878 | xfs_setattr_time(ip, iattr); |
878 | inode->i_ctime = iattr->ia_ctime; | ||
879 | ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; | ||
880 | ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; | ||
881 | } | ||
882 | if (mask & ATTR_MTIME) { | ||
883 | inode->i_mtime = iattr->ia_mtime; | ||
884 | ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; | ||
885 | ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; | ||
886 | } | ||
887 | 879 | ||
888 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 880 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
889 | 881 | ||
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index c237ad15d500..f46338285152 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c | |||
@@ -209,9 +209,8 @@ xfs_bulkstat( | |||
209 | xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */ | 209 | xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */ |
210 | xfs_inobt_rec_incore_t *irbufend; /* end of good irec buffer entries */ | 210 | xfs_inobt_rec_incore_t *irbufend; /* end of good irec buffer entries */ |
211 | xfs_ino_t lastino; /* last inode number returned */ | 211 | xfs_ino_t lastino; /* last inode number returned */ |
212 | int nbcluster; /* # of blocks in a cluster */ | 212 | int blks_per_cluster; /* # of blocks per cluster */ |
213 | int nicluster; /* # of inodes in a cluster */ | 213 | int inodes_per_cluster;/* # of inodes per cluster */ |
214 | int nimask; /* mask for inode clusters */ | ||
215 | int nirbuf; /* size of irbuf */ | 214 | int nirbuf; /* size of irbuf */ |
216 | int rval; /* return value error code */ | 215 | int rval; /* return value error code */ |
217 | int tmp; /* result value from btree calls */ | 216 | int tmp; /* result value from btree calls */ |
@@ -243,11 +242,8 @@ xfs_bulkstat( | |||
243 | *done = 0; | 242 | *done = 0; |
244 | fmterror = 0; | 243 | fmterror = 0; |
245 | ubufp = ubuffer; | 244 | ubufp = ubuffer; |
246 | nicluster = mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp) ? | 245 | blks_per_cluster = xfs_icluster_size_fsb(mp); |
247 | mp->m_sb.sb_inopblock : | 246 | inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog; |
248 | (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog); | ||
249 | nimask = ~(nicluster - 1); | ||
250 | nbcluster = nicluster >> mp->m_sb.sb_inopblog; | ||
251 | irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4); | 247 | irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4); |
252 | if (!irbuf) | 248 | if (!irbuf) |
253 | return ENOMEM; | 249 | return ENOMEM; |
@@ -390,12 +386,12 @@ xfs_bulkstat( | |||
390 | agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino); | 386 | agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino); |
391 | for (chunkidx = 0; | 387 | for (chunkidx = 0; |
392 | chunkidx < XFS_INODES_PER_CHUNK; | 388 | chunkidx < XFS_INODES_PER_CHUNK; |
393 | chunkidx += nicluster, | 389 | chunkidx += inodes_per_cluster, |
394 | agbno += nbcluster) { | 390 | agbno += blks_per_cluster) { |
395 | if (xfs_inobt_maskn(chunkidx, nicluster) | 391 | if (xfs_inobt_maskn(chunkidx, |
396 | & ~r.ir_free) | 392 | inodes_per_cluster) & ~r.ir_free) |
397 | xfs_btree_reada_bufs(mp, agno, | 393 | xfs_btree_reada_bufs(mp, agno, |
398 | agbno, nbcluster, | 394 | agbno, blks_per_cluster, |
399 | &xfs_inode_buf_ops); | 395 | &xfs_inode_buf_ops); |
400 | } | 396 | } |
401 | blk_finish_plug(&plug); | 397 | blk_finish_plug(&plug); |
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h index e148719e0a5d..b0f4ef77fa70 100644 --- a/fs/xfs/xfs_log.h +++ b/fs/xfs/xfs_log.h | |||
@@ -30,6 +30,52 @@ struct xfs_log_vec { | |||
30 | 30 | ||
31 | #define XFS_LOG_VEC_ORDERED (-1) | 31 | #define XFS_LOG_VEC_ORDERED (-1) |
32 | 32 | ||
33 | static inline void * | ||
34 | xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp, | ||
35 | uint type) | ||
36 | { | ||
37 | struct xfs_log_iovec *vec = *vecp; | ||
38 | |||
39 | if (vec) { | ||
40 | ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs); | ||
41 | vec++; | ||
42 | } else { | ||
43 | vec = &lv->lv_iovecp[0]; | ||
44 | } | ||
45 | |||
46 | vec->i_type = type; | ||
47 | vec->i_addr = lv->lv_buf + lv->lv_buf_len; | ||
48 | |||
49 | ASSERT(IS_ALIGNED((unsigned long)vec->i_addr, sizeof(uint64_t))); | ||
50 | |||
51 | *vecp = vec; | ||
52 | return vec->i_addr; | ||
53 | } | ||
54 | |||
55 | static inline void | ||
56 | xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec, int len) | ||
57 | { | ||
58 | /* | ||
59 | * We need to make sure the next buffer is naturally aligned for the | ||
60 | * biggest basic data type we put into it. We already accounted for | ||
61 | * this when sizing the buffer. | ||
62 | */ | ||
63 | lv->lv_buf_len += round_up(len, sizeof(uint64_t)); | ||
64 | vec->i_len = len; | ||
65 | } | ||
66 | |||
67 | static inline void * | ||
68 | xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp, | ||
69 | uint type, void *data, int len) | ||
70 | { | ||
71 | void *buf; | ||
72 | |||
73 | buf = xlog_prepare_iovec(lv, vecp, type); | ||
74 | memcpy(buf, data, len); | ||
75 | xlog_finish_iovec(lv, *vecp, len); | ||
76 | return buf; | ||
77 | } | ||
78 | |||
33 | /* | 79 | /* |
34 | * Structure used to pass callback function and the function's argument | 80 | * Structure used to pass callback function and the function's argument |
35 | * to the log manager. | 81 | * to the log manager. |
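The three inline helpers added here let an ->iop_format implementation write its regions straight into the CIL-allocated buffer: xlog_prepare_iovec() returns the next 64-bit-aligned slot, the item formats or copies its data into it, and xlog_finish_iovec() records the length while keeping lv_buf_len aligned for the next region; xlog_copy_iovec() wraps the common prepare/memcpy/finish case. A minimal sketch of a callback built on them, mirroring the inode-item conversion earlier in this diff — the xfs_foo_* item, FOO_ITEM() and xfs_foo_copy_payload() are made-up names for illustration:

	STATIC void
	xfs_foo_item_format(
		struct xfs_log_item	*lip,
		struct xfs_log_vec	*lv)
	{
		struct xfs_foo_log_item	*fip = FOO_ITEM(lip);
		struct xfs_log_iovec	*vecp = NULL;
		void			*buf;
		int			len;

		/* fixed-size region: copy it wholesale */
		xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_IFORMAT,
				&fip->foo_format, sizeof(fip->foo_format));

		/* variable-size region: format in place, then record the length */
		buf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_IEXT);
		len = xfs_foo_copy_payload(fip, buf);
		xlog_finish_iovec(lv, vecp, len);
	}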
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index 5eb51fc5eb84..cdebd832c3db 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c | |||
@@ -82,36 +82,6 @@ xlog_cil_init_post_recovery( | |||
82 | log->l_curr_block); | 82 | log->l_curr_block); |
83 | } | 83 | } |
84 | 84 | ||
85 | STATIC int | ||
86 | xlog_cil_lv_item_format( | ||
87 | struct xfs_log_item *lip, | ||
88 | struct xfs_log_vec *lv) | ||
89 | { | ||
90 | int index; | ||
91 | char *ptr; | ||
92 | |||
93 | /* format new vectors into array */ | ||
94 | lip->li_ops->iop_format(lip, lv->lv_iovecp); | ||
95 | |||
96 | /* copy data into existing array */ | ||
97 | ptr = lv->lv_buf; | ||
98 | for (index = 0; index < lv->lv_niovecs; index++) { | ||
99 | struct xfs_log_iovec *vec = &lv->lv_iovecp[index]; | ||
100 | |||
101 | memcpy(ptr, vec->i_addr, vec->i_len); | ||
102 | vec->i_addr = ptr; | ||
103 | ptr += vec->i_len; | ||
104 | } | ||
105 | |||
106 | /* | ||
107 | * some size calculations for log vectors over-estimate, so the caller | ||
108 | * doesn't know the amount of space actually used by the item. Return | ||
109 | * the byte count to the caller so they can check and store it | ||
110 | * appropriately. | ||
111 | */ | ||
112 | return ptr - lv->lv_buf; | ||
113 | } | ||
114 | |||
115 | /* | 85 | /* |
116 | * Prepare the log item for insertion into the CIL. Calculate the difference in | 86 | * Prepare the log item for insertion into the CIL. Calculate the difference in |
117 | * log space and vectors it will consume, and if it is a new item pin it as | 87 | * log space and vectors it will consume, and if it is a new item pin it as |
@@ -232,6 +202,13 @@ xlog_cil_insert_format_items( | |||
232 | nbytes = 0; | 202 | nbytes = 0; |
233 | } | 203 | } |
234 | 204 | ||
205 | /* | ||
206 | * We 64-bit align the length of each iovec so that the start | ||
207 | * of the next one is naturally aligned. We'll need to | ||
208 | * account for that slack space here. | ||
209 | */ | ||
210 | nbytes += niovecs * sizeof(uint64_t); | ||
211 | |||
235 | /* grab the old item if it exists for reservation accounting */ | 212 | /* grab the old item if it exists for reservation accounting */ |
236 | old_lv = lip->li_lv; | 213 | old_lv = lip->li_lv; |
237 | 214 | ||
@@ -254,34 +231,27 @@ xlog_cil_insert_format_items( | |||
254 | */ | 231 | */ |
255 | *diff_iovecs -= lv->lv_niovecs; | 232 | *diff_iovecs -= lv->lv_niovecs; |
256 | *diff_len -= lv->lv_buf_len; | 233 | *diff_len -= lv->lv_buf_len; |
257 | 234 | } else { | |
258 | /* Ensure the lv is set up according to ->iop_size */ | 235 | /* allocate new data chunk */ |
259 | lv->lv_niovecs = niovecs; | 236 | lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS); |
260 | lv->lv_buf = (char *)lv + buf_size - nbytes; | 237 | lv->lv_item = lip; |
261 | 238 | lv->lv_size = buf_size; | |
262 | lv->lv_buf_len = xlog_cil_lv_item_format(lip, lv); | 239 | if (ordered) { |
263 | goto insert; | 240 | /* track as an ordered logvec */ |
241 | ASSERT(lip->li_lv == NULL); | ||
242 | lv->lv_buf_len = XFS_LOG_VEC_ORDERED; | ||
243 | goto insert; | ||
244 | } | ||
245 | lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1]; | ||
264 | } | 246 | } |
265 | 247 | ||
266 | /* allocate new data chunk */ | 248 | /* Ensure the lv is set up according to ->iop_size */ |
267 | lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS); | ||
268 | lv->lv_item = lip; | ||
269 | lv->lv_size = buf_size; | ||
270 | lv->lv_niovecs = niovecs; | 249 | lv->lv_niovecs = niovecs; |
271 | if (ordered) { | ||
272 | /* track as an ordered logvec */ | ||
273 | ASSERT(lip->li_lv == NULL); | ||
274 | lv->lv_buf_len = XFS_LOG_VEC_ORDERED; | ||
275 | goto insert; | ||
276 | } | ||
277 | |||
278 | /* The allocated iovec region lies beyond the log vector. */ | ||
279 | lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1]; | ||
280 | 250 | ||
281 | /* The allocated data region lies beyond the iovec region */ | 251 | /* The allocated data region lies beyond the iovec region */ |
252 | lv->lv_buf_len = 0; | ||
282 | lv->lv_buf = (char *)lv + buf_size - nbytes; | 253 | lv->lv_buf = (char *)lv + buf_size - nbytes; |
283 | 254 | lip->li_ops->iop_format(lip, lv); | |
284 | lv->lv_buf_len = xlog_cil_lv_item_format(lip, lv); | ||
285 | insert: | 255 | insert: |
286 | ASSERT(lv->lv_buf_len <= nbytes); | 256 | ASSERT(lv->lv_buf_len <= nbytes); |
287 | xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs); | 257 | xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs); |
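The reservation tweak above pairs with the alignment done in xlog_finish_iovec(): every region's length is rounded up to 8 bytes inside lv_buf, so adding niovecs * sizeof(uint64_t) to nbytes up front is a safe over-estimate of the slack. As a worked example with hypothetical sizes, an item reporting three regions of 56, 52 and 20 bytes consumes 56 + round_up(52, 8) + round_up(20, 8) = 56 + 56 + 24 = 136 bytes of lv_buf, while the size calculation reserves 128 + 3 * 8 = 152 — enough headroom, which is what the ASSERT(lv->lv_buf_len <= nbytes) just above checks.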
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index eae16920655b..bce53ac81096 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
@@ -1654,6 +1654,7 @@ xlog_recover_reorder_trans( | |||
1654 | int pass) | 1654 | int pass) |
1655 | { | 1655 | { |
1656 | xlog_recover_item_t *item, *n; | 1656 | xlog_recover_item_t *item, *n; |
1657 | int error = 0; | ||
1657 | LIST_HEAD(sort_list); | 1658 | LIST_HEAD(sort_list); |
1658 | LIST_HEAD(cancel_list); | 1659 | LIST_HEAD(cancel_list); |
1659 | LIST_HEAD(buffer_list); | 1660 | LIST_HEAD(buffer_list); |
@@ -1695,9 +1696,17 @@ xlog_recover_reorder_trans( | |||
1695 | "%s: unrecognized type of log operation", | 1696 | "%s: unrecognized type of log operation", |
1696 | __func__); | 1697 | __func__); |
1697 | ASSERT(0); | 1698 | ASSERT(0); |
1698 | return XFS_ERROR(EIO); | 1699 | /* |
1700 | * return the remaining items back to the transaction | ||
1701 | * item list so they can be freed in caller. | ||
1702 | */ | ||
1703 | if (!list_empty(&sort_list)) | ||
1704 | list_splice_init(&sort_list, &trans->r_itemq); | ||
1705 | error = XFS_ERROR(EIO); | ||
1706 | goto out; | ||
1699 | } | 1707 | } |
1700 | } | 1708 | } |
1709 | out: | ||
1701 | ASSERT(list_empty(&sort_list)); | 1710 | ASSERT(list_empty(&sort_list)); |
1702 | if (!list_empty(&buffer_list)) | 1711 | if (!list_empty(&buffer_list)) |
1703 | list_splice(&buffer_list, &trans->r_itemq); | 1712 | list_splice(&buffer_list, &trans->r_itemq); |
@@ -1707,7 +1716,7 @@ xlog_recover_reorder_trans( | |||
1707 | list_splice_tail(&inode_buffer_list, &trans->r_itemq); | 1716 | list_splice_tail(&inode_buffer_list, &trans->r_itemq); |
1708 | if (!list_empty(&cancel_list)) | 1717 | if (!list_empty(&cancel_list)) |
1709 | list_splice_tail(&cancel_list, &trans->r_itemq); | 1718 | list_splice_tail(&cancel_list, &trans->r_itemq); |
1710 | return 0; | 1719 | return error; |
1711 | } | 1720 | } |
1712 | 1721 | ||
1713 | /* | 1722 | /* |
@@ -2517,19 +2526,19 @@ xlog_recover_buffer_pass2( | |||
2517 | * | 2526 | * |
2518 | * Also make sure that only inode buffers with good sizes stay in | 2527 | * Also make sure that only inode buffers with good sizes stay in |
2519 | * the buffer cache. The kernel moves inodes in buffers of 1 block | 2528 | * the buffer cache. The kernel moves inodes in buffers of 1 block |
2520 | * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger. The inode | 2529 | * or mp->m_inode_cluster_size bytes, whichever is bigger. The inode |
2521 | * buffers in the log can be a different size if the log was generated | 2530 | * buffers in the log can be a different size if the log was generated |
2522 | * by an older kernel using unclustered inode buffers or a newer kernel | 2531 | * by an older kernel using unclustered inode buffers or a newer kernel |
2523 | * running with a different inode cluster size. Regardless, if the | 2532 | * running with a different inode cluster size. Regardless, if the |
2524 | * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE) | 2533 | * the inode buffer size isn't MAX(blocksize, mp->m_inode_cluster_size) |
2525 | * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep | 2534 | * for *our* value of mp->m_inode_cluster_size, then we need to keep |
2526 | * the buffer out of the buffer cache so that the buffer won't | 2535 | * the buffer out of the buffer cache so that the buffer won't |
2527 | * overlap with future reads of those inodes. | 2536 | * overlap with future reads of those inodes. |
2528 | */ | 2537 | */ |
2529 | if (XFS_DINODE_MAGIC == | 2538 | if (XFS_DINODE_MAGIC == |
2530 | be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) && | 2539 | be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) && |
2531 | (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize, | 2540 | (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize, |
2532 | (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) { | 2541 | (__uint32_t)log->l_mp->m_inode_cluster_size))) { |
2533 | xfs_buf_stale(bp); | 2542 | xfs_buf_stale(bp); |
2534 | error = xfs_bwrite(bp); | 2543 | error = xfs_bwrite(bp); |
2535 | } else { | 2544 | } else { |
@@ -3202,10 +3211,10 @@ xlog_recover_do_icreate_pass2( | |||
3202 | } | 3211 | } |
3203 | 3212 | ||
3204 | /* existing allocation is fixed value */ | 3213 | /* existing allocation is fixed value */ |
3205 | ASSERT(count == XFS_IALLOC_INODES(mp)); | 3214 | ASSERT(count == mp->m_ialloc_inos); |
3206 | ASSERT(length == XFS_IALLOC_BLOCKS(mp)); | 3215 | ASSERT(length == mp->m_ialloc_blks); |
3207 | if (count != XFS_IALLOC_INODES(mp) || | 3216 | if (count != mp->m_ialloc_inos || |
3208 | length != XFS_IALLOC_BLOCKS(mp)) { | 3217 | length != mp->m_ialloc_blks) { |
3209 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2"); | 3218 | xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2"); |
3210 | return EINVAL; | 3219 | return EINVAL; |
3211 | } | 3220 | } |
@@ -3611,8 +3620,10 @@ xlog_recover_process_data( | |||
3611 | error = XFS_ERROR(EIO); | 3620 | error = XFS_ERROR(EIO); |
3612 | break; | 3621 | break; |
3613 | } | 3622 | } |
3614 | if (error) | 3623 | if (error) { |
3624 | xlog_recover_free_trans(trans); | ||
3615 | return error; | 3625 | return error; |
3626 | } | ||
3616 | } | 3627 | } |
3617 | dp += be32_to_cpu(ohead->oh_len); | 3628 | dp += be32_to_cpu(ohead->oh_len); |
3618 | num_logops--; | 3629 | num_logops--; |
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index dd88f0e27bd8..348e4d2ed6e6 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c | |||
@@ -1222,16 +1222,18 @@ xfs_qm_dqiterate( | |||
1222 | lblkno = 0; | 1222 | lblkno = 0; |
1223 | maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes); | 1223 | maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes); |
1224 | do { | 1224 | do { |
1225 | uint lock_mode; | ||
1226 | |||
1225 | nmaps = XFS_DQITER_MAP_SIZE; | 1227 | nmaps = XFS_DQITER_MAP_SIZE; |
1226 | /* | 1228 | /* |
1227 | * We aren't changing the inode itself. Just changing | 1229 | * We aren't changing the inode itself. Just changing |
1228 | * some of its data. No new blocks are added here, and | 1230 | * some of its data. No new blocks are added here, and |
1229 | * the inode is never added to the transaction. | 1231 | * the inode is never added to the transaction. |
1230 | */ | 1232 | */ |
1231 | xfs_ilock(qip, XFS_ILOCK_SHARED); | 1233 | lock_mode = xfs_ilock_data_map_shared(qip); |
1232 | error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno, | 1234 | error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno, |
1233 | map, &nmaps, 0); | 1235 | map, &nmaps, 0); |
1234 | xfs_iunlock(qip, XFS_ILOCK_SHARED); | 1236 | xfs_iunlock(qip, lock_mode); |
1235 | if (error) | 1237 | if (error) |
1236 | break; | 1238 | break; |
1237 | 1239 | ||
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h index a788b66a5cb1..797fd4636273 100644 --- a/fs/xfs/xfs_qm.h +++ b/fs/xfs/xfs_qm.h | |||
@@ -20,13 +20,29 @@ | |||
20 | 20 | ||
21 | #include "xfs_dquot_item.h" | 21 | #include "xfs_dquot_item.h" |
22 | #include "xfs_dquot.h" | 22 | #include "xfs_dquot.h" |
23 | #include "xfs_quota_priv.h" | ||
24 | 23 | ||
25 | struct xfs_inode; | 24 | struct xfs_inode; |
26 | 25 | ||
27 | extern struct kmem_zone *xfs_qm_dqtrxzone; | 26 | extern struct kmem_zone *xfs_qm_dqtrxzone; |
28 | 27 | ||
29 | /* | 28 | /* |
29 | * Number of bmaps that we ask from bmapi when doing a quotacheck. | ||
30 | * We make this restriction to keep the memory usage to a minimum. | ||
31 | */ | ||
32 | #define XFS_DQITER_MAP_SIZE 10 | ||
33 | |||
34 | #define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \ | ||
35 | !dqp->q_core.d_blk_hardlimit && \ | ||
36 | !dqp->q_core.d_blk_softlimit && \ | ||
37 | !dqp->q_core.d_rtb_hardlimit && \ | ||
38 | !dqp->q_core.d_rtb_softlimit && \ | ||
39 | !dqp->q_core.d_ino_hardlimit && \ | ||
40 | !dqp->q_core.d_ino_softlimit && \ | ||
41 | !dqp->q_core.d_bcount && \ | ||
42 | !dqp->q_core.d_rtbcount && \ | ||
43 | !dqp->q_core.d_icount) | ||
44 | |||
45 | /* | ||
30 | * This defines the unit of allocation of dquots. | 46 | * This defines the unit of allocation of dquots. |
31 | * Currently, it is just one file system block, and a 4K blk contains 30 | 47 | * Currently, it is just one file system block, and a 4K blk contains 30 |
32 | * (136 * 30 = 4080) dquots. It's probably not worth trying to make | 48 | * (136 * 30 = 4080) dquots. It's probably not worth trying to make |
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c index 437c9198031a..3daf5ea1eb8d 100644 --- a/fs/xfs/xfs_qm_syscalls.c +++ b/fs/xfs/xfs_qm_syscalls.c | |||
@@ -278,7 +278,7 @@ xfs_qm_scall_trunc_qfiles( | |||
278 | xfs_mount_t *mp, | 278 | xfs_mount_t *mp, |
279 | uint flags) | 279 | uint flags) |
280 | { | 280 | { |
281 | int error = 0, error2 = 0; | 281 | int error; |
282 | 282 | ||
283 | if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) { | 283 | if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) { |
284 | xfs_debug(mp, "%s: flags=%x m_qflags=%x", | 284 | xfs_debug(mp, "%s: flags=%x m_qflags=%x", |
@@ -286,14 +286,20 @@ xfs_qm_scall_trunc_qfiles( | |||
286 | return XFS_ERROR(EINVAL); | 286 | return XFS_ERROR(EINVAL); |
287 | } | 287 | } |
288 | 288 | ||
289 | if (flags & XFS_DQ_USER) | 289 | if (flags & XFS_DQ_USER) { |
290 | error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino); | 290 | error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino); |
291 | if (flags & XFS_DQ_GROUP) | 291 | if (error) |
292 | error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino); | 292 | return error; |
293 | } | ||
294 | if (flags & XFS_DQ_GROUP) { | ||
295 | error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino); | ||
296 | if (error) | ||
297 | return error; | ||
298 | } | ||
293 | if (flags & XFS_DQ_PROJ) | 299 | if (flags & XFS_DQ_PROJ) |
294 | error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino); | 300 | error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino); |
295 | 301 | ||
296 | return error ? error : error2; | 302 | return error; |
297 | } | 303 | } |
298 | 304 | ||
299 | /* | 305 | /* |
diff --git a/fs/xfs/xfs_quota_priv.h b/fs/xfs/xfs_quota_priv.h deleted file mode 100644 index 6d86219d93da..000000000000 --- a/fs/xfs/xfs_quota_priv.h +++ /dev/null | |||
@@ -1,42 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2000-2003 Silicon Graphics, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | #ifndef __XFS_QUOTA_PRIV_H__ | ||
19 | #define __XFS_QUOTA_PRIV_H__ | ||
20 | |||
21 | /* | ||
22 | * Number of bmaps that we ask from bmapi when doing a quotacheck. | ||
23 | * We make this restriction to keep the memory usage to a minimum. | ||
24 | */ | ||
25 | #define XFS_DQITER_MAP_SIZE 10 | ||
26 | |||
27 | #define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \ | ||
28 | !dqp->q_core.d_blk_hardlimit && \ | ||
29 | !dqp->q_core.d_blk_softlimit && \ | ||
30 | !dqp->q_core.d_rtb_hardlimit && \ | ||
31 | !dqp->q_core.d_rtb_softlimit && \ | ||
32 | !dqp->q_core.d_ino_hardlimit && \ | ||
33 | !dqp->q_core.d_ino_softlimit && \ | ||
34 | !dqp->q_core.d_bcount && \ | ||
35 | !dqp->q_core.d_rtbcount && \ | ||
36 | !dqp->q_core.d_icount) | ||
37 | |||
38 | #define DQFLAGTO_TYPESTR(d) (((d)->dq_flags & XFS_DQ_USER) ? "USR" : \ | ||
39 | (((d)->dq_flags & XFS_DQ_GROUP) ? "GRP" : \ | ||
40 | (((d)->dq_flags & XFS_DQ_PROJ) ? "PRJ":"???"))) | ||
41 | |||
42 | #endif /* __XFS_QUOTA_PRIV_H__ */ | ||
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 9b96d35e483d..b5bc1ab3c4da 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h | |||
@@ -64,7 +64,7 @@ typedef struct xfs_log_item { | |||
64 | 64 | ||
65 | struct xfs_item_ops { | 65 | struct xfs_item_ops { |
66 | void (*iop_size)(xfs_log_item_t *, int *, int *); | 66 | void (*iop_size)(xfs_log_item_t *, int *, int *); |
67 | void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *); | 67 | void (*iop_format)(xfs_log_item_t *, struct xfs_log_vec *); |
68 | void (*iop_pin)(xfs_log_item_t *); | 68 | void (*iop_pin)(xfs_log_item_t *); |
69 | void (*iop_unpin)(xfs_log_item_t *, int remove); | 69 | void (*iop_unpin)(xfs_log_item_t *, int remove); |
70 | uint (*iop_push)(struct xfs_log_item *, struct list_head *); | 70 | uint (*iop_push)(struct xfs_log_item *, struct list_head *); |
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c index cd2a10e15d3a..41172861e857 100644 --- a/fs/xfs/xfs_trans_dquot.c +++ b/fs/xfs/xfs_trans_dquot.c | |||
@@ -295,8 +295,8 @@ xfs_trans_mod_dquot( | |||
295 | /* | 295 | /* |
296 | * Given an array of dqtrx structures, lock all the dquots associated and join | 296 | * Given an array of dqtrx structures, lock all the dquots associated and join |
297 | * them to the transaction, provided they have been modified. We know that the | 297 | * them to the transaction, provided they have been modified. We know that the |
298 | * highest number of dquots of one type - usr, grp OR prj - involved in a | 298 | * highest number of dquots of one type - usr, grp and prj - involved in a |
299 | * transaction is 2 so we don't need to make this very generic. | 299 | * transaction is 3 so we don't need to make this very generic. |
300 | */ | 300 | */ |
301 | STATIC void | 301 | STATIC void |
302 | xfs_trans_dqlockedjoin( | 302 | xfs_trans_dqlockedjoin( |
diff --git a/fs/xfs/xfs_trans_resv.c b/fs/xfs/xfs_trans_resv.c index 2fd59c0dae66..2ffd3e331b49 100644 --- a/fs/xfs/xfs_trans_resv.c +++ b/fs/xfs/xfs_trans_resv.c | |||
@@ -174,7 +174,7 @@ xfs_calc_itruncate_reservation( | |||
174 | xfs_calc_buf_res(5, 0) + | 174 | xfs_calc_buf_res(5, 0) + |
175 | xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1), | 175 | xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1), |
176 | XFS_FSB_TO_B(mp, 1)) + | 176 | XFS_FSB_TO_B(mp, 1)) + |
177 | xfs_calc_buf_res(2 + XFS_IALLOC_BLOCKS(mp) + | 177 | xfs_calc_buf_res(2 + mp->m_ialloc_blks + |
178 | mp->m_in_maxlevels, 0))); | 178 | mp->m_in_maxlevels, 0))); |
179 | } | 179 | } |
180 | 180 | ||
@@ -282,7 +282,7 @@ xfs_calc_create_resv_modify( | |||
282 | * For create we can allocate some inodes giving: | 282 | * For create we can allocate some inodes giving: |
283 | * the agi and agf of the ag getting the new inodes: 2 * sectorsize | 283 | * the agi and agf of the ag getting the new inodes: 2 * sectorsize |
284 | * the superblock for the nlink flag: sector size | 284 | * the superblock for the nlink flag: sector size |
285 | * the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize | 285 | * the inode blocks allocated: mp->m_ialloc_blks * blocksize |
286 | * the inode btree: max depth * blocksize | 286 | * the inode btree: max depth * blocksize |
287 | * the allocation btrees: 2 trees * (max depth - 1) * block size | 287 | * the allocation btrees: 2 trees * (max depth - 1) * block size |
288 | */ | 288 | */ |
@@ -292,7 +292,7 @@ xfs_calc_create_resv_alloc( | |||
292 | { | 292 | { |
293 | return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) + | 293 | return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) + |
294 | mp->m_sb.sb_sectsize + | 294 | mp->m_sb.sb_sectsize + |
295 | xfs_calc_buf_res(XFS_IALLOC_BLOCKS(mp), XFS_FSB_TO_B(mp, 1)) + | 295 | xfs_calc_buf_res(mp->m_ialloc_blks, XFS_FSB_TO_B(mp, 1)) + |
296 | xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) + | 296 | xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) + |
297 | xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1), | 297 | xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1), |
298 | XFS_FSB_TO_B(mp, 1)); | 298 | XFS_FSB_TO_B(mp, 1)); |
@@ -385,9 +385,9 @@ xfs_calc_ifree_reservation( | |||
385 | xfs_calc_inode_res(mp, 1) + | 385 | xfs_calc_inode_res(mp, 1) + |
386 | xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) + | 386 | xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) + |
387 | xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, 1)) + | 387 | xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, 1)) + |
388 | max_t(uint, XFS_FSB_TO_B(mp, 1), XFS_INODE_CLUSTER_SIZE(mp)) + | 388 | max_t(uint, XFS_FSB_TO_B(mp, 1), mp->m_inode_cluster_size) + |
389 | xfs_calc_buf_res(1, 0) + | 389 | xfs_calc_buf_res(1, 0) + |
390 | xfs_calc_buf_res(2 + XFS_IALLOC_BLOCKS(mp) + | 390 | xfs_calc_buf_res(2 + mp->m_ialloc_blks + |
391 | mp->m_in_maxlevels, 0) + | 391 | mp->m_in_maxlevels, 0) + |
392 | xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1), | 392 | xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1), |
393 | XFS_FSB_TO_B(mp, 1)); | 393 | XFS_FSB_TO_B(mp, 1)); |
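In the tree this change targets, XFS_IALLOC_BLOCKS() and XFS_INODE_CLUSTER_SIZE() were thin wrappers around fields of struct xfs_mount, so the substitutions above are mechanical rather than behavioural. The assumed prior definitions (shown for context only; they are not part of this hunk) were roughly:

	/* Assumed old wrappers, now replaced by direct field access: */
	#define XFS_IALLOC_BLOCKS(mp)		(mp)->m_ialloc_blks
	#define XFS_INODE_CLUSTER_SIZE(mp)	(mp)->m_inode_cluster_size

With those expansions, reservation formulas such as xfs_calc_create_resv_alloc() and xfs_calc_ifree_reservation() compute the same number of bytes before and after this change.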
diff --git a/fs/xfs/xfs_trans_space.h b/fs/xfs/xfs_trans_space.h index 7d2c920dfb9c..af5dbe06cb65 100644 --- a/fs/xfs/xfs_trans_space.h +++ b/fs/xfs/xfs_trans_space.h | |||
@@ -47,7 +47,7 @@ | |||
47 | #define XFS_DIRREMOVE_SPACE_RES(mp) \ | 47 | #define XFS_DIRREMOVE_SPACE_RES(mp) \ |
48 | XFS_DAREMOVE_SPACE_RES(mp, XFS_DATA_FORK) | 48 | XFS_DAREMOVE_SPACE_RES(mp, XFS_DATA_FORK) |
49 | #define XFS_IALLOC_SPACE_RES(mp) \ | 49 | #define XFS_IALLOC_SPACE_RES(mp) \ |
50 | (XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels - 1) | 50 | ((mp)->m_ialloc_blks + (mp)->m_in_maxlevels - 1) |
51 | 51 | ||
52 | /* | 52 | /* |
53 | * Space reservation values for various transactions. | 53 | * Space reservation values for various transactions. |
diff --git a/fs/xfs/xfs_vnode.h b/fs/xfs/xfs_vnode.h index 3e8e797c6d11..e8a77383c0d5 100644 --- a/fs/xfs/xfs_vnode.h +++ b/fs/xfs/xfs_vnode.h | |||
@@ -35,15 +35,6 @@ struct attrlist_cursor_kern; | |||
35 | { IO_INVIS, "INVIS"} | 35 | { IO_INVIS, "INVIS"} |
36 | 36 | ||
37 | /* | 37 | /* |
38 | * Flush/Invalidate options for vop_toss/flush/flushinval_pages. | ||
39 | */ | ||
40 | #define FI_NONE 0 /* none */ | ||
41 | #define FI_REMAPF 1 /* Do a remapf prior to the operation */ | ||
42 | #define FI_REMAPF_LOCKED 2 /* Do a remapf prior to the operation. | ||
43 | Prevent VM access to the pages until | ||
44 | the operation completes. */ | ||
45 | |||
46 | /* | ||
47 | * Some useful predicates. | 38 | * Some useful predicates. |
48 | */ | 39 | */ |
49 | #define VN_MAPPED(vp) mapping_mapped(vp->i_mapping) | 40 | #define VN_MAPPED(vp) mapping_mapped(vp->i_mapping) |