Diffstat (limited to 'fs/xfs')
-rw-r--r--   fs/xfs/libxfs/xfs_bmap.c          |  5
-rw-r--r--   fs/xfs/libxfs/xfs_ialloc_btree.c  | 11
-rw-r--r--   fs/xfs/xfs_bmap_util.c            | 10
-rw-r--r--   fs/xfs/xfs_bmap_util.h            |  3
-rw-r--r--   fs/xfs/xfs_buf_item.c             | 28
-rw-r--r--   fs/xfs/xfs_file.c                 |  2
-rw-r--r--   fs/xfs/xfs_reflink.c              | 18
-rw-r--r--   fs/xfs/xfs_trace.h                |  5
8 files changed, 56 insertions, 26 deletions
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 74d7228e755b..19e921d1586f 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1694,10 +1694,13 @@ xfs_bmap_add_extent_delay_real(
 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
 		/*
 		 * Filling in all of a previously delayed allocation extent.
-		 * The right neighbor is contiguous, the left is not.
+		 * The right neighbor is contiguous, the left is not. Take care
+		 * with delay -> unwritten extent allocation here because the
+		 * delalloc record we are overwriting is always written.
 		 */
 		PREV.br_startblock = new->br_startblock;
 		PREV.br_blockcount += RIGHT.br_blockcount;
+		PREV.br_state = new->br_state;
 
 		xfs_iext_next(ifp, &bma->icur);
 		xfs_iext_remove(bma->ip, &bma->icur, state);
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 86c50208a143..7fbf8af0b159 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -538,15 +538,18 @@ xfs_inobt_rec_check_count(
 
 static xfs_extlen_t
 xfs_inobt_max_size(
-	struct xfs_mount	*mp)
+	struct xfs_mount	*mp,
+	xfs_agnumber_t		agno)
 {
+	xfs_agblock_t		agblocks = xfs_ag_block_count(mp, agno);
+
 	/* Bail out if we're uninitialized, which can happen in mkfs. */
 	if (mp->m_inobt_mxr[0] == 0)
 		return 0;
 
 	return xfs_btree_calc_size(mp->m_inobt_mnr,
-			(uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
+			(uint64_t)agblocks * mp->m_sb.sb_inopblock /
 				XFS_INODES_PER_CHUNK);
 }
 
 static int
@@ -594,7 +597,7 @@ xfs_finobt_calc_reserves(
 	if (error)
 		return error;
 
-	*ask += xfs_inobt_max_size(mp);
+	*ask += xfs_inobt_max_size(mp, agno);
 	*used += tree_len;
 	return 0;
 }
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 5d263dfdb3bc..404e581f1ea1 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1042,7 +1042,7 @@ out_trans_cancel:
 	goto out_unlock;
 }
 
-static int
+int
 xfs_flush_unmap_range(
 	struct xfs_inode	*ip,
 	xfs_off_t		offset,
@@ -1195,13 +1195,7 @@ xfs_prepare_shift(
 	 * Writeback and invalidate cache for the remainder of the file as we're
 	 * about to shift down every extent from offset to EOF.
 	 */
-	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, offset, -1);
-	if (error)
-		return error;
-	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
-					offset >> PAGE_SHIFT, -1);
-	if (error)
-		return error;
+	error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
 
 	/*
 	 * Clean out anything hanging around in the cow fork now that
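The xfs_prepare_shift() hunk replaces an open-coded flush-and-invalidate with the now-exported xfs_flush_unmap_range() helper. As a rough sketch of what that sequence does (reconstructed from the removed lines, not copied from the helper's actual body, which may round the range and do more):

#include <linux/fs.h>
#include <linux/pagemap.h>

/*
 * Sketch only: write back any dirty pagecache over [offset, offset + len)
 * and then invalidate it, so later reads and remapping see the on-disk
 * state. This mirrors the open-coded calls removed above.
 */
static int flush_unmap_range_sketch(struct inode *inode, loff_t offset, loff_t len)
{
	struct address_space	*mapping = inode->i_mapping;
	int			error;

	error = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (error)
		return error;
	return invalidate_inode_pages2_range(mapping, offset >> PAGE_SHIFT,
					     (offset + len - 1) >> PAGE_SHIFT);
}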
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 87363d136bb6..7a78229cf1a7 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -80,4 +80,7 @@ int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
 		int whichfork, xfs_extnum_t *nextents,
 		xfs_filblks_t *count);
 
+int	xfs_flush_unmap_range(struct xfs_inode *ip, xfs_off_t offset,
+			      xfs_off_t len);
+
 #endif	/* __XFS_BMAP_UTIL_H__ */
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 12d8455bfbb2..010db5f8fb00 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -1233,9 +1233,23 @@ xfs_buf_iodone(
 }
 
 /*
- * Requeue a failed buffer for writeback
+ * Requeue a failed buffer for writeback.
  *
- * Return true if the buffer has been re-queued properly, false otherwise
+ * We clear the log item failed state here as well, but we have to be careful
+ * about reference counts because the only active reference counts on the buffer
+ * may be the failed log items. Hence if we clear the log item failed state
+ * before queuing the buffer for IO we can release all active references to
+ * the buffer and free it, leading to use after free problems in
+ * xfs_buf_delwri_queue. It makes no difference to the buffer or log items which
+ * order we process them in - the buffer is locked, and we own the buffer list
+ * so nothing on them is going to change while we are performing this action.
+ *
+ * Hence we can safely queue the buffer for IO before we clear the failed log
+ * item state, therefore always having an active reference to the buffer and
+ * avoiding the transient zero-reference state that leads to use-after-free.
+ *
+ * Return true if the buffer was added to the buffer list, false if it was
+ * already on the buffer list.
  */
 bool
 xfs_buf_resubmit_failed_buffers(
@@ -1243,16 +1257,16 @@ xfs_buf_resubmit_failed_buffers(
 	struct list_head	*buffer_list)
 {
 	struct xfs_log_item	*lip;
+	bool			ret;
+
+	ret = xfs_buf_delwri_queue(bp, buffer_list);
 
 	/*
-	 * Clear XFS_LI_FAILED flag from all items before resubmit
-	 *
-	 * XFS_LI_FAILED set/clear is protected by ail_lock, caller this
+	 * XFS_LI_FAILED set/clear is protected by ail_lock, caller of this
 	 * function already have it acquired
 	 */
 	list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
 		xfs_clear_li_failed(lip);
 
-	/* Add this buffer back to the delayed write list */
-	return xfs_buf_delwri_queue(bp, buffer_list);
+	return ret;
 }
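The reordering in xfs_buf_resubmit_failed_buffers() is all about reference counts: if the failed log items hold the only references to the buffer, clearing them first can drop the count to zero and free the buffer before xfs_buf_delwri_queue() touches it. The toy userspace program below (purely illustrative, no XFS code) models that window.

#include <stdio.h>

/* Toy model of a reference-counted buffer; nothing here is kernel code. */
struct buf {
	int refcount;
	int freed;
};

static void buf_put(struct buf *bp)
{
	if (--bp->refcount == 0) {
		bp->freed = 1;          /* the kernel would free the buffer here */
		printf("buffer freed\n");
	}
}

static void buf_get(struct buf *bp)
{
	if (bp->freed)
		printf("BUG: use after free - queueing a freed buffer\n");
	bp->refcount++;
}

int main(void)
{
	/* Two failed log items hold the only references to the buffer. */
	struct buf bp = { .refcount = 2, .freed = 0 };

	/* Old (broken) order: drop the item references first ... */
	buf_put(&bp);
	buf_put(&bp);           /* refcount hits zero, buffer is freed */
	buf_get(&bp);           /* ... then the delwri queue takes its reference */

	/*
	 * The fixed order does the buf_get() for the delwri queue before the
	 * buf_put()s, so the count never reaches zero in between.
	 */
	return 0;
}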
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 53c9ab8fb777..e47425071e65 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -920,7 +920,7 @@ out_unlock:
 }
 
 
-loff_t
+STATIC loff_t
 xfs_file_remap_range(
 	struct file		*file_in,
 	loff_t			pos_in,
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index ecdb086bc23e..322a852ce284 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -296,6 +296,7 @@ xfs_reflink_reserve_cow(
 	if (error)
 		return error;
 
+	xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
 	trace_xfs_reflink_cow_alloc(ip, &got);
 	return 0;
 }
@@ -1351,10 +1352,19 @@ xfs_reflink_remap_prep(
 	if (ret)
 		goto out_unlock;
 
-	/* Zap any page cache for the destination file's range. */
-	truncate_inode_pages_range(&inode_out->i_data,
-			round_down(pos_out, PAGE_SIZE),
-			round_up(pos_out + *len, PAGE_SIZE) - 1);
+	/*
+	 * If pos_out > EOF, we may have dirtied blocks between EOF and
+	 * pos_out. In that case, we need to extend the flush and unmap to cover
+	 * from EOF to the end of the copy length.
+	 */
+	if (pos_out > XFS_ISIZE(dest)) {
+		loff_t	flen = *len + (pos_out - XFS_ISIZE(dest));
+		ret = xfs_flush_unmap_range(dest, XFS_ISIZE(dest), flen);
+	} else {
+		ret = xfs_flush_unmap_range(dest, pos_out, *len);
+	}
+	if (ret)
+		goto out_unlock;
 
 	return 1;
 out_unlock:
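The branch added to xfs_reflink_remap_prep() widens the flush when the destination offset lies beyond EOF, so that any dirty pages between EOF and pos_out are written back and unmapped as well. A quick worked example of the range selection, with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* All values are hypothetical, in bytes. */
	long long isize   = 100LL * 4096;       /* destination file's current EOF */
	long long pos_out = 120LL * 4096;       /* remap target offset, beyond EOF */
	long long len     =  16LL * 4096;       /* remap length */
	long long start, flen;

	if (pos_out > isize) {
		/* Start at EOF and cover everything up to pos_out + len. */
		start = isize;
		flen  = len + (pos_out - isize);
	} else {
		start = pos_out;
		flen  = len;
	}
	printf("flush/unmap %lld bytes starting at offset %lld\n", flen, start);
	return 0;
}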
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 3043e5ed6495..8a6532aae779 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -280,7 +280,10 @@ DECLARE_EVENT_CLASS(xfs_buf_class,
 	),
 	TP_fast_assign(
 		__entry->dev = bp->b_target->bt_dev;
-		__entry->bno = bp->b_bn;
+		if (bp->b_bn == XFS_BUF_DADDR_NULL)
+			__entry->bno = bp->b_maps[0].bm_bn;
+		else
+			__entry->bno = bp->b_bn;
 		__entry->nblks = bp->b_length;
 		__entry->hold = atomic_read(&bp->b_hold);
 		__entry->pincount = atomic_read(&bp->b_pin_count);