Diffstat (limited to 'fs/xfs/xfs_aops.c')
-rw-r--r--  fs/xfs/xfs_aops.c  47
1 file changed, 35 insertions, 12 deletions
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index f18e5932aec4..a3eeaba156c5 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -446,6 +446,19 @@ xfs_imap_valid(
 {
 	offset >>= inode->i_blkbits;
 
+	/*
+	 * We have to make sure the cached mapping is within EOF to protect
+	 * against eofblocks trimming on file release leaving us with a stale
+	 * mapping. Otherwise, a page for a subsequent file extending buffered
+	 * write could get picked up by this writeback cycle and written to the
+	 * wrong blocks.
+	 *
+	 * Note that what we really want here is a generic mapping invalidation
+	 * mechanism to protect us from arbitrary extent modifying contexts, not
+	 * just eofblocks.
+	 */
+	xfs_trim_extent_eof(imap, XFS_I(inode));
+
 	return offset >= imap->br_startoff &&
 		offset < imap->br_startoff + imap->br_blockcount;
 }
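
The hunk above makes xfs_imap_valid() trim the cached mapping to EOF before running its interval check, so a mapping left stale by eofblocks trimming can no longer validate an offset beyond EOF. The following stand-alone sketch (not kernel code: the struct, the trim helper, and the block numbers are assumptions invented for illustration; only the interval test mirrors xfs_imap_valid()) shows the effect:

#include <stdbool.h>
#include <stdio.h>

struct irec {
	unsigned long long br_startoff;		/* first file block mapped */
	unsigned long long br_blockcount;	/* length of the mapping in blocks */
};

/* Clamp the mapping so it never extends past the EOF block. */
static void trim_to_eof(struct irec *imap, unsigned long long eof_fsb)
{
	if (imap->br_startoff >= eof_fsb)
		imap->br_blockcount = 0;
	else if (imap->br_startoff + imap->br_blockcount > eof_fsb)
		imap->br_blockcount = eof_fsb - imap->br_startoff;
}

/* The same interval test xfs_imap_valid() performs on a file block offset. */
static bool imap_valid(const struct irec *imap, unsigned long long offset_fsb)
{
	return offset_fsb >= imap->br_startoff &&
	       offset_fsb < imap->br_startoff + imap->br_blockcount;
}

int main(void)
{
	/* Cached mapping covers blocks [100, 200), but EOF is now at block 150. */
	struct irec imap = { .br_startoff = 100, .br_blockcount = 100 };

	printf("block 170 valid before trim: %d\n", imap_valid(&imap, 170));	/* 1 */
	trim_to_eof(&imap, 150);
	printf("block 170 valid after trim:  %d\n", imap_valid(&imap, 170));	/* 0 */
	return 0;
}

With the mapping clamped to EOF, the writeback path falls back to looking up a fresh mapping instead of writing to blocks that may already have been freed.
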
@@ -735,6 +748,14 @@ xfs_vm_invalidatepage(
 {
 	trace_xfs_invalidatepage(page->mapping->host, page, offset,
 				 length);
+
+	/*
+	 * If we are invalidating the entire page, clear the dirty state from it
+	 * so that we can check for attempts to release dirty cached pages in
+	 * xfs_vm_releasepage().
+	 */
+	if (offset == 0 && length >= PAGE_SIZE)
+		cancel_dirty_page(page);
 	block_invalidatepage(page, offset, length);
 }
 
@@ -1190,25 +1211,27 @@ xfs_vm_releasepage(
 	 * mm accommodates an old ext3 case where clean pages might not have had
 	 * the dirty bit cleared. Thus, it can send actual dirty pages to
 	 * ->releasepage() via shrink_active_list(). Conversely,
-	 * block_invalidatepage() can send pages that are still marked dirty
-	 * but otherwise have invalidated buffers.
+	 * block_invalidatepage() can send pages that are still marked dirty but
+	 * otherwise have invalidated buffers.
 	 *
 	 * We want to release the latter to avoid unnecessary buildup of the
-	 * LRU, skip the former and warn if we've left any lingering
-	 * delalloc/unwritten buffers on clean pages. Skip pages with delalloc
-	 * or unwritten buffers and warn if the page is not dirty. Otherwise
-	 * try to release the buffers.
+	 * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages
+	 * that are entirely invalidated and need to be released. Hence the
+	 * only time we should get dirty pages here is through
+	 * shrink_active_list() and so we can simply skip those now.
+	 *
+	 * warn if we've left any lingering delalloc/unwritten buffers on clean
+	 * or invalidated pages we are about to release.
 	 */
+	if (PageDirty(page))
+		return 0;
+
 	xfs_count_page_state(page, &delalloc, &unwritten);
 
-	if (delalloc) {
-		WARN_ON_ONCE(!PageDirty(page));
+	if (WARN_ON_ONCE(delalloc))
 		return 0;
-	}
-	if (unwritten) {
-		WARN_ON_ONCE(!PageDirty(page));
+	if (WARN_ON_ONCE(unwritten))
 		return 0;
-	}
 
 	return try_to_free_buffers(page);
 }
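
Taken together, the xfs_vm_invalidatepage() and xfs_vm_releasepage() hunks establish a simple contract: invalidating an entire page cancels its dirty state, so any page that still looks dirty when ->releasepage() runs must have come in via shrink_active_list() and is skipped without a warning. Below is a toy user-space model of that contract (the page structure and helpers are assumptions standing in for cancel_dirty_page(), block_invalidatepage(), and try_to_free_buffers(); it sketches the ordering only, not the kernel implementation):

#include <stdbool.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096u

struct page_model {
	bool dirty;
	bool has_buffers;
};

/*
 * Mirrors the new xfs_vm_invalidatepage() behaviour: a whole-page
 * invalidation cancels the dirty state before the buffers go away.
 */
static void invalidate(struct page_model *page, unsigned int offset,
		       unsigned int length)
{
	if (offset == 0 && length >= MODEL_PAGE_SIZE)
		page->dirty = false;
	page->has_buffers = false;	/* stand-in for block_invalidatepage() */
}

/*
 * Mirrors the new xfs_vm_releasepage() ordering: dirty pages are skipped,
 * everything else has its buffers released (the delalloc/unwritten
 * warnings are omitted from this model).
 */
static int release(struct page_model *page)
{
	if (page->dirty)
		return 0;		/* came via shrink_active_list(): skip */
	page->has_buffers = false;	/* stand-in for try_to_free_buffers() */
	return 1;
}

int main(void)
{
	struct page_model p = { .dirty = true, .has_buffers = true };

	printf("release while dirty: %d\n", release(&p));		/* 0 */
	invalidate(&p, 0, MODEL_PAGE_SIZE);				/* full invalidation */
	printf("release after full invalidation: %d\n", release(&p));	/* 1 */
	return 0;
}
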