aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--mm/swap_state.c2
-rw-r--r--mm/swapfile.c2
-rw-r--r--mm/truncate.c4
3 files changed, 4 insertions, 4 deletions
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 5f7cf2a4cb55..925d5c50f18d 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -21,7 +21,7 @@
 
 /*
  * swapper_space is a fiction, retained to simplify the path through
- * vmscan's shrink_list, to make sync_page look nicer, and to allow
+ * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
  * future use of radix_tree tags in the swap cache.
  */
 static const struct address_space_operations swap_aops = {
diff --git a/mm/swapfile.c b/mm/swapfile.c
index acc172cbe3aa..7ff0a81c7b01 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -885,7 +885,7 @@ static int try_to_unuse(unsigned int type)
 	/*
 	 * So we could skip searching mms once swap count went
 	 * to 1, we did not mark any present ptes as dirty: must
-	 * mark page dirty so shrink_list will preserve it.
+	 * mark page dirty so shrink_page_list will preserve it.
 	 */
 	SetPageDirty(page);
 	unlock_page(page);
diff --git a/mm/truncate.c b/mm/truncate.c
index 4fbe1a2da5fb..af3dcf0e48e6 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -317,8 +317,8 @@ EXPORT_SYMBOL(invalidate_mapping_pages);
  * This is like invalidate_complete_page(), except it ignores the page's
  * refcount. We do this because invalidate_inode_pages2() needs stronger
  * invalidation guarantees, and cannot afford to leave pages behind because
- * shrink_list() has a temp ref on them, or because they're transiently sitting
- * in the lru_cache_add() pagevecs.
+ * shrink_page_list() has a temp ref on them, or because they're transiently
+ * sitting in the lru_cache_add() pagevecs.
  */
 static int
 invalidate_complete_page2(struct address_space *mapping, struct page *page)