| author | Minchan Kim <minchan@kernel.org> | 2015-04-15 19:13:26 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-15 19:35:17 -0400 |
| commit | cc5993bd7b8cff4a3e37042ee1358d1d5eafa70c | |
| tree | d100134adb4f0cb82632a5c5e79225ffcd0b10b4 /mm/swap.c | |
| parent | 922c0551a795dccadeb1dadc756d93fe3e303180 | |
mm: rename deactivate_page to deactivate_file_page
"deactivate_page" was created for file invalidation so it has too
specific logic for file-backed pages. So, let's change the name of the
function and date to a file-specific one and yield the generic name.
Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Wang, Yalin <Yalin.Wang@sonymobile.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
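
For context only (this is not part of the patch, whose diffstat below is limited to mm/swap.c): the kernel-doc in the diff describes the renamed helper as a hint that a page is a good reclaim candidate when its invalidation fails. Below is a minimal caller-side sketch of that pattern, in the spirit of the invalidation loop in mm/truncate.c; the wrapper name try_invalidate_file_page() is hypothetical.

```c
/*
 * Illustrative sketch, not from this patch: shows where a caller would
 * reach for deactivate_file_page() after the rename.  Assumes the usual
 * declarations from <linux/mm.h> and <linux/swap.h>.
 */
#include <linux/mm.h>
#include <linux/swap.h>

/* Hypothetical helper; mm/truncate.c open-codes this in its loop. */
static void try_invalidate_file_page(struct page *page)
{
        /*
         * invalidate_inode_page() refuses dirty, under-writeback and
         * mapped pages; if it succeeded there is nothing more to do.
         */
        if (invalidate_inode_page(page))
                return;

        /*
         * Invalidation failed, so hint the VM instead: the renamed,
         * file-only entry point (formerly deactivate_page()) queues the
         * page for movement towards the inactive file LRU.
         */
        deactivate_file_page(page);
}
```

The rename leaves the generic deactivate_page() name free for a later, non-file-specific user.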
Diffstat (limited to 'mm/swap.c')
-rw-r--r-- | mm/swap.c | 24 |
1 file changed, 12 insertions, 12 deletions
```diff
@@ -42,7 +42,7 @@ int page_cluster;
 
 static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
-static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
+static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
 
 /*
  * This path almost never happens for VM activity - pages are normally
@@ -743,7 +743,7 @@ void lru_cache_add_active_or_unevictable(struct page *page,
  * be write it out by flusher threads as this is much more effective
  * than the single-page writeout from reclaim.
  */
-static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
+static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
                               void *arg)
 {
         int lru, file;
@@ -811,36 +811,36 @@ void lru_add_drain_cpu(int cpu)
                 local_irq_restore(flags);
         }
 
-        pvec = &per_cpu(lru_deactivate_pvecs, cpu);
+        pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
         if (pagevec_count(pvec))
-                pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+                pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
 
         activate_page_drain(cpu);
 }
 
 /**
- * deactivate_page - forcefully deactivate a page
+ * deactivate_file_page - forcefully deactivate a file page
  * @page: page to deactivate
  *
  * This function hints the VM that @page is a good reclaim candidate,
  * for example if its invalidation fails due to the page being dirty
  * or under writeback.
  */
-void deactivate_page(struct page *page)
+void deactivate_file_page(struct page *page)
 {
         /*
-         * In a workload with many unevictable page such as mprotect, unevictable
-         * page deactivation for accelerating reclaim is pointless.
+         * In a workload with many unevictable page such as mprotect,
+         * unevictable page deactivation for accelerating reclaim is pointless.
          */
         if (PageUnevictable(page))
                 return;
 
         if (likely(get_page_unless_zero(page))) {
-                struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+                struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
 
                 if (!pagevec_add(pvec, page))
-                        pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
-                put_cpu_var(lru_deactivate_pvecs);
+                        pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
+                put_cpu_var(lru_deactivate_file_pvecs);
         }
 }
 
@@ -872,7 +872,7 @@ void lru_add_drain_all(void)
 
         if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
             pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
-            pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
+            pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
             need_activate_page_drain(cpu)) {
                 INIT_WORK(work, lru_add_drain_per_cpu);
                 schedule_work_on(cpu, work);
```
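
All four hunks touch the same per-CPU batch, so the pagevec definition, both drain paths and the enqueue in deactivate_file_page() have to be renamed together. A stripped-down sketch of that pagevec-batching idiom as it lives inside mm/swap.c, with hypothetical names (my_pvecs, my_move_fn, my_queue_page, my_drain_cpu) standing in for lru_deactivate_file_pvecs, lru_deactivate_file_fn and their callers:

```c
/*
 * Sketch of the per-CPU pagevec batching idiom used above; the names are
 * hypothetical, and pagevec_lru_move_fn() is local to mm/swap.c, so this
 * only makes sense as pseudocode for that file.
 */
#include <linux/pagevec.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct pagevec, my_pvecs);

/* Per-page worker, run under the LRU lock by pagevec_lru_move_fn(). */
static void my_move_fn(struct page *page, struct lruvec *lruvec, void *arg)
{
        /* move @page between LRU lists; see lru_deactivate_file_fn() */
}

/* Enqueue: stash the page in this CPU's batch and flush when it fills. */
static void my_queue_page(struct page *page)
{
        struct pagevec *pvec = &get_cpu_var(my_pvecs); /* disables preemption */

        if (!pagevec_add(pvec, page))  /* returns 0 once the pagevec is full */
                pagevec_lru_move_fn(pvec, my_move_fn, NULL);
        put_cpu_var(my_pvecs);
}

/*
 * Drain: every flush path must name the same per-CPU variable, which is
 * why lru_add_drain_cpu() and lru_add_drain_all() are patched as well.
 */
static void my_drain_cpu(int cpu)
{
        struct pagevec *pvec = &per_cpu(my_pvecs, cpu);

        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, my_move_fn, NULL);
}
```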