Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	88
1 file changed, 51 insertions(+), 37 deletions(-)
@@ -1042,15 +1042,46 @@ void page_add_new_anon_rmap(struct page *page,
  */
 void page_add_file_rmap(struct page *page)
 {
-	bool locked;
+	struct mem_cgroup *memcg;
 	unsigned long flags;
+	bool locked;
 
-	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
+	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
 	}
-	mem_cgroup_end_update_page_stat(page, &locked, &flags);
+	mem_cgroup_end_page_stat(memcg, locked, flags);
+}
+
+static void page_remove_file_rmap(struct page *page)
+{
+	struct mem_cgroup *memcg;
+	unsigned long flags;
+	bool locked;
+
+	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+
+	/* page still mapped by someone else? */
+	if (!atomic_add_negative(-1, &page->_mapcount))
+		goto out;
+
+	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
+	if (unlikely(PageHuge(page)))
+		goto out;
+
+	/*
+	 * We use the irq-unsafe __{inc|mod}_zone_page_state because
+	 * these counters are not modified in interrupt context, and
+	 * pte lock (a spinlock) is held, which implies preemption disabled.
+	 */
+	__dec_zone_page_state(page, NR_FILE_MAPPED);
+	mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
+
+	if (unlikely(PageMlocked(page)))
+		clear_page_mlock(page);
+out:
+	mem_cgroup_end_page_stat(memcg, locked, flags);
 }
 
 /**
@@ -1061,46 +1092,33 @@ void page_add_file_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page)
 {
-	bool anon = PageAnon(page);
-	bool locked;
-	unsigned long flags;
-
-	/*
-	 * The anon case has no mem_cgroup page_stat to update; but it may
-	 * call uncharge_page() below, where the lock ordering can deadlock
-	 * if we hold the lock against a page_stat move: so avoid it on anon.
-	 */
-	if (!anon)
-		mem_cgroup_begin_update_page_stat(page, &locked, &flags);
+	if (!PageAnon(page)) {
+		page_remove_file_rmap(page);
+		return;
+	}
 
 	/* page still mapped by someone else? */
 	if (!atomic_add_negative(-1, &page->_mapcount))
-		goto out;
+		return;
+
+	/* Hugepages are not counted in NR_ANON_PAGES for now. */
+	if (unlikely(PageHuge(page)))
+		return;
 
 	/*
-	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
-	 * and not charged by memcg for now.
-	 *
 	 * We use the irq-unsafe __{inc|mod}_zone_page_state because
 	 * these counters are not modified in interrupt context, and
-	 * these counters are not modified in interrupt context, and
 	 * pte lock (a spinlock) is held, which implies preemption disabled.
 	 */
-	if (unlikely(PageHuge(page)))
-		goto out;
-	if (anon) {
-		if (PageTransHuge(page))
-			__dec_zone_page_state(page,
-					      NR_ANON_TRANSPARENT_HUGEPAGES);
-		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
-				      -hpage_nr_pages(page));
-	} else {
-		__dec_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
-		mem_cgroup_end_update_page_stat(page, &locked, &flags);
-	}
+	if (PageTransHuge(page))
+		__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+
+	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
+			      -hpage_nr_pages(page));
+
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
+
 	/*
 	 * It would be tidy to reset the PageAnon mapping here,
 	 * but that might overwrite a racing page_add_anon_rmap
@@ -1110,10 +1128,6 @@ void page_remove_rmap(struct page *page)
 	 * Leaving it set also helps swapoff to reinstate ptes
 	 * faster for those pages still in swapcache.
 	 */
-	return;
-out:
-	if (!anon)
-		mem_cgroup_end_update_page_stat(page, &locked, &flags);
 }
 
 /*