Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	94
1 files changed, 55 insertions, 39 deletions
@@ -274,6 +274,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 {
 	struct anon_vma_chain *avc;
 	struct anon_vma *anon_vma;
+	int error;
 
 	/* Don't bother if the parent process has no anon_vma here. */
 	if (!pvma->anon_vma)
@@ -283,8 +284,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 	 * First, attach the new VMA to the parent VMA's anon_vmas,
 	 * so rmap can find non-COWed pages in child processes.
 	 */
-	if (anon_vma_clone(vma, pvma))
-		return -ENOMEM;
+	error = anon_vma_clone(vma, pvma);
+	if (error)
+		return error;
 
 	/* Then add our own anon_vma. */
 	anon_vma = anon_vma_alloc();
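
The two anon_vma_fork() hunks above only change error handling: the result of anon_vma_clone() is stored in a new local and returned as-is, instead of being flattened into a hard-coded -ENOMEM. A minimal user-space sketch of the same pattern (do_clone() and do_fork() are made-up stand-ins, not kernel functions):

/*
 * Illustrative sketch only; do_clone()/do_fork() are hypothetical
 * stand-ins for anon_vma_clone()/anon_vma_fork().
 */
#include <errno.h>
#include <stdio.h>

static int do_clone(int err)
{
	return err ? -err : 0;		/* callee may fail with any errno */
}

static int do_fork(int err)
{
	int error;

	error = do_clone(err);
	if (error)
		return error;		/* propagate, don't flatten to -ENOMEM */
	return 0;
}

int main(void)
{
	printf("%d %d\n", do_fork(0), do_fork(EINTR));	/* 0 and -EINTR (-4 on Linux) */
	return 0;
}
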
@@ -1042,15 +1044,46 @@ void page_add_new_anon_rmap(struct page *page,
  */
 void page_add_file_rmap(struct page *page)
 {
-	bool locked;
+	struct mem_cgroup *memcg;
 	unsigned long flags;
+	bool locked;
 
-	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
+	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
 	}
-	mem_cgroup_end_update_page_stat(page, &locked, &flags);
+	mem_cgroup_end_page_stat(memcg, locked, flags);
+}
+
+static void page_remove_file_rmap(struct page *page)
+{
+	struct mem_cgroup *memcg;
+	unsigned long flags;
+	bool locked;
+
+	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+
+	/* page still mapped by someone else? */
+	if (!atomic_add_negative(-1, &page->_mapcount))
+		goto out;
+
+	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
+	if (unlikely(PageHuge(page)))
+		goto out;
+
+	/*
+	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
+	 * these counters are not modified in interrupt context, and
+	 * pte lock(a spinlock) is held, which implies preemption disabled.
+	 */
+	__dec_zone_page_state(page, NR_FILE_MAPPED);
+	mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
+
+	if (unlikely(PageMlocked(page)))
+		clear_page_mlock(page);
+out:
+	mem_cgroup_end_page_stat(memcg, locked, flags);
 }
 
 /**
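
This hunk switches page_add_file_rmap() from the page-keyed mem_cgroup_begin_update_page_stat()/mem_cgroup_end_update_page_stat() pair to mem_cgroup_begin_page_stat(), which returns the memcg the page belongs to; the stat update and the matching mem_cgroup_end_page_stat() then take that memcg directly. The new page_remove_file_rmap() helper does the file-side unmap accounting inside the same begin/end bracket. Both helpers touch NR_FILE_MAPPED only on the first map or last unmap, which follows from _mapcount starting at -1: atomic_inc_and_test() fires when the count reaches 0 (first mapping) and atomic_add_negative(-1, ...) when it drops back to -1 (last mapping gone). A standalone sketch of that convention (map_one()/unmap_one() are invented names, not kernel API):

/*
 * Sketch of the _mapcount convention the helpers rely on, as I read it;
 * not kernel code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int mapcount = -1;		/* page starts out unmapped */

static bool map_one(void)			/* ~ atomic_inc_and_test() */
{
	return atomic_fetch_add(&mapcount, 1) + 1 == 0;
}

static bool unmap_one(void)			/* ~ atomic_add_negative(-1, ...) */
{
	return atomic_fetch_sub(&mapcount, 1) - 1 < 0;
}

int main(void)
{
	printf("first map:   %d\n", map_one());		/* 1: count NR_FILE_MAPPED++ */
	printf("second map:  %d\n", map_one());		/* 0: already counted */
	printf("first unmap: %d\n", unmap_one());	/* 0: still mapped elsewhere */
	printf("last unmap:  %d\n", unmap_one());	/* 1: count NR_FILE_MAPPED-- */
	return 0;
}
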
@@ -1061,46 +1094,33 @@ void page_add_file_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page)
 {
-	bool anon = PageAnon(page);
-	bool locked;
-	unsigned long flags;
-
-	/*
-	 * The anon case has no mem_cgroup page_stat to update; but may
-	 * uncharge_page() below, where the lock ordering can deadlock if
-	 * we hold the lock against page_stat move: so avoid it on anon.
-	 */
-	if (!anon)
-		mem_cgroup_begin_update_page_stat(page, &locked, &flags);
+	if (!PageAnon(page)) {
+		page_remove_file_rmap(page);
+		return;
+	}
 
 	/* page still mapped by someone else? */
 	if (!atomic_add_negative(-1, &page->_mapcount))
-		goto out;
+		return;
+
+	/* Hugepages are not counted in NR_ANON_PAGES for now. */
+	if (unlikely(PageHuge(page)))
+		return;
 
 	/*
-	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
-	 * and not charged by memcg for now.
-	 *
 	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
 	 * these counters are not modified in interrupt context, and
-	 * these counters are not modified in interrupt context, and
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
-	if (unlikely(PageHuge(page)))
-		goto out;
-	if (anon) {
-		if (PageTransHuge(page))
-			__dec_zone_page_state(page,
-					      NR_ANON_TRANSPARENT_HUGEPAGES);
-		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
-				-hpage_nr_pages(page));
-	} else {
-		__dec_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
-		mem_cgroup_end_update_page_stat(page, &locked, &flags);
-	}
+	if (PageTransHuge(page))
+		__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+
+	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
+			      -hpage_nr_pages(page));
+
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
+
 	/*
 	 * It would be tidy to reset the PageAnon mapping here,
 	 * but that might overwrite a racing page_add_anon_rmap
@@ -1110,10 +1130,6 @@ void page_remove_rmap(struct page *page)
 	 * Leaving it set also helps swapoff to reinstate ptes
 	 * faster for those pages still in swapcache.
 	 */
-	return;
-out:
-	if (!anon)
-		mem_cgroup_end_update_page_stat(page, &locked, &flags);
 }
 
 /*
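
The last two hunks restructure page_remove_rmap() around the new helper: file-backed pages are dispatched to page_remove_file_rmap() up front, so the function no longer needs the anon flag, the memcg begin/end bracket, or the out: label, and the remaining body handles only anonymous pages with early returns, decrementing NR_ANON_TRANSPARENT_HUGEPAGES for transparent huge pages and NR_ANON_PAGES by hpage_nr_pages(). A condensed user-space sketch of that dispatch shape (remove_mapping()/remove_file_mapping() and the struct page fields are made up for illustration):

/*
 * Sketch of the refactoring shape only; names and fields are invented.
 */
#include <stdbool.h>
#include <stdio.h>

struct page { bool anon; bool huge; int nr_pages; };

static long nr_file_mapped, nr_anon_pages;

static void remove_file_mapping(struct page *page)
{
	(void)page;				/* file-side accounting lives here */
	nr_file_mapped--;
}

static void remove_mapping(struct page *page)
{
	if (!page->anon) {			/* dispatch early instead of branching at the end */
		remove_file_mapping(page);
		return;
	}
	if (page->huge)				/* hugetlb pages are not counted */
		return;
	nr_anon_pages -= page->nr_pages;	/* ~ NR_ANON_PAGES -= hpage_nr_pages(page) */
}

int main(void)
{
	struct page file = { false, false, 1 };
	struct page anon = { true, false, 1 };

	remove_mapping(&file);
	remove_mapping(&anon);
	printf("file_mapped=%ld anon_pages=%ld\n", nr_file_mapped, nr_anon_pages);
	return 0;
}
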