Diffstat (limited to 'mm/memcontrol.c')

 mm/memcontrol.c | 134 +++++++++++++++++++++++++---------------------------------
 1 file changed, 60 insertions(+), 74 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f50cb7b1efdb..93a792871804 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -893,6 +893,23 @@ nomem:
 	return -ENOMEM;
 }
 
+static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
+{
+	struct mem_cgroup *mem;
+	swp_entry_t ent;
+
+	if (!PageSwapCache(page))
+		return NULL;
+
+	ent.val = page_private(page);
+	mem = lookup_swap_cgroup(ent);
+	if (!mem)
+		return NULL;
+	if (!css_tryget(&mem->css))
+		return NULL;
+	return mem;
+}
+
 /*
  * commit a charge got by __mem_cgroup_try_charge() and makes page_cgroup to be
  * USED state. If already USED, uncharge and return.
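
A note on the reference contract of the new helper: css_tryget() either pins the cgroup or fails when it is being torn down, so a non-NULL return always carries a css reference that the caller must drop. A minimal sketch of the expected caller pattern (the surrounding code is illustrative, not part of this diff):

	struct mem_cgroup *mem;

	mem = try_get_mem_cgroup_from_swapcache(page);
	if (mem) {
		/* ... charge or reclaim against "mem" ... */
		css_put(&mem->css);	/* drop the css_tryget() reference */
	}

The call sites converted below follow this pattern; the "drop extra refcnt from tryget" comment in mem_cgroup_try_charge_swapin() refers to exactly this put.
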
@@ -1084,6 +1101,9 @@ int mem_cgroup_newpage_charge(struct page *page,
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask)
 {
+	struct mem_cgroup *mem = NULL;
+	int ret;
+
 	if (mem_cgroup_disabled())
 		return 0;
 	if (PageCompound(page))
@@ -1096,6 +1116,8 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 	 * For GFP_NOWAIT case, the page may be pre-charged before calling
 	 * add_to_page_cache(). (See shmem.c) check it here and avoid to call
 	 * charge twice. (It works but has to pay a bit larger cost.)
+	 * And when the page is SwapCache, it should take swap information
+	 * into account. This is under lock_page() now.
 	 */
 	if (!(gfp_mask & __GFP_WAIT)) {
 		struct page_cgroup *pc;
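
The fast path this comment describes sits between this hunk and the next: under !__GFP_WAIT, the page_cgroup is inspected under its lock and the function returns early if the page is already charged. A sketch of that check, reconstructed from the context lines around it (treat the exact body as an assumption rather than a quotation):

	if (!(gfp_mask & __GFP_WAIT)) {
		struct page_cgroup *pc;

		pc = lookup_page_cgroup(page);
		if (!pc)
			return 0;
		lock_page_cgroup(pc);
		if (PageCgroupUsed(pc)) {
			/* pre-charged by e.g. shmem; don't charge twice */
			unlock_page_cgroup(pc);
			return 0;
		}
		unlock_page_cgroup(pc);
	}
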
@@ -1112,15 +1134,40 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 		unlock_page_cgroup(pc);
 	}
 
-	if (unlikely(!mm))
+	if (do_swap_account && PageSwapCache(page)) {
+		mem = try_get_mem_cgroup_from_swapcache(page);
+		if (mem)
+			mm = NULL;
+		else
+			mem = NULL;
+		/* SwapCache may be still linked to LRU now. */
+		mem_cgroup_lru_del_before_commit_swapcache(page);
+	}
+
+	if (unlikely(!mm && !mem))
 		mm = &init_mm;
 
 	if (page_is_file_cache(page))
 		return mem_cgroup_charge_common(page, mm, gfp_mask,
 				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
-	else
-		return mem_cgroup_charge_common(page, mm, gfp_mask,
-				MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
+
+	ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+				MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
+	if (mem)
+		css_put(&mem->css);
+	if (PageSwapCache(page))
+		mem_cgroup_lru_add_after_commit_swapcache(page);
+
+	if (do_swap_account && !ret && PageSwapCache(page)) {
+		swp_entry_t ent = {.val = page_private(page)};
+		/* avoid double counting */
+		mem = swap_cgroup_record(ent, NULL);
+		if (mem) {
+			res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+			mem_cgroup_put(mem);
+		}
+	}
+	return ret;
 }
 
 /*
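
The "avoid double counting" branch above is the accounting core of this hunk. With do_swap_account, a page that has been swapped out already holds one mem+swap (memsw) charge for its swap entry; charging the swap-cache page as page cache adds a second charge for the same data. A sketch of the bookkeeping, under the assumption (consistent with the mem_cgroup_put() here) that the swap record holds a mem_cgroup reference taken at swap-out:

	/*
	 * swap-out:   memsw += PAGE_SIZE for the swap entry, and
	 *             swap_cgroup_record(ent, mem) remembers the owner.
	 * this path:  mem_cgroup_charge_common() charged the page, i.e.
	 *             a second PAGE_SIZE against memsw for the same data.
	 * Clearing the record hands back the old owner exactly once, so
	 * two racing callers cannot both roll back the swap-entry side.
	 */
	mem = swap_cgroup_record(ent, NULL);	/* clear record, get owner */
	if (mem) {
		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
		mem_cgroup_put(mem);	/* reference held by the record */
	}
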
@@ -1134,7 +1181,6 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
 			gfp_t mask, struct mem_cgroup **ptr)
 {
 	struct mem_cgroup *mem;
-	swp_entry_t ent;
 	int ret;
 
 	if (mem_cgroup_disabled())
@@ -1142,7 +1188,6 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
 
 	if (!do_swap_account)
 		goto charge_cur_mm;
-
 	/*
 	 * A racing thread's fault, or swapoff, may have already updated
 	 * the pte, and even removed page from swap cache: return success
@@ -1150,14 +1195,9 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
 	 */
 	if (!PageSwapCache(page))
 		return 0;
-
-	ent.val = page_private(page);
-
-	mem = lookup_swap_cgroup(ent);
+	mem = try_get_mem_cgroup_from_swapcache(page);
 	if (!mem)
 		goto charge_cur_mm;
-	if (!css_tryget(&mem->css))
-		goto charge_cur_mm;
 	*ptr = mem;
 	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
 	/* drop extra refcnt from tryget */
@@ -1169,62 +1209,6 @@ charge_cur_mm:
 	return __mem_cgroup_try_charge(mm, mask, ptr, true);
 }
 
-#ifdef CONFIG_SWAP
-
-int mem_cgroup_cache_charge_swapin(struct page *page,
-			struct mm_struct *mm, gfp_t mask, bool locked)
-{
-	int ret = 0;
-
-	if (mem_cgroup_disabled())
-		return 0;
-	if (unlikely(!mm))
-		mm = &init_mm;
-	if (!locked)
-		lock_page(page);
-	/*
-	 * If not locked, the page can be dropped from SwapCache until
-	 * we reach here.
-	 */
-	if (PageSwapCache(page)) {
-		struct mem_cgroup *mem = NULL;
-		swp_entry_t ent;
-
-		ent.val = page_private(page);
-		if (do_swap_account) {
-			mem = lookup_swap_cgroup(ent);
-			if (mem) {
-				if (css_tryget(&mem->css))
-					mm = NULL; /* charge to recorded */
-				else
-					mem = NULL; /* charge to current */
-			}
-		}
-		/* SwapCache may be still linked to LRU now. */
-		mem_cgroup_lru_del_before_commit_swapcache(page);
-		ret = mem_cgroup_charge_common(page, mm, mask,
-				MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
-		mem_cgroup_lru_add_after_commit_swapcache(page);
-		/* drop extra refcnt from tryget */
-		if (mem)
-			css_put(&mem->css);
-
-		if (!ret && do_swap_account) {
-			/* avoid double counting */
-			mem = swap_cgroup_record(ent, NULL);
-			if (mem) {
-				res_counter_uncharge(&mem->memsw, PAGE_SIZE);
-				mem_cgroup_put(mem);
-			}
-		}
-	}
-	if (!locked)
-		unlock_page(page);
-
-	return ret;
-}
-#endif
-
 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
 {
 	struct page_cgroup *pc;
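
With mem_cgroup_cache_charge_swapin() gone, its lookup/charge/uncharge sequence lives inside mem_cgroup_cache_charge() itself, which, per the comment added earlier, now expects the SwapCache case to run under lock_page(). A hedged sketch of what a call site looks like under that assumption (the caller shown is illustrative, not taken from this diff):

	/*
	 * Illustrative caller: a shmem-style path moving a swap-cache
	 * page back into the page cache. The page lock is taken by the
	 * caller now, rather than inside the removed wrapper.
	 */
	lock_page(page);
	error = mem_cgroup_cache_charge(page, current->mm, gfp_mask);
	if (!error) {
		/* ... add_to_page_cache_locked() etc. ... */
	}
	unlock_page(page);
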
@@ -1486,18 +1470,20 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem,
  * This is typically used for page reclaiming for shmem for reducing side
  * effect of page allocation from shmem, which is used by some mem_cgroup.
  */
-int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
+int mem_cgroup_shrink_usage(struct page *page,
+			struct mm_struct *mm,
+			gfp_t gfp_mask)
 {
-	struct mem_cgroup *mem;
+	struct mem_cgroup *mem = NULL;
 	int progress = 0;
 	int retry = MEM_CGROUP_RECLAIM_RETRIES;
 
 	if (mem_cgroup_disabled())
 		return 0;
-	if (!mm)
-		return 0;
-
-	mem = try_get_mem_cgroup_from_mm(mm);
+	if (page)
+		mem = try_get_mem_cgroup_from_swapcache(page);
+	if (!mem && mm)
+		mem = try_get_mem_cgroup_from_mm(mm);
 	if (unlikely(!mem))
 		return 0;
 
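
The reworked lookup gives the page priority over the mm: a swap-cache page is shrunk against the cgroup recorded for its swap entry, and the caller's mm is only a fallback. A hypothetical caller passing both (the variable names are mine, not from this diff):

	/*
	 * Hypothetical use: before retrying a failed charge for a
	 * swap-cache page, try to shrink the cgroup that owns it.
	 * Either argument may be NULL; when both are given, the page's
	 * recorded owner wins.
	 */
	ret = mem_cgroup_shrink_usage(swappage, current->mm, gfp_mask);
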