author	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2016-01-15 19:52:20 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-15 20:56:32 -0500
commit	f627c2f53786b0445abca47f6aa84c96a1fffec2 (patch)
tree	a2c0a52a4448ad779d9027c943eb8e1217ae2504 /mm/shmem.c
parent	d281ee6145183594788ab6d5b55f8d144e69eace (diff)
memcg: adjust to support new THP refcounting
As with rmap, with the new refcounting we cannot rely on PageTransHuge() to check whether we need to charge the size of a huge page to the cgroup. We need information from the caller to know whether it was mapped with a PMD or a PTE.

We uncharge when the last reference to the page is gone. At that point, if we see PageTransHuge(), it means we need to uncharge the whole huge page. The tricky part is partial unmap -- when we try to unmap only part of a huge page. We do no special handling of this situation, meaning we don't uncharge part of the huge page unless the last user is gone or split_huge_page() is triggered. If cgroup memory pressure occurs, the partially unmapped page will be split through the shrinker. This should be good enough.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Jerome Marchand <jmarchan@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
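To make the call pattern concrete, here is a minimal sketch (not part of the patch) of the try/commit/cancel charging protocol as shmem.c uses it after this change. The trailing bool is the new "compound" argument: false for a normal PTE-sized page, true when a PMD-mapped huge page is charged as a whole. charge_and_insert() and insert_into_cache() are hypothetical names standing in for shmem_add_to_page_cache() and its callers:

	#include <linux/mm.h>
	#include <linux/memcontrol.h>

	/* insert_into_cache() is a hypothetical stand-in (see above). */
	static int insert_into_cache(struct page *page);

	/* Hypothetical wrapper, for illustration only. */
	static int charge_and_insert(struct page *page, struct mm_struct *mm,
				     gfp_t gfp)
	{
		struct mem_cgroup *memcg;
		int error;

		/* Reserve the charge; nothing is visible to the cgroup yet. */
		error = mem_cgroup_try_charge(page, mm, gfp, &memcg, false);
		if (error)
			return error;

		error = insert_into_cache(page);	/* hypothetical step */
		if (error) {
			/* Back out the reserved, uncommitted charge. */
			mem_cgroup_cancel_charge(page, memcg, false);
			return error;
		}

		/* Commit the charge (lrucare == false for a fresh page). */
		mem_cgroup_commit_charge(page, memcg, false, false);
		return 0;
	}

Every shmem call site in the diff below passes compound == false, since shmem only handles small pages at this point in the series.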
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--	mm/shmem.c	21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index d271932f9ef9..b98e1011858c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -810,7 +810,8 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
 	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
 	 * Charged back to the user (not to caller) when swap account is used.
 	 */
-	error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg);
+	error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg,
+			false);
 	if (error)
 		goto out;
 	/* No radix_tree_preload: swap entry keeps a place for page in tree */
@@ -833,9 +834,9 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
 	if (error) {
 		if (error != -ENOMEM)
 			error = 0;
-		mem_cgroup_cancel_charge(page, memcg);
+		mem_cgroup_cancel_charge(page, memcg, false);
 	} else
-		mem_cgroup_commit_charge(page, memcg, true);
+		mem_cgroup_commit_charge(page, memcg, true, false);
 out:
 	unlock_page(page);
 	page_cache_release(page);
@@ -1218,7 +1219,8 @@ repeat:
 			goto failed;
 		}
 
-		error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg);
+		error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg,
+				false);
 		if (!error) {
 			error = shmem_add_to_page_cache(page, mapping, index,
 						swp_to_radix_entry(swap));
@@ -1235,14 +1237,14 @@ repeat:
 			 * "repeat": reading a hole and writing should succeed.
 			 */
 			if (error) {
-				mem_cgroup_cancel_charge(page, memcg);
+				mem_cgroup_cancel_charge(page, memcg, false);
 				delete_from_swap_cache(page);
 			}
 		}
 		if (error)
 			goto failed;
 
-		mem_cgroup_commit_charge(page, memcg, true);
+		mem_cgroup_commit_charge(page, memcg, true, false);
 
 		spin_lock(&info->lock);
 		info->swapped--;
@@ -1281,7 +1283,8 @@ repeat:
 		if (sgp == SGP_WRITE)
 			__SetPageReferenced(page);
 
-		error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg);
+		error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg,
+				false);
 		if (error)
 			goto decused;
 		error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
@@ -1291,10 +1294,10 @@ repeat:
 			radix_tree_preload_end();
 		}
 		if (error) {
-			mem_cgroup_cancel_charge(page, memcg);
+			mem_cgroup_cancel_charge(page, memcg, false);
 			goto decused;
 		}
-		mem_cgroup_commit_charge(page, memcg, false);
+		mem_cgroup_commit_charge(page, memcg, false, false);
 		lru_cache_add_anon(page);
 
 		spin_lock(&info->lock);
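For contrast, a caller that maps a huge page with a PMD passes compound == true so memcg accounts all HPAGE_PMD_NR subpages in one step. A hedged sketch of that shape, roughly as the anonymous THP fault path of this era does it:

	/* Charge the whole huge page up front; fall back on failure. */
	if (mem_cgroup_try_charge(page, mm, gfp, &memcg, true)) {
		put_page(page);
		return VM_FAULT_FALLBACK;
	}
	/* ... set up the PMD mapping ... */
	mem_cgroup_commit_charge(page, memcg, false, true);

Uncharging stays symmetric: the charge is only released when the last reference goes away or split_huge_page() breaks the page up, per the partial-unmap note in the commit message above.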