Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c  44
1 file changed, 15 insertions, 29 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 160101a05b4d..dbf571547c03 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -345,23 +345,22 @@ retry:
 			goto done;
 		}
 	}
-
 	unlock_page_cgroup(page);
 
 	pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
 	if (pc == NULL)
 		goto err;
 
-	rcu_read_lock();
 	/*
-	 * We always charge the cgroup the mm_struct belongs to
-	 * the mm_struct's mem_cgroup changes on task migration if the
+	 * We always charge the cgroup the mm_struct belongs to.
+	 * The mm_struct's mem_cgroup changes on task migration if the
 	 * thread group leader migrates. It's possible that mm is not
 	 * set, if so charge the init_mm (happens for pagecache usage).
 	 */
 	if (!mm)
 		mm = &init_mm;
 
+	rcu_read_lock();
 	mem = rcu_dereference(mm->mem_cgroup);
 	/*
 	 * For every charge from the cgroup, increment reference
@@ -375,12 +374,8 @@ retry:
 	 * the cgroup limit.
 	 */
 	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
-		bool is_atomic = gfp_mask & GFP_ATOMIC;
-		/*
-		 * We cannot reclaim under GFP_ATOMIC, fail the charge
-		 */
-		if (is_atomic)
-			goto noreclaim;
+		if (!(gfp_mask & __GFP_WAIT))
+			goto out;
 
 		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
 			continue;
@@ -394,23 +389,12 @@ retry:
 		 */
 		if (res_counter_check_under_limit(&mem->res))
 			continue;
-		/*
-		 * Since we control both RSS and cache, we end up with a
-		 * very interesting scenario where we end up reclaiming
-		 * memory (essentially RSS), since the memory is pushed
-		 * to swap cache, we eventually end up adding those
-		 * pages back to our list. Hence we give ourselves a
-		 * few chances before we fail
-		 */
-		else if (nr_retries--) {
-			congestion_wait(WRITE, HZ/10);
-			continue;
-		}
-noreclaim:
-		css_put(&mem->css);
-		if (!is_atomic)
-			mem_cgroup_out_of_memory(mem, GFP_KERNEL);
-		goto free_pc;
+
+		if (!nr_retries--) {
+			mem_cgroup_out_of_memory(mem, gfp_mask);
+			goto out;
+		}
+		congestion_wait(WRITE, HZ/10);
 	}
 
 	atomic_set(&pc->ref_cnt, 1);
@@ -419,10 +403,11 @@ noreclaim:
 	pc->flags = 0;
 	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
 		pc->flags |= PAGE_CGROUP_FLAG_CACHE;
+
 	if (page_cgroup_assign_new_page_cgroup(page, pc)) {
 		/*
-		 * an another charge is added to this page already.
-		 * we do take lock_page_cgroup(page) again and read
+		 * Another charge has been added to this page already.
+		 * We take lock_page_cgroup(page) again and read
 		 * page->cgroup, increment refcnt.... just retry is OK.
 		 */
 		res_counter_uncharge(&mem->res, PAGE_SIZE);
@@ -437,7 +422,8 @@ noreclaim:
 
 done:
 	return 0;
-free_pc:
+out:
+	css_put(&mem->css);
 	kfree(pc);
 err:
 	return -ENOMEM;
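
For readers following the control-flow change: the patch replaces the GFP_ATOMIC special case and the noreclaim/free_pc labels with a single rule. Callers that cannot sleep (no __GFP_WAIT in gfp_mask) fail the charge immediately, while sleeping callers reclaim and retry a bounded number of times before invoking the OOM killer, and all failure paths now funnel through one "out" label. The sketch below models that loop as self-contained userspace C, assuming stubbed-out primitives; every identifier in it is a hypothetical stand-in for a kernel function named in the diff, not a kernel API.

/*
 * Minimal userspace model of the charge loop as restructured by this
 * patch.  Each function below is a hypothetical stand-in for a kernel
 * primitive (res_counter_charge, try_to_free_mem_cgroup_pages,
 * res_counter_check_under_limit, mem_cgroup_out_of_memory,
 * congestion_wait); only the control flow mirrors the diff.
 */
#include <stdbool.h>
#include <stdio.h>

#define CAN_WAIT	0x1	/* models __GFP_WAIT in gfp_mask */
#define MAX_RETRIES	5	/* models the nr_retries budget */

static bool charge_exceeds_limit(void)	{ return false; }	/* stub */
static bool reclaimed_some_pages(void)	{ return false; }	/* stub */
static bool now_under_limit(void)	{ return false; }	/* stub */
static void invoke_oom_killer(void)	{ puts("OOM"); }
static void wait_for_writeback(void)	{ }	/* models congestion_wait() */

/* Returns 0 on success, -1 when the charge must fail (caller unwinds). */
static int charge(int flags)
{
	int nr_retries = MAX_RETRIES;

	while (charge_exceeds_limit()) {
		if (!(flags & CAN_WAIT))
			return -1;	/* atomic context: no reclaim, fail now */

		if (reclaimed_some_pages())
			continue;	/* reclaim made progress: retry */
		if (now_under_limit())
			continue;	/* a racing uncharge freed room: retry */

		if (!nr_retries--) {
			invoke_oom_killer();	/* retries exhausted */
			return -1;
		}
		wait_for_writeback();	/* throttle, then try again */
	}
	return 0;
}

int main(void)
{
	return charge(CAN_WAIT) ? 1 : 0;
}

One design point visible in the diff itself: the css_put() that the old code issued under the noreclaim label now sits on the shared "out" path, so both failure modes (atomic caller and retries exhausted) drop the cgroup reference exactly once before freeing the page_cgroup and returning -ENOMEM.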