Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c        2
-rw-r--r--  mm/memcontrol.c  1846
-rw-r--r--  mm/memory.c        28
-rw-r--r--  mm/migrate.c       42
-rw-r--r--  mm/oom_kill.c      10
-rw-r--r--  mm/page_alloc.c     8
-rw-r--r--  mm/page_cgroup.c  207
-rw-r--r--  mm/shmem.c         20
-rw-r--r--  mm/swap.c          33
-rw-r--r--  mm/swap_state.c     4
-rw-r--r--  mm/swapfile.c      24
-rw-r--r--  mm/vmscan.c       197
12 files changed, 1914 insertions, 507 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 2f55a1e2baf7..ceba0bd03662 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -460,7 +460,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 	VM_BUG_ON(!PageLocked(page));
 
 	error = mem_cgroup_cache_charge(page, current->mm,
-					gfp_mask & ~__GFP_HIGHMEM);
+					gfp_mask & GFP_RECLAIM_MASK);
 	if (error)
 		goto out;
 
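
Note on the filemap.c hunk above: the charge path used to strip only __GFP_HIGHMEM; it now keeps just the bits that matter for reclaim. A minimal sketch of the intent, assuming GFP_RECLAIM_MASK as defined in mm/internal.h of this kernel generation (the exact bit set is not reproduced here):

	/* placement hints such as __GFP_HIGHMEM mean nothing to accounting;
	 * only reclaim-behaviour bits (__GFP_WAIT, __GFP_IO, __GFP_FS, ...)
	 * are passed down to the charge/reclaim path. */
	gfp_t charge_gfp = gfp_mask & GFP_RECLAIM_MASK;
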
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 51ee96545579..e2996b80601f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -21,11 +21,13 @@
 #include <linux/memcontrol.h>
 #include <linux/cgroup.h>
 #include <linux/mm.h>
+#include <linux/pagemap.h>
 #include <linux/smp.h>
 #include <linux/page-flags.h>
 #include <linux/backing-dev.h>
 #include <linux/bit_spinlock.h>
 #include <linux/rcupdate.h>
+#include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/spinlock.h>
@@ -34,12 +36,23 @@
 #include <linux/vmalloc.h>
 #include <linux/mm_inline.h>
 #include <linux/page_cgroup.h>
+#include "internal.h"
 
 #include <asm/uaccess.h>
 
 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
 #define MEM_CGROUP_RECLAIM_RETRIES	5
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+/* Turned on only when memory cgroup is enabled && really_do_swap_account = 0 */
+int do_swap_account __read_mostly;
+static int really_do_swap_account __initdata = 1; /* for remember boot option*/
+#else
+#define do_swap_account		(0)
+#endif
+
+static DEFINE_MUTEX(memcg_tasklist);	/* can be hold under cgroup_mutex */
+
 /*
  * Statistics for memory cgroup.
  */
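
The really_do_swap_account flag introduced above is cleared by a boot option whose handler lives later in this file, outside the lines shown. A sketch of the usual __setup() pattern it follows (assumed here, not quoted from the hunk):

	static int __init disable_swap_account(char *s)
	{
		really_do_swap_account = 0;	/* booted with "noswapaccount" */
		return 1;
	}
	__setup("noswapaccount", disable_swap_account);
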
@@ -60,7 +73,7 @@ struct mem_cgroup_stat_cpu {
 } ____cacheline_aligned_in_smp;
 
 struct mem_cgroup_stat {
-	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
+	struct mem_cgroup_stat_cpu cpustat[0];
 };
 
 /*
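
Turning cpustat[] into a zero-length array lets the statistics block be sized at allocation time for the CPUs that actually exist instead of NR_CPUS; this is also why the struct comment later in this diff insists the stat member sits at the end of struct mem_cgroup. An illustrative allocator sketch (names assumed, not part of the hunk):

	static struct mem_cgroup *mem_cgroup_alloc_sketch(void)
	{
		int size = sizeof(struct mem_cgroup) +
			   nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
		struct mem_cgroup *mem;

		/* small enough: kmalloc; otherwise fall back to vmalloc */
		mem = (size < PAGE_SIZE) ? kmalloc(size, GFP_KERNEL)
					 : vmalloc(size);
		if (mem)
			memset(mem, 0, size);
		return mem;
	}
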
@@ -89,9 +102,10 @@ struct mem_cgroup_per_zone {
 	/*
 	 * spin_lock to protect the per cgroup LRU
 	 */
-	spinlock_t lru_lock;
 	struct list_head lists[NR_LRU_LISTS];
 	unsigned long count[NR_LRU_LISTS];
+
+	struct zone_reclaim_stat reclaim_stat;
 };
 /* Macro for accessing counter */
 #define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
@@ -122,44 +136,73 @@ struct mem_cgroup {
 	 */
 	struct res_counter res;
 	/*
+	 * the counter to account for mem+swap usage.
+	 */
+	struct res_counter memsw;
+	/*
 	 * Per cgroup active and inactive list, similar to the
 	 * per zone LRU lists.
 	 */
 	struct mem_cgroup_lru_info info;
 
+	/*
+	  protect against reclaim related member.
+	*/
+	spinlock_t reclaim_param_lock;
+
 	int	prev_priority;	/* for recording reclaim priority */
+
+	/*
+	 * While reclaiming in a hiearchy, we cache the last child we
+	 * reclaimed from. Protected by hierarchy_mutex
+	 */
+	struct mem_cgroup *last_scanned_child;
 	/*
-	 * statistics.
+	 * Should the accounting and control be hierarchical, per subtree?
+	 */
+	bool use_hierarchy;
+	unsigned long	last_oom_jiffies;
+	atomic_t	refcnt;
+
+	unsigned int	swappiness;
+
+	/*
+	 * statistics. This must be placed at the end of memcg.
 	 */
 	struct mem_cgroup_stat stat;
 };
-static struct mem_cgroup init_mem_cgroup;
 
 enum charge_type {
 	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
 	MEM_CGROUP_CHARGE_TYPE_MAPPED,
 	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
 	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
+	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
 	NR_CHARGE_TYPE,
 };
 
 /* only for here (for easy reading.) */
 #define PCGF_CACHE	(1UL << PCG_CACHE)
 #define PCGF_USED	(1UL << PCG_USED)
-#define PCGF_ACTIVE	(1UL << PCG_ACTIVE)
 #define PCGF_LOCK	(1UL << PCG_LOCK)
-#define PCGF_FILE	(1UL << PCG_FILE)
 static const unsigned long
 pcg_default_flags[NR_CHARGE_TYPE] = {
-	PCGF_CACHE | PCGF_FILE | PCGF_USED | PCGF_LOCK, /* File Cache */
-	PCGF_ACTIVE | PCGF_USED | PCGF_LOCK, /* Anon */
-	PCGF_ACTIVE | PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
+	PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
+	PCGF_USED | PCGF_LOCK, /* Anon */
+	PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
 	0, /* FORCE */
 };
 
-/*
- * Always modified under lru lock. Then, not necessary to preempt_disable()
- */
+/* for encoding cft->private value on file */
+#define _MEM			(0)
+#define _MEMSWAP		(1)
+#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
+#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
+#define MEMFILE_ATTR(val)	((val) & 0xffff)
+
+static void mem_cgroup_get(struct mem_cgroup *mem);
+static void mem_cgroup_put(struct mem_cgroup *mem);
+
 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
 					 struct page_cgroup *pc,
 					 bool charge)
@@ -167,10 +210,9 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
 	int val = (charge)? 1 : -1;
 	struct mem_cgroup_stat *stat = &mem->stat;
 	struct mem_cgroup_stat_cpu *cpustat;
+	int cpu = get_cpu();
 
-	VM_BUG_ON(!irqs_disabled());
-
-	cpustat = &stat->cpustat[smp_processor_id()];
+	cpustat = &stat->cpustat[cpu];
 	if (PageCgroupCache(pc))
 		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
 	else
@@ -182,6 +224,7 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
 	else
 		__mem_cgroup_stat_add_safe(cpustat,
 				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
+	put_cpu();
 }
 
 static struct mem_cgroup_per_zone *
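
The get_cpu()/put_cpu() pair added to mem_cgroup_charge_statistics() replaces the old requirement that callers run with IRQs disabled: disabling preemption is enough to keep the task on one CPU while its per-cpu slot is updated. The idiom, in isolation:

	int cpu = get_cpu();	/* disables preemption, returns this CPU */
	/* ... safely update per-cpu data for 'cpu' ... */
	put_cpu();		/* re-enables preemption */
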
@@ -197,6 +240,9 @@ page_cgroup_zoneinfo(struct page_cgroup *pc)
 	int nid = page_cgroup_nid(pc);
 	int zid = page_cgroup_zid(pc);
 
+	if (!mem)
+		return NULL;
+
 	return mem_cgroup_zoneinfo(mem, nid, zid);
 }
 
@@ -236,77 +282,152 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 			struct mem_cgroup, css);
 }
 
-static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
-			struct page_cgroup *pc)
+static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
 {
-	int lru = LRU_BASE;
+	struct mem_cgroup *mem = NULL;
+	/*
+	 * Because we have no locks, mm->owner's may be being moved to other
+	 * cgroup. We use css_tryget() here even if this looks
+	 * pessimistic (rather than adding locks here).
+	 */
+	rcu_read_lock();
+	do {
+		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+		if (unlikely(!mem))
+			break;
+	} while (!css_tryget(&mem->css));
+	rcu_read_unlock();
+	return mem;
+}
 
-	if (PageCgroupUnevictable(pc))
-		lru = LRU_UNEVICTABLE;
-	else {
-		if (PageCgroupActive(pc))
-			lru += LRU_ACTIVE;
-		if (PageCgroupFile(pc))
-			lru += LRU_FILE;
-	}
+static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem)
+{
+	if (!mem)
+		return true;
+	return css_is_removed(&mem->css);
+}
 
-	MEM_CGROUP_ZSTAT(mz, lru) -= 1;
+/*
+ * Following LRU functions are allowed to be used without PCG_LOCK.
+ * Operations are called by routine of global LRU independently from memcg.
+ * What we have to take care of here is validness of pc->mem_cgroup.
+ *
+ * Changes to pc->mem_cgroup happens when
+ * 1. charge
+ * 2. moving account
+ * In typical case, "charge" is done before add-to-lru. Exception is SwapCache.
+ * It is added to LRU before charge.
+ * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
+ * When moving account, the page is not on LRU. It's isolated.
+ */
 
-	mem_cgroup_charge_statistics(pc->mem_cgroup, pc, false);
-	list_del(&pc->lru);
+void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
+{
+	struct page_cgroup *pc;
+	struct mem_cgroup *mem;
+	struct mem_cgroup_per_zone *mz;
+
+	if (mem_cgroup_disabled())
+		return;
+	pc = lookup_page_cgroup(page);
+	/* can happen while we handle swapcache. */
+	if (list_empty(&pc->lru) || !pc->mem_cgroup)
+		return;
+	/*
+	 * We don't check PCG_USED bit. It's cleared when the "page" is finally
+	 * removed from global LRU.
+	 */
+	mz = page_cgroup_zoneinfo(pc);
+	mem = pc->mem_cgroup;
+	MEM_CGROUP_ZSTAT(mz, lru) -= 1;
+	list_del_init(&pc->lru);
+	return;
 }
 
-static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
-				struct page_cgroup *pc)
+void mem_cgroup_del_lru(struct page *page)
 {
-	int lru = LRU_BASE;
+	mem_cgroup_del_lru_list(page, page_lru(page));
+}
 
-	if (PageCgroupUnevictable(pc))
-		lru = LRU_UNEVICTABLE;
-	else {
-		if (PageCgroupActive(pc))
-			lru += LRU_ACTIVE;
-		if (PageCgroupFile(pc))
-			lru += LRU_FILE;
-	}
+void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
+{
+	struct mem_cgroup_per_zone *mz;
+	struct page_cgroup *pc;
 
-	MEM_CGROUP_ZSTAT(mz, lru) += 1;
-	list_add(&pc->lru, &mz->lists[lru]);
+	if (mem_cgroup_disabled())
+		return;
 
-	mem_cgroup_charge_statistics(pc->mem_cgroup, pc, true);
+	pc = lookup_page_cgroup(page);
+	smp_rmb();
+	/* unused page is not rotated. */
+	if (!PageCgroupUsed(pc))
+		return;
+	mz = page_cgroup_zoneinfo(pc);
+	list_move(&pc->lru, &mz->lists[lru]);
 }
 
-static void __mem_cgroup_move_lists(struct page_cgroup *pc, enum lru_list lru)
+void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
 {
-	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
-	int active    = PageCgroupActive(pc);
-	int file      = PageCgroupFile(pc);
-	int unevictable = PageCgroupUnevictable(pc);
-	enum lru_list from = unevictable ? LRU_UNEVICTABLE :
-				(LRU_FILE * !!file + !!active);
+	struct page_cgroup *pc;
+	struct mem_cgroup_per_zone *mz;
 
-	if (lru == from)
+	if (mem_cgroup_disabled())
+		return;
+	pc = lookup_page_cgroup(page);
+	/* barrier to sync with "charge" */
+	smp_rmb();
+	if (!PageCgroupUsed(pc))
 		return;
 
-	MEM_CGROUP_ZSTAT(mz, from) -= 1;
+	mz = page_cgroup_zoneinfo(pc);
+	MEM_CGROUP_ZSTAT(mz, lru) += 1;
+	list_add(&pc->lru, &mz->lists[lru]);
+}
+
+/*
+ * At handling SwapCache, pc->mem_cgroup may be changed while it's linked to
+ * lru because the page may.be reused after it's fully uncharged (because of
+ * SwapCache behavior).To handle that, unlink page_cgroup from LRU when charge
+ * it again. This function is only used to charge SwapCache. It's done under
+ * lock_page and expected that zone->lru_lock is never held.
+ */
+static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
+{
+	unsigned long flags;
+	struct zone *zone = page_zone(page);
+	struct page_cgroup *pc = lookup_page_cgroup(page);
+
+	spin_lock_irqsave(&zone->lru_lock, flags);
 	/*
-	 * However this is done under mz->lru_lock, another flags, which
-	 * are not related to LRU, will be modified from out-of-lock.
-	 * We have to use atomic set/clear flags.
+	 * Forget old LRU when this page_cgroup is *not* used. This Used bit
+	 * is guarded by lock_page() because the page is SwapCache.
 	 */
-	if (is_unevictable_lru(lru)) {
-		ClearPageCgroupActive(pc);
-		SetPageCgroupUnevictable(pc);
-	} else {
-		if (is_active_lru(lru))
-			SetPageCgroupActive(pc);
-		else
-			ClearPageCgroupActive(pc);
-		ClearPageCgroupUnevictable(pc);
-	}
+	if (!PageCgroupUsed(pc))
+		mem_cgroup_del_lru_list(page, page_lru(page));
+	spin_unlock_irqrestore(&zone->lru_lock, flags);
+}
 
-	MEM_CGROUP_ZSTAT(mz, lru) += 1;
-	list_move(&pc->lru, &mz->lists[lru]);
+static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
+{
+	unsigned long flags;
+	struct zone *zone = page_zone(page);
+	struct page_cgroup *pc = lookup_page_cgroup(page);
+
+	spin_lock_irqsave(&zone->lru_lock, flags);
+	/* link when the page is linked to LRU but page_cgroup isn't */
+	if (PageLRU(page) && list_empty(&pc->lru))
+		mem_cgroup_add_lru_list(page, page_lru(page));
+	spin_unlock_irqrestore(&zone->lru_lock, flags);
+}
+
+
+void mem_cgroup_move_lists(struct page *page,
+			   enum lru_list from, enum lru_list to)
+{
+	if (mem_cgroup_disabled())
+		return;
+	mem_cgroup_del_lru_list(page, from);
+	mem_cgroup_add_lru_list(page, to);
 }
 
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
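
These hooks are driven from the global LRU code with zone->lru_lock already held; the memcg-private list simply mirrors the global list's movements. A sketch of the caller side, modelled on the add/del helpers of include/linux/mm_inline.h in this era (simplified, page-flag handling omitted):

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page)) {
		del_page_from_lru_list(zone, page, page_lru(page));
					/* -> mem_cgroup_del_lru_list() */
		add_page_to_lru_list(zone, page, lru);
					/* -> mem_cgroup_add_lru_list() */
	}
	spin_unlock_irq(&zone->lru_lock);
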
@@ -320,37 +441,6 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
 }
 
 /*
- * This routine assumes that the appropriate zone's lru lock is already held
- */
-void mem_cgroup_move_lists(struct page *page, enum lru_list lru)
-{
-	struct page_cgroup *pc;
-	struct mem_cgroup_per_zone *mz;
-	unsigned long flags;
-
-	if (mem_cgroup_subsys.disabled)
-		return;
-
-	/*
-	 * We cannot lock_page_cgroup while holding zone's lru_lock,
-	 * because other holders of lock_page_cgroup can be interrupted
-	 * with an attempt to rotate_reclaimable_page. But we cannot
-	 * safely get to page_cgroup without it, so just try_lock it:
-	 * mem_cgroup_isolate_pages allows for page left on wrong list.
-	 */
-	pc = lookup_page_cgroup(page);
-	if (!trylock_page_cgroup(pc))
-		return;
-	if (pc && PageCgroupUsed(pc)) {
-		mz = page_cgroup_zoneinfo(pc);
-		spin_lock_irqsave(&mz->lru_lock, flags);
-		__mem_cgroup_move_lists(pc, lru);
-		spin_unlock_irqrestore(&mz->lru_lock, flags);
-	}
-	unlock_page_cgroup(pc);
-}
-
-/*
  * Calculate mapped_ratio under memory controller. This will be used in
  * vmscan.c for deteremining we have to reclaim mapped pages.
  */
@@ -372,39 +462,108 @@ int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
  */
 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
 {
-	return mem->prev_priority;
+	int prev_priority;
+
+	spin_lock(&mem->reclaim_param_lock);
+	prev_priority = mem->prev_priority;
+	spin_unlock(&mem->reclaim_param_lock);
+
+	return prev_priority;
 }
 
 void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
 {
+	spin_lock(&mem->reclaim_param_lock);
 	if (priority < mem->prev_priority)
 		mem->prev_priority = priority;
+	spin_unlock(&mem->reclaim_param_lock);
 }
 
 void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
 {
+	spin_lock(&mem->reclaim_param_lock);
 	mem->prev_priority = priority;
+	spin_unlock(&mem->reclaim_param_lock);
 }
 
-/*
- * Calculate # of pages to be scanned in this priority/zone.
- * See also vmscan.c
- *
- * priority starts from "DEF_PRIORITY" and decremented in each loop.
- * (see include/linux/mmzone.h)
- */
+static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
+{
+	unsigned long active;
+	unsigned long inactive;
+	unsigned long gb;
+	unsigned long inactive_ratio;
+
+	inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_ANON);
+	active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_ANON);
+
+	gb = (inactive + active) >> (30 - PAGE_SHIFT);
+	if (gb)
+		inactive_ratio = int_sqrt(10 * gb);
+	else
+		inactive_ratio = 1;
+
+	if (present_pages) {
+		present_pages[0] = inactive;
+		present_pages[1] = active;
+	}
+
+	return inactive_ratio;
+}
+
+int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
+{
+	unsigned long active;
+	unsigned long inactive;
+	unsigned long present_pages[2];
+	unsigned long inactive_ratio;
 
-long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
-					int priority, enum lru_list lru)
+	inactive_ratio = calc_inactive_ratio(memcg, present_pages);
+
+	inactive = present_pages[0];
+	active = present_pages[1];
+
+	if (inactive * inactive_ratio < active)
+		return 1;
+
+	return 0;
+}
+
+unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
+				       struct zone *zone,
+				       enum lru_list lru)
 {
-	long nr_pages;
 	int nid = zone->zone_pgdat->node_id;
 	int zid = zone_idx(zone);
-	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
+	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
 
-	nr_pages = MEM_CGROUP_ZSTAT(mz, lru);
+	return MEM_CGROUP_ZSTAT(mz, lru);
+}
 
-	return (nr_pages >> priority);
+struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
+						      struct zone *zone)
+{
+	int nid = zone->zone_pgdat->node_id;
+	int zid = zone_idx(zone);
+	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+
+	return &mz->reclaim_stat;
+}
+
+struct zone_reclaim_stat *
+mem_cgroup_get_reclaim_stat_from_page(struct page *page)
+{
+	struct page_cgroup *pc;
+	struct mem_cgroup_per_zone *mz;
+
+	if (mem_cgroup_disabled())
+		return NULL;
+
+	pc = lookup_page_cgroup(page);
+	mz = page_cgroup_zoneinfo(pc);
+	if (!mz)
+		return NULL;
+
+	return &mz->reclaim_stat;
 }
 
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
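
A worked example of calc_inactive_ratio() above: with 4GB of anon pages charged (inactive + active), gb = 4 and inactive_ratio = int_sqrt(10 * 4) = 6, so mem_cgroup_inactive_anon_is_low() reports the inactive list as low while inactive * 6 < active, i.e. until roughly one seventh of the anon pages are inactive. Below 1GB the ratio clamps to 1 and the test degenerates to a plain inactive < active comparison.
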
@@ -429,95 +588,281 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
 	src = &mz->lists[lru];
 
-	spin_lock(&mz->lru_lock);
 	scan = 0;
 	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
 		if (scan >= nr_to_scan)
 			break;
+
+		page = pc->page;
 		if (unlikely(!PageCgroupUsed(pc)))
 			continue;
-		page = pc->page;
-
 		if (unlikely(!PageLRU(page)))
 			continue;
 
-		/*
-		 * TODO: play better with lumpy reclaim, grabbing anything.
-		 */
-		if (PageUnevictable(page) ||
-		    (PageActive(page) && !active) ||
-		    (!PageActive(page) && active)) {
-			__mem_cgroup_move_lists(pc, page_lru(page));
-			continue;
-		}
-
 		scan++;
-		list_move(&pc->lru, &pc_list);
-
 		if (__isolate_lru_page(page, mode, file) == 0) {
 			list_move(&page->lru, dst);
 			nr_taken++;
 		}
 	}
 
-	list_splice(&pc_list, src);
-	spin_unlock(&mz->lru_lock);
-
 	*scanned = scan;
 	return nr_taken;
 }
 
+#define mem_cgroup_from_res_counter(counter, member)	\
+	container_of(counter, struct mem_cgroup, member)
+
 /*
- * Charge the memory controller for page usage.
- * Return
- * 0 if the charge was successful
- * < 0 if the cgroup is over its limit
+ * This routine finds the DFS walk successor. This routine should be
+ * called with hierarchy_mutex held
  */
-static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
-				gfp_t gfp_mask, enum charge_type ctype,
-				struct mem_cgroup *memcg)
+static struct mem_cgroup *
+mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
 {
+	struct cgroup *cgroup, *curr_cgroup, *root_cgroup;
+
+	curr_cgroup = curr->css.cgroup;
+	root_cgroup = root_mem->css.cgroup;
+
+	if (!list_empty(&curr_cgroup->children)) {
+		/*
+		 * Walk down to children
+		 */
+		mem_cgroup_put(curr);
+		cgroup = list_entry(curr_cgroup->children.next,
+						struct cgroup, sibling);
+		curr = mem_cgroup_from_cont(cgroup);
+		mem_cgroup_get(curr);
+		goto done;
+	}
+
+visit_parent:
+	if (curr_cgroup == root_cgroup) {
+		mem_cgroup_put(curr);
+		curr = root_mem;
+		mem_cgroup_get(curr);
+		goto done;
+	}
+
+	/*
+	 * Goto next sibling
+	 */
+	if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
+		mem_cgroup_put(curr);
+		cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
+					sibling);
+		curr = mem_cgroup_from_cont(cgroup);
+		mem_cgroup_get(curr);
+		goto done;
+	}
+
+	/*
+	 * Go up to next parent and next parent's sibling if need be
+	 */
+	curr_cgroup = curr_cgroup->parent;
+	goto visit_parent;
+
+done:
+	root_mem->last_scanned_child = curr;
+	return curr;
+}
+
+/*
+ * Visit the first child (need not be the first child as per the ordering
+ * of the cgroup list, since we track last_scanned_child) of @mem and use
+ * that to reclaim free pages from.
+ */
+static struct mem_cgroup *
+mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
+{
+	struct cgroup *cgroup;
+	struct mem_cgroup *ret;
+	bool obsolete;
+
+	obsolete = mem_cgroup_is_obsolete(root_mem->last_scanned_child);
+
+	/*
+	 * Scan all children under the mem_cgroup mem
+	 */
+	mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
+	if (list_empty(&root_mem->css.cgroup->children)) {
+		ret = root_mem;
+		goto done;
+	}
+
+	if (!root_mem->last_scanned_child || obsolete) {
+
+		if (obsolete && root_mem->last_scanned_child)
+			mem_cgroup_put(root_mem->last_scanned_child);
+
+		cgroup = list_first_entry(&root_mem->css.cgroup->children,
+				struct cgroup, sibling);
+		ret = mem_cgroup_from_cont(cgroup);
+		mem_cgroup_get(ret);
+	} else
+		ret = mem_cgroup_get_next_node(root_mem->last_scanned_child,
+						root_mem);
+
+done:
+	root_mem->last_scanned_child = ret;
+	mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
+	return ret;
+}
+
+static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
+{
+	if (do_swap_account) {
+		if (res_counter_check_under_limit(&mem->res) &&
+		    res_counter_check_under_limit(&mem->memsw))
+			return true;
+	} else
+		if (res_counter_check_under_limit(&mem->res))
+			return true;
+	return false;
+}
+
+static unsigned int get_swappiness(struct mem_cgroup *memcg)
+{
+	struct cgroup *cgrp = memcg->css.cgroup;
+	unsigned int swappiness;
+
+	/* root ? */
+	if (cgrp->parent == NULL)
+		return vm_swappiness;
+
+	spin_lock(&memcg->reclaim_param_lock);
+	swappiness = memcg->swappiness;
+	spin_unlock(&memcg->reclaim_param_lock);
+
+	return swappiness;
+}
+
+/*
+ * Dance down the hierarchy if needed to reclaim memory. We remember the
+ * last child we reclaimed from, so that we don't end up penalizing
+ * one child extensively based on its position in the children list.
+ *
+ * root_mem is the original ancestor that we've been reclaim from.
+ */
+static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
+						gfp_t gfp_mask, bool noswap)
+{
+	struct mem_cgroup *next_mem;
+	int ret = 0;
+
+	/*
+	 * Reclaim unconditionally and don't check for return value.
+	 * We need to reclaim in the current group and down the tree.
+	 * One might think about checking for children before reclaiming,
+	 * but there might be left over accounting, even after children
+	 * have left.
+	 */
+	ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap,
+					   get_swappiness(root_mem));
+	if (mem_cgroup_check_under_limit(root_mem))
+		return 0;
+	if (!root_mem->use_hierarchy)
+		return ret;
+
+	next_mem = mem_cgroup_get_first_node(root_mem);
+
+	while (next_mem != root_mem) {
+		if (mem_cgroup_is_obsolete(next_mem)) {
+			mem_cgroup_put(next_mem);
+			next_mem = mem_cgroup_get_first_node(root_mem);
+			continue;
+		}
+		ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
+						   get_swappiness(next_mem));
+		if (mem_cgroup_check_under_limit(root_mem))
+			return 0;
+		mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
+		next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
+		mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
+	}
+	return ret;
+}
+
+bool mem_cgroup_oom_called(struct task_struct *task)
+{
+	bool ret = false;
 	struct mem_cgroup *mem;
-	struct page_cgroup *pc;
-	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
-	struct mem_cgroup_per_zone *mz;
-	unsigned long flags;
+	struct mm_struct *mm;
 
-	pc = lookup_page_cgroup(page);
-	/* can happen at boot */
-	if (unlikely(!pc))
+	rcu_read_lock();
+	mm = task->mm;
+	if (!mm)
+		mm = &init_mm;
+	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+	if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
+		ret = true;
+	rcu_read_unlock();
+	return ret;
+}
+/*
+ * Unlike exported interface, "oom" parameter is added. if oom==true,
+ * oom-killer can be invoked.
+ */
+static int __mem_cgroup_try_charge(struct mm_struct *mm,
+			gfp_t gfp_mask, struct mem_cgroup **memcg,
+			bool oom)
+{
+	struct mem_cgroup *mem, *mem_over_limit;
+	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
+	struct res_counter *fail_res;
+
+	if (unlikely(test_thread_flag(TIF_MEMDIE))) {
+		/* Don't account this! */
+		*memcg = NULL;
 		return 0;
-	prefetchw(pc);
+	}
+
 	/*
 	 * We always charge the cgroup the mm_struct belongs to.
 	 * The mm_struct's mem_cgroup changes on task migration if the
 	 * thread group leader migrates. It's possible that mm is not
 	 * set, if so charge the init_mm (happens for pagecache usage).
 	 */
-
-	if (likely(!memcg)) {
-		rcu_read_lock();
-		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
-		if (unlikely(!mem)) {
-			rcu_read_unlock();
-			return 0;
-		}
-		/*
-		 * For every charge from the cgroup, increment reference count
-		 */
-		css_get(&mem->css);
-		rcu_read_unlock();
+	mem = *memcg;
+	if (likely(!mem)) {
+		mem = try_get_mem_cgroup_from_mm(mm);
+		*memcg = mem;
 	} else {
-		mem = memcg;
-		css_get(&memcg->css);
+		css_get(&mem->css);
 	}
+	if (unlikely(!mem))
+		return 0;
+
+	VM_BUG_ON(mem_cgroup_is_obsolete(mem));
+
+	while (1) {
+		int ret;
+		bool noswap = false;
+
+		ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
+		if (likely(!ret)) {
+			if (!do_swap_account)
+				break;
+			ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
+							&fail_res);
+			if (likely(!ret))
+				break;
+			/* mem+swap counter fails */
+			res_counter_uncharge(&mem->res, PAGE_SIZE);
+			noswap = true;
+			mem_over_limit = mem_cgroup_from_res_counter(fail_res,
+									memsw);
+		} else
+			/* mem counter fails */
+			mem_over_limit = mem_cgroup_from_res_counter(fail_res,
+									res);
 
-	while (unlikely(res_counter_charge(&mem->res, PAGE_SIZE))) {
 		if (!(gfp_mask & __GFP_WAIT))
-			goto out;
+			goto nomem;
 
-		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
-			continue;
+		ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
+							noswap);
 
 		/*
 		 * try_to_free_mem_cgroup_pages() might not give us a full
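
The while(1) loop at the end of the hunk above keeps the two counters consistent with a strict order; a condensed sketch of that protocol (error paths only, logic restated from the code):

	if (res_counter_charge(&mem->res, PAGE_SIZE, &fail_res))
		goto reclaim;			/* 'res' itself is full */
	if (do_swap_account &&
	    res_counter_charge(&mem->memsw, PAGE_SIZE, &fail_res)) {
		res_counter_uncharge(&mem->res, PAGE_SIZE); /* roll back */
		noswap = true;	/* swapping out cannot lower mem+swap usage */
		goto reclaim;
	}
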
@@ -525,49 +870,214 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 		 * moved to swap cache or just unmapped from the cgroup.
 		 * Check the limit again to see if the reclaim reduced the
 		 * current usage of the cgroup before giving up
+		 *
 		 */
-		if (res_counter_check_under_limit(&mem->res))
+		if (mem_cgroup_check_under_limit(mem_over_limit))
 			continue;
 
 		if (!nr_retries--) {
-			mem_cgroup_out_of_memory(mem, gfp_mask);
-			goto out;
+			if (oom) {
+				mutex_lock(&memcg_tasklist);
+				mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
+				mutex_unlock(&memcg_tasklist);
+				mem_over_limit->last_oom_jiffies = jiffies;
+			}
+			goto nomem;
 		}
 	}
+	return 0;
+nomem:
+	css_put(&mem->css);
+	return -ENOMEM;
+}
 
+static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
+{
+	struct mem_cgroup *mem;
+	swp_entry_t ent;
+
+	if (!PageSwapCache(page))
+		return NULL;
+
+	ent.val = page_private(page);
+	mem = lookup_swap_cgroup(ent);
+	if (!mem)
+		return NULL;
+	if (!css_tryget(&mem->css))
+		return NULL;
+	return mem;
+}
+
+/*
+ * commit a charge got by __mem_cgroup_try_charge() and makes page_cgroup to be
+ * USED state. If already USED, uncharge and return.
+ */
+
+static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
+				     struct page_cgroup *pc,
+				     enum charge_type ctype)
+{
+	/* try_charge() can return NULL to *memcg, taking care of it. */
+	if (!mem)
+		return;
 
 	lock_page_cgroup(pc);
 	if (unlikely(PageCgroupUsed(pc))) {
 		unlock_page_cgroup(pc);
 		res_counter_uncharge(&mem->res, PAGE_SIZE);
+		if (do_swap_account)
+			res_counter_uncharge(&mem->memsw, PAGE_SIZE);
 		css_put(&mem->css);
-
-		goto done;
+		return;
 	}
 	pc->mem_cgroup = mem;
-	/*
-	 * If a page is accounted as a page cache, insert to inactive list.
-	 * If anon, insert to active list.
-	 */
+	smp_wmb();
 	pc->flags = pcg_default_flags[ctype];
 
-	mz = page_cgroup_zoneinfo(pc);
-
-	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_add_list(mz, pc);
-	spin_unlock_irqrestore(&mz->lru_lock, flags);
+	mem_cgroup_charge_statistics(mem, pc, true);
+
 	unlock_page_cgroup(pc);
+}
 
-done:
-	return 0;
+/**
+ * mem_cgroup_move_account - move account of the page
+ * @pc:	page_cgroup of the page.
+ * @from: mem_cgroup which the page is moved from.
+ * @to:	mem_cgroup which the page is moved to. @from != @to.
+ *
+ * The caller must confirm following.
+ * - page is not on LRU (isolate_page() is useful.)
+ *
+ * returns 0 at success,
+ * returns -EBUSY when lock is busy or "pc" is unstable.
+ *
+ * This function does "uncharge" from old cgroup but doesn't do "charge" to
+ * new cgroup. It should be done by a caller.
+ */
+
+static int mem_cgroup_move_account(struct page_cgroup *pc,
+	struct mem_cgroup *from, struct mem_cgroup *to)
+{
+	struct mem_cgroup_per_zone *from_mz, *to_mz;
+	int nid, zid;
+	int ret = -EBUSY;
+
+	VM_BUG_ON(from == to);
+	VM_BUG_ON(PageLRU(pc->page));
+
+	nid = page_cgroup_nid(pc);
+	zid = page_cgroup_zid(pc);
+	from_mz = mem_cgroup_zoneinfo(from, nid, zid);
+	to_mz = mem_cgroup_zoneinfo(to, nid, zid);
+
+	if (!trylock_page_cgroup(pc))
+		return ret;
+
+	if (!PageCgroupUsed(pc))
+		goto out;
+
+	if (pc->mem_cgroup != from)
+		goto out;
+
+	css_put(&from->css);
+	res_counter_uncharge(&from->res, PAGE_SIZE);
+	mem_cgroup_charge_statistics(from, pc, false);
+	if (do_swap_account)
+		res_counter_uncharge(&from->memsw, PAGE_SIZE);
+	pc->mem_cgroup = to;
+	mem_cgroup_charge_statistics(to, pc, true);
+	css_get(&to->css);
+	ret = 0;
 out:
-	css_put(&mem->css);
-	return -ENOMEM;
+	unlock_page_cgroup(pc);
+	return ret;
+}
+
+/*
+ * move charges to its parent.
+ */
+
+static int mem_cgroup_move_parent(struct page_cgroup *pc,
+				  struct mem_cgroup *child,
+				  gfp_t gfp_mask)
+{
+	struct page *page = pc->page;
+	struct cgroup *cg = child->css.cgroup;
+	struct cgroup *pcg = cg->parent;
+	struct mem_cgroup *parent;
+	int ret;
+
+	/* Is ROOT ? */
+	if (!pcg)
+		return -EINVAL;
+
+
+	parent = mem_cgroup_from_cont(pcg);
+
+
+	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
+	if (ret || !parent)
+		return ret;
+
+	if (!get_page_unless_zero(page))
+		return -EBUSY;
+
+	ret = isolate_lru_page(page);
+
+	if (ret)
+		goto cancel;
+
+	ret = mem_cgroup_move_account(pc, child, parent);
+
+	/* drop extra refcnt by try_charge() (move_account increment one) */
+	css_put(&parent->css);
+	putback_lru_page(page);
+	if (!ret) {
+		put_page(page);
+		return 0;
+	}
+	/* uncharge if move fails */
+cancel:
+	res_counter_uncharge(&parent->res, PAGE_SIZE);
+	if (do_swap_account)
+		res_counter_uncharge(&parent->memsw, PAGE_SIZE);
+	put_page(page);
+	return ret;
+}
+
+/*
+ * Charge the memory controller for page usage.
+ * Return
+ * 0 if the charge was successful
+ * < 0 if the cgroup is over its limit
+ */
+static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
+				gfp_t gfp_mask, enum charge_type ctype,
+				struct mem_cgroup *memcg)
+{
+	struct mem_cgroup *mem;
+	struct page_cgroup *pc;
+	int ret;
+
+	pc = lookup_page_cgroup(page);
+	/* can happen at boot */
+	if (unlikely(!pc))
+		return 0;
+	prefetchw(pc);
+
+	mem = memcg;
+	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
+	if (ret || !mem)
+		return ret;
+
+	__mem_cgroup_commit_charge(mem, pc, ctype);
+	return 0;
 }
 
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
+int mem_cgroup_newpage_charge(struct page *page,
+			      struct mm_struct *mm, gfp_t gfp_mask)
 {
-	if (mem_cgroup_subsys.disabled)
+	if (mem_cgroup_disabled())
 		return 0;
 	if (PageCompound(page))
 		return 0;
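
A condensed restatement of the order mem_cgroup_move_parent() enforces above:

	/*
	 * 1. __mem_cgroup_try_charge(parent)  reserve res (+ memsw) in parent
	 * 2. get_page_unless_zero(page)       pin the page
	 * 3. isolate_lru_page(page)           move_account() needs !PageLRU
	 * 4. mem_cgroup_move_account()        retarget pc->mem_cgroup
	 * 5. putback_lru_page(page)           relink under the new owner
	 * Any failure after step 1 must uncharge the parent again, which is
	 * what the cancel: label does.
	 */
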
@@ -589,7 +1099,10 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask)
 {
-	if (mem_cgroup_subsys.disabled)
+	struct mem_cgroup *mem = NULL;
+	int ret;
+
+	if (mem_cgroup_disabled())
 		return 0;
 	if (PageCompound(page))
 		return 0;
@@ -601,6 +1114,8 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 	 * For GFP_NOWAIT case, the page may be pre-charged before calling
 	 * add_to_page_cache(). (See shmem.c) check it here and avoid to call
 	 * charge twice. (It works but has to pay a bit larger cost.)
+	 * And when the page is SwapCache, it should take swap information
+	 * into account. This is under lock_page() now.
 	 */
 	if (!(gfp_mask & __GFP_WAIT)) {
 		struct page_cgroup *pc;
@@ -617,58 +1132,198 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 		unlock_page_cgroup(pc);
 	}
 
-	if (unlikely(!mm))
+	if (do_swap_account && PageSwapCache(page)) {
+		mem = try_get_mem_cgroup_from_swapcache(page);
+		if (mem)
+			mm = NULL;
+		else
+			mem = NULL;
+		/* SwapCache may be still linked to LRU now. */
+		mem_cgroup_lru_del_before_commit_swapcache(page);
+	}
+
+	if (unlikely(!mm && !mem))
 		mm = &init_mm;
 
 	if (page_is_file_cache(page))
 		return mem_cgroup_charge_common(page, mm, gfp_mask,
 				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
-	else
-		return mem_cgroup_charge_common(page, mm, gfp_mask,
-				MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
+
+	ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+				MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
+	if (mem)
+		css_put(&mem->css);
+	if (PageSwapCache(page))
+		mem_cgroup_lru_add_after_commit_swapcache(page);
+
+	if (do_swap_account && !ret && PageSwapCache(page)) {
+		swp_entry_t ent = {.val = page_private(page)};
+		/* avoid double counting */
+		mem = swap_cgroup_record(ent, NULL);
+		if (mem) {
+			res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+			mem_cgroup_put(mem);
+		}
+	}
+	return ret;
+}
+
+/*
+ * While swap-in, try_charge -> commit or cancel, the page is locked.
+ * And when try_charge() successfully returns, one refcnt to memcg without
+ * struct page_cgroup is aquired. This refcnt will be cumsumed by
+ * "commit()" or removed by "cancel()"
+ */
+int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
+				 struct page *page,
+				 gfp_t mask, struct mem_cgroup **ptr)
+{
+	struct mem_cgroup *mem;
+	int ret;
+
+	if (mem_cgroup_disabled())
+		return 0;
+
+	if (!do_swap_account)
+		goto charge_cur_mm;
+	/*
+	 * A racing thread's fault, or swapoff, may have already updated
+	 * the pte, and even removed page from swap cache: return success
+	 * to go on to do_swap_page()'s pte_same() test, which should fail.
+	 */
+	if (!PageSwapCache(page))
+		return 0;
+	mem = try_get_mem_cgroup_from_swapcache(page);
+	if (!mem)
+		goto charge_cur_mm;
+	*ptr = mem;
+	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
+	/* drop extra refcnt from tryget */
+	css_put(&mem->css);
+	return ret;
+charge_cur_mm:
+	if (unlikely(!mm))
+		mm = &init_mm;
+	return __mem_cgroup_try_charge(mm, mask, ptr, true);
+}
+
+void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+{
+	struct page_cgroup *pc;
+
+	if (mem_cgroup_disabled())
+		return;
+	if (!ptr)
+		return;
+	pc = lookup_page_cgroup(page);
+	mem_cgroup_lru_del_before_commit_swapcache(page);
+	__mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
+	mem_cgroup_lru_add_after_commit_swapcache(page);
+	/*
+	 * Now swap is on-memory. This means this page may be
+	 * counted both as mem and swap....double count.
+	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
+	 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
+	 * may call delete_from_swap_cache() before reach here.
+	 */
+	if (do_swap_account && PageSwapCache(page)) {
+		swp_entry_t ent = {.val = page_private(page)};
+		struct mem_cgroup *memcg;
+		memcg = swap_cgroup_record(ent, NULL);
+		if (memcg) {
+			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+			mem_cgroup_put(memcg);
+		}
+
+	}
+	/* add this page(page_cgroup) to the LRU we want. */
+
 }
 
+void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
+{
+	if (mem_cgroup_disabled())
+		return;
+	if (!mem)
+		return;
+	res_counter_uncharge(&mem->res, PAGE_SIZE);
+	if (do_swap_account)
+		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+	css_put(&mem->css);
+}
+
+
 /*
  * uncharge if !page_mapped(page)
  */
-static void
+static struct mem_cgroup *
 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 {
 	struct page_cgroup *pc;
-	struct mem_cgroup *mem;
+	struct mem_cgroup *mem = NULL;
 	struct mem_cgroup_per_zone *mz;
-	unsigned long flags;
 
-	if (mem_cgroup_subsys.disabled)
-		return;
+	if (mem_cgroup_disabled())
+		return NULL;
+
+	if (PageSwapCache(page))
+		return NULL;
 
 	/*
 	 * Check if our page_cgroup is valid
 	 */
 	pc = lookup_page_cgroup(page);
 	if (unlikely(!pc || !PageCgroupUsed(pc)))
-		return;
+		return NULL;
 
 	lock_page_cgroup(pc);
-	if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED && page_mapped(page))
-	    || !PageCgroupUsed(pc)) {
-		/* This happens at race in zap_pte_range() and do_swap_page()*/
-		unlock_page_cgroup(pc);
-		return;
+
+	mem = pc->mem_cgroup;
+
+	if (!PageCgroupUsed(pc))
+		goto unlock_out;
+
+	switch (ctype) {
+	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+		if (page_mapped(page))
+			goto unlock_out;
+		break;
+	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
+		if (!PageAnon(page)) {	/* Shared memory */
+			if (page->mapping && !page_is_file_cache(page))
+				goto unlock_out;
+		} else if (page_mapped(page)) /* Anon */
+			goto unlock_out;
+		break;
+	default:
+		break;
 	}
+
+	res_counter_uncharge(&mem->res, PAGE_SIZE);
+	if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
+		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+
+	mem_cgroup_charge_statistics(mem, pc, false);
 	ClearPageCgroupUsed(pc);
-	mem = pc->mem_cgroup;
+	/*
+	 * pc->mem_cgroup is not cleared here. It will be accessed when it's
+	 * freed from LRU. This is safe because uncharged page is expected not
+	 * to be reused (freed soon). Exception is SwapCache, it's handled by
+	 * special functions.
+	 */
 
 	mz = page_cgroup_zoneinfo(pc);
-	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_remove_list(mz, pc);
-	spin_unlock_irqrestore(&mz->lru_lock, flags);
 	unlock_page_cgroup(pc);
 
-	res_counter_uncharge(&mem->res, PAGE_SIZE);
-	css_put(&mem->css);
+	/* at swapout, this memcg will be accessed to record to swap */
+	if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
+		css_put(&mem->css);
 
-	return;
+	return mem;
+
+unlock_out:
+	unlock_page_cgroup(pc);
+	return NULL;
 }
 
 void mem_cgroup_uncharge_page(struct page *page)
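
The try/commit/cancel triple above is driven from the fault path; a sketch of the caller protocol (do_swap_page() in mm/memory.c follows this shape in the same series; details trimmed and names illustrative):

	struct mem_cgroup *ptr = NULL;

	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
		ret = VM_FAULT_OOM;	/* charge failed, pte untouched */
		goto out;
	}
	/* ... take the pte lock and re-check pte_same() ... */
	if (pte_unchanged)
		mem_cgroup_commit_charge_swapin(page, ptr); /* mark pc USED */
	else
		mem_cgroup_cancel_charge_swapin(ptr);	/* undo res (+memsw) */
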
@@ -689,16 +1344,55 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
 }
 
 /*
- * Before starting migration, account against new page.
+ * called from __delete_from_swap_cache() and drop "page" account.
+ * memcg information is recorded to swap_cgroup of "ent"
+ */
+void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
+{
+	struct mem_cgroup *memcg;
+
+	memcg = __mem_cgroup_uncharge_common(page,
+					MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
+	/* record memcg information */
+	if (do_swap_account && memcg) {
+		swap_cgroup_record(ent, memcg);
+		mem_cgroup_get(memcg);
+	}
+	if (memcg)
+		css_put(&memcg->css);
+}
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+/*
+ * called from swap_entry_free(). remove record in swap_cgroup and
+ * uncharge "memsw" account.
  */
-int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
+void mem_cgroup_uncharge_swap(swp_entry_t ent)
+{
+	struct mem_cgroup *memcg;
+
+	if (!do_swap_account)
+		return;
+
+	memcg = swap_cgroup_record(ent, NULL);
+	if (memcg) {
+		res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+		mem_cgroup_put(memcg);
+	}
+}
+#endif
+
+/*
+ * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
+ * page belongs to.
+ */
+int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
 {
 	struct page_cgroup *pc;
 	struct mem_cgroup *mem = NULL;
-	enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
 	int ret = 0;
 
-	if (mem_cgroup_subsys.disabled)
+	if (mem_cgroup_disabled())
 		return 0;
 
 	pc = lookup_page_cgroup(page);
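
Taken together with mem_cgroup_uncharge_swapcache() just above, the memsw charge follows the swap entry rather than the page. The lifecycle, restated as a sketch:

	/*
	 * swap-out:  __delete_from_swap_cache()
	 *   -> mem_cgroup_uncharge_swapcache(page, ent)
	 *      'res' drops; memcg is recorded in the swap_cgroup for 'ent';
	 *      'memsw' stays charged and a mem_cgroup_get() reference is held.
	 * swap slot freed:  swap_entry_free()
	 *   -> mem_cgroup_uncharge_swap(ent)
	 *      record cleared, 'memsw' finally uncharged, reference dropped.
	 */
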
@@ -706,41 +1400,67 @@ int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
 	if (PageCgroupUsed(pc)) {
 		mem = pc->mem_cgroup;
 		css_get(&mem->css);
-		if (PageCgroupCache(pc)) {
-			if (page_is_file_cache(page))
-				ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
-			else
-				ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-		}
 	}
 	unlock_page_cgroup(pc);
+
 	if (mem) {
-		ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
-			ctype, mem);
+		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
 		css_put(&mem->css);
 	}
+	*ptr = mem;
 	return ret;
 }
 
 /* remove redundant charge if migration failed*/
-void mem_cgroup_end_migration(struct page *newpage)
+void mem_cgroup_end_migration(struct mem_cgroup *mem,
+	struct page *oldpage, struct page *newpage)
 {
+	struct page *target, *unused;
+	struct page_cgroup *pc;
+	enum charge_type ctype;
+
+	if (!mem)
+		return;
+
+	/* at migration success, oldpage->mapping is NULL. */
+	if (oldpage->mapping) {
+		target = oldpage;
+		unused = NULL;
+	} else {
+		target = newpage;
+		unused = oldpage;
+	}
+
+	if (PageAnon(target))
+		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
+	else if (page_is_file_cache(target))
+		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
+	else
+		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
+
+	/* unused page is not on radix-tree now. */
+	if (unused)
+		__mem_cgroup_uncharge_common(unused, ctype);
+
+	pc = lookup_page_cgroup(target);
 	/*
-	 * At success, page->mapping is not NULL.
-	 * special rollback care is necessary when
-	 * 1. at migration failure. (newpage->mapping is cleared in this case)
-	 * 2. the newpage was moved but not remapped again because the task
-	 *    exits and the newpage is obsolete. In this case, the new page
-	 *    may be a swapcache. So, we just call mem_cgroup_uncharge_page()
-	 *    always for avoiding mess. The page_cgroup will be removed if
-	 *    unnecessary. File cache pages is still on radix-tree. Don't
-	 *    care it.
+	 * __mem_cgroup_commit_charge() check PCG_USED bit of page_cgroup.
+	 * So, double-counting is effectively avoided.
 	 */
-	if (!newpage->mapping)
-		__mem_cgroup_uncharge_common(newpage,
-				MEM_CGROUP_CHARGE_TYPE_FORCE);
-	else if (PageAnon(newpage))
-		mem_cgroup_uncharge_page(newpage);
+	__mem_cgroup_commit_charge(mem, pc, ctype);
+
+	/*
+	 * Both of oldpage and newpage are still under lock_page().
+	 * Then, we don't have to care about race in radix-tree.
+	 * But we have to be careful that this page is unmapped or not.
+	 *
+	 * There is a case for !page_mapped(). At the start of
+	 * migration, oldpage was mapped. But now, it's zapped.
+	 * But we know *target* page is not freed/reused under us.
+	 * mem_cgroup_uncharge_page() does all necessary checks.
+	 */
+	if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
+		mem_cgroup_uncharge_page(target);
 }
 
 /*
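
A sketch of how the migration side is expected to drive the new prepare/end pair (the real caller is in the mm/migrate.c hunk of this series; variable names here are illustrative):

	struct mem_cgroup *mem = NULL;
	int charge;

	charge = mem_cgroup_prepare_migration(page, &mem); /* pre-charge old memcg */
	/* ... copy 'page' to 'newpage'; on success the old page->mapping
	 *     is cleared, on failure 'newpage' is discarded ... */
	mem_cgroup_end_migration(mem, page, newpage);	/* commit to survivor */
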
@@ -748,29 +1468,26 @@ void mem_cgroup_end_migration(struct page *newpage)
  * This is typically used for page reclaiming for shmem for reducing side
  * effect of page allocation from shmem, which is used by some mem_cgroup.
  */
-int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
+int mem_cgroup_shrink_usage(struct page *page,
+			struct mm_struct *mm,
+			gfp_t gfp_mask)
 {
-	struct mem_cgroup *mem;
+	struct mem_cgroup *mem = NULL;
 	int progress = 0;
 	int retry = MEM_CGROUP_RECLAIM_RETRIES;
 
-	if (mem_cgroup_subsys.disabled)
+	if (mem_cgroup_disabled())
 		return 0;
-	if (!mm)
+	if (page)
+		mem = try_get_mem_cgroup_from_swapcache(page);
+	if (!mem && mm)
+		mem = try_get_mem_cgroup_from_mm(mm);
+	if (unlikely(!mem))
 		return 0;
 
-	rcu_read_lock();
-	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
-	if (unlikely(!mem)) {
-		rcu_read_unlock();
-		return 0;
-	}
-	css_get(&mem->css);
-	rcu_read_unlock();
-
 	do {
-		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
-		progress += res_counter_check_under_limit(&mem->res);
+		progress = mem_cgroup_hierarchical_reclaim(mem, gfp_mask, true);
+		progress += mem_cgroup_check_under_limit(mem);
 	} while (!progress && --retry);
 
 	css_put(&mem->css);
@@ -779,117 +1496,295 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
779 return 0; 1496 return 0;
780} 1497}
781 1498
1499static DEFINE_MUTEX(set_limit_mutex);
1500
782static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, 1501static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
783 unsigned long long val) 1502 unsigned long long val)
784{ 1503{
785 1504
786 int retry_count = MEM_CGROUP_RECLAIM_RETRIES; 1505 int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
787 int progress; 1506 int progress;
1507 u64 memswlimit;
788 int ret = 0; 1508 int ret = 0;
789 1509
790 while (res_counter_set_limit(&memcg->res, val)) { 1510 while (retry_count) {
791 if (signal_pending(current)) { 1511 if (signal_pending(current)) {
792 ret = -EINTR; 1512 ret = -EINTR;
793 break; 1513 break;
794 } 1514 }
795 if (!retry_count) { 1515 /*
796 ret = -EBUSY; 1516 * Rather than hide all in some function, I do this in
1517 * open coded manner. You see what this really does.
1518 * We have to guarantee mem->res.limit < mem->memsw.limit.
1519 */
1520 mutex_lock(&set_limit_mutex);
1521 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1522 if (memswlimit < val) {
1523 ret = -EINVAL;
1524 mutex_unlock(&set_limit_mutex);
797 break; 1525 break;
798 } 1526 }
799 progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL); 1527 ret = res_counter_set_limit(&memcg->res, val);
800 if (!progress) 1528 mutex_unlock(&set_limit_mutex);
801 retry_count--; 1529
1530 if (!ret)
1531 break;
1532
1533 progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
1534 false);
1535 if (!progress) retry_count--;
802 } 1536 }
1537
803 return ret; 1538 return ret;
804} 1539}
805 1540
1541int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
1542 unsigned long long val)
1543{
1544 int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
1545 u64 memlimit, oldusage, curusage;
1546 int ret;
1547
1548 if (!do_swap_account)
1549 return -EINVAL;
1550
1551 while (retry_count) {
1552 if (signal_pending(current)) {
1553 ret = -EINTR;
1554 break;
1555 }
1556 /*
1557 * Rather than hide all in some function, I do this in
1558 * open coded manner. You see what this really does.
1559 * We have to guarantee mem->res.limit < mem->memsw.limit.
1560 */
1561 mutex_lock(&set_limit_mutex);
1562 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1563 if (memlimit > val) {
1564 ret = -EINVAL;
1565 mutex_unlock(&set_limit_mutex);
1566 break;
1567 }
1568 ret = res_counter_set_limit(&memcg->memsw, val);
1569 mutex_unlock(&set_limit_mutex);
1570
1571 if (!ret)
1572 break;
1573
1574 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1575 mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true);
1576 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1577 if (curusage >= oldusage)
1578 retry_count--;
1579 }
1580 return ret;
1581}
806 1582
807/* 1583/*
808 * This routine traverse page_cgroup in given list and drop them all. 1584 * This routine traverse page_cgroup in given list and drop them all.
809 * *And* this routine doesn't reclaim page itself, just removes page_cgroup. 1585 * *And* this routine doesn't reclaim page itself, just removes page_cgroup.
810 */ 1586 */
811#define FORCE_UNCHARGE_BATCH (128) 1587static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
812static void mem_cgroup_force_empty_list(struct mem_cgroup *mem, 1588 int node, int zid, enum lru_list lru)
813 struct mem_cgroup_per_zone *mz,
814 enum lru_list lru)
815{ 1589{
816 struct page_cgroup *pc; 1590 struct zone *zone;
817 struct page *page; 1591 struct mem_cgroup_per_zone *mz;
818 int count = FORCE_UNCHARGE_BATCH; 1592 struct page_cgroup *pc, *busy;
819 unsigned long flags; 1593 unsigned long flags, loop;
820 struct list_head *list; 1594 struct list_head *list;
1595 int ret = 0;
821 1596
1597 zone = &NODE_DATA(node)->node_zones[zid];
1598 mz = mem_cgroup_zoneinfo(mem, node, zid);
822 list = &mz->lists[lru]; 1599 list = &mz->lists[lru];
823 1600
824 spin_lock_irqsave(&mz->lru_lock, flags); 1601 loop = MEM_CGROUP_ZSTAT(mz, lru);
825 while (!list_empty(list)) { 1602 /* give some margin against -EBUSY etc... */
826 pc = list_entry(list->prev, struct page_cgroup, lru); 1603 loop += 256;
827 page = pc->page; 1604 busy = NULL;
828 if (!PageCgroupUsed(pc)) 1605 while (loop--) {
829 break; 1606 ret = 0;
830 get_page(page); 1607 spin_lock_irqsave(&zone->lru_lock, flags);
831 spin_unlock_irqrestore(&mz->lru_lock, flags); 1608 if (list_empty(list)) {
832 /* 1609 spin_unlock_irqrestore(&zone->lru_lock, flags);
833 * Check if this page is on LRU. !LRU page can be found
834 * if it's under page migration.
835 */
836 if (PageLRU(page)) {
837 __mem_cgroup_uncharge_common(page,
838 MEM_CGROUP_CHARGE_TYPE_FORCE);
839 put_page(page);
840 if (--count <= 0) {
841 count = FORCE_UNCHARGE_BATCH;
842 cond_resched();
843 }
844 } else {
845 spin_lock_irqsave(&mz->lru_lock, flags);
846 break; 1610 break;
847 } 1611 }
848 spin_lock_irqsave(&mz->lru_lock, flags); 1612 pc = list_entry(list->prev, struct page_cgroup, lru);
1613 if (busy == pc) {
1614 list_move(&pc->lru, list);
 1615 busy = NULL;
1616 spin_unlock_irqrestore(&zone->lru_lock, flags);
1617 continue;
1618 }
1619 spin_unlock_irqrestore(&zone->lru_lock, flags);
1620
1621 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
1622 if (ret == -ENOMEM)
1623 break;
1624
1625 if (ret == -EBUSY || ret == -EINVAL) {
1626 /* found lock contention or "pc" is obsolete. */
1627 busy = pc;
1628 cond_resched();
1629 } else
1630 busy = NULL;
849 } 1631 }
850 spin_unlock_irqrestore(&mz->lru_lock, flags); 1632
1633 if (!ret && !list_empty(list))
1634 return -EBUSY;
1635 return ret;
851} 1636}
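
The loop above drains one per-zone LRU list with a bounded budget: a page_cgroup whose move to the parent fails with -EBUSY or -EINVAL is remembered in "busy", rotated back onto the list, and retried, while the loop counter (list length plus a margin) caps the total work. A toy model of that retry pattern, with drain_one() standing in for mem_cgroup_move_parent():

	#include <stdio.h>
	#include <stdbool.h>

	#define NITEMS 8

	/* Stand-in for mem_cgroup_move_parent(): pretend every third
	 * attempt hits transient contention, i.e. -EBUSY. */
	static bool drain_one(int attempt)
	{
		return attempt % 3 != 0;
	}

	int main(void)
	{
		int remaining = NITEMS;
		long loop = NITEMS + 16;	/* margin against transient failures */
		int attempt = 1;

		while (remaining > 0 && loop--) {
			if (drain_one(attempt++))
				remaining--;	/* moved to the parent, off the list */
			/* else: "busy", rotate it and retry next iteration */
		}
		printf("remaining: %d (%s)\n", remaining,
		       remaining ? "-EBUSY" : "emptied");
		return remaining != 0;
	}
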
852 1637
853/* 1638/*
854 * make the mem_cgroup's charge 0 if there is no task. 1639 * make the mem_cgroup's charge 0 if there is no task.
855 * This enables deleting this mem_cgroup. 1640 * This enables deleting this mem_cgroup.
856 */ 1641 */
857static int mem_cgroup_force_empty(struct mem_cgroup *mem) 1642static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
858{ 1643{
859 int ret = -EBUSY; 1644 int ret;
860 int node, zid; 1645 int node, zid, shrink;
1646 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1647 struct cgroup *cgrp = mem->css.cgroup;
861 1648
862 css_get(&mem->css); 1649 css_get(&mem->css);
863 /* 1650
864 * page reclaim code (kswapd etc..) will move pages between 1651 shrink = 0;
865 * active_list <-> inactive_list while we don't take a lock. 1652 /* should free all ? */
866 * So, we have to do loop here until all lists are empty. 1653 if (free_all)
867 */ 1654 goto try_to_free;
1655move_account:
868 while (mem->res.usage > 0) { 1656 while (mem->res.usage > 0) {
869 if (atomic_read(&mem->css.cgroup->count) > 0) 1657 ret = -EBUSY;
1658 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
1659 goto out;
1660 ret = -EINTR;
1661 if (signal_pending(current))
870 goto out; 1662 goto out;
871 /* This makes sure all *used* pages end up on an LRU. */ 1663 /* This makes sure all *used* pages end up on an LRU. */
872 lru_add_drain_all(); 1664 lru_add_drain_all();
873 for_each_node_state(node, N_POSSIBLE) 1665 ret = 0;
874 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 1666 for_each_node_state(node, N_POSSIBLE) {
875 struct mem_cgroup_per_zone *mz; 1667 for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
876 enum lru_list l; 1668 enum lru_list l;
877 mz = mem_cgroup_zoneinfo(mem, node, zid); 1669 for_each_lru(l) {
878 for_each_lru(l) 1670 ret = mem_cgroup_force_empty_list(mem,
879 mem_cgroup_force_empty_list(mem, mz, l); 1671 node, zid, l);
1672 if (ret)
1673 break;
1674 }
880 } 1675 }
1676 if (ret)
1677 break;
1678 }
 1679 /* the parent cgroup doesn't seem to have enough memory */
1680 if (ret == -ENOMEM)
1681 goto try_to_free;
881 cond_resched(); 1682 cond_resched();
882 } 1683 }
883 ret = 0; 1684 ret = 0;
884out: 1685out:
885 css_put(&mem->css); 1686 css_put(&mem->css);
886 return ret; 1687 return ret;
1688
1689try_to_free:
 1690 /* returns -EBUSY if there is a task or if we come here twice. */
1691 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
1692 ret = -EBUSY;
1693 goto out;
1694 }
 1695 /* call try-to-free pages to make this cgroup empty */
1696 lru_add_drain_all();
1697 /* try to free all pages in this cgroup */
1698 shrink = 1;
1699 while (nr_retries && mem->res.usage > 0) {
1700 int progress;
1701
1702 if (signal_pending(current)) {
1703 ret = -EINTR;
1704 goto out;
1705 }
1706 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
1707 false, get_swappiness(mem));
1708 if (!progress) {
1709 nr_retries--;
1710 /* maybe some writeback is necessary */
1711 congestion_wait(WRITE, HZ/10);
1712 }
1713
1714 }
1715 lru_add_drain();
1716 /* try move_account...there may be some *locked* pages. */
1717 if (mem->res.usage)
1718 goto move_account;
1719 ret = 0;
1720 goto out;
1721}
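
mem_cgroup_force_empty() above is deliberately two-phase: it first reparents charges (move_account), falls back to direct reclaim (try_to_free) when the parent cannot absorb them or when free_all was requested, and allows exactly one bounce back to the move phase for pages that were locked while reclaim ran. A compact standalone model of that control flow; the failure pattern inside it is invented purely to exercise both phases.

	#include <stdio.h>

	int main(void)
	{
		long usage = 100;
		int shrink = 0;

	move_account:
		while (usage > 0) {
			int moved = (usage % 7 != 0);	/* fake occasional -ENOMEM */

			if (!moved)
				goto try_to_free;
			usage--;			/* charge moved to the parent */
		}
		printf("emptied by move_account\n");
		return 0;

	try_to_free:
		if (shrink) {				/* came here twice: give up */
			printf("busy, usage=%ld\n", usage);
			return 1;
		}
		shrink = 1;
		usage /= 2;				/* reclaim frees some pages */
		if (usage)
			goto move_account;
		printf("emptied by reclaim\n");
		return 0;
	}
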
1722
1723int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
1724{
1725 return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
1726}
1727
1728
1729static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
1730{
1731 return mem_cgroup_from_cont(cont)->use_hierarchy;
1732}
1733
1734static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
1735 u64 val)
1736{
1737 int retval = 0;
1738 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1739 struct cgroup *parent = cont->parent;
1740 struct mem_cgroup *parent_mem = NULL;
1741
1742 if (parent)
1743 parent_mem = mem_cgroup_from_cont(parent);
1744
1745 cgroup_lock();
1746 /*
 1747 * If the parent's use_hierarchy is set, we can't make any modifications
1748 * in the child subtrees. If it is unset, then the change can
1749 * occur, provided the current cgroup has no children.
1750 *
 1751 * For the root cgroup, parent_mem is NULL; we allow the value to be
 1752 * set if there are no children.
1753 */
1754 if ((!parent_mem || !parent_mem->use_hierarchy) &&
1755 (val == 1 || val == 0)) {
1756 if (list_empty(&cont->children))
1757 mem->use_hierarchy = val;
1758 else
1759 retval = -EBUSY;
1760 } else
1761 retval = -EINVAL;
1762 cgroup_unlock();
1763
1764 return retval;
887} 1765}
888 1766
889static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) 1767static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
890{ 1768{
891 return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res, 1769 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
892 cft->private); 1770 u64 val = 0;
1771 int type, name;
1772
1773 type = MEMFILE_TYPE(cft->private);
1774 name = MEMFILE_ATTR(cft->private);
1775 switch (type) {
1776 case _MEM:
1777 val = res_counter_read_u64(&mem->res, name);
1778 break;
1779 case _MEMSWAP:
1780 if (do_swap_account)
1781 val = res_counter_read_u64(&mem->memsw, name);
1782 break;
1783 default:
1784 BUG();
1785 break;
1786 }
1787 return val;
893} 1788}
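
mem_cgroup_read() above demultiplexes the single cftype->private word into a resource type (_MEM or _MEMSWAP) and an attribute (RES_USAGE, RES_LIMIT, ...). The real MEMFILE_* macros are defined earlier in this patch; the packing below is a plausible model consistent with how they are used here, and the enum values are assumptions made only for illustration.

	#include <assert.h>
	#include <stdio.h>

	enum { _MEM, _MEMSWAP };			/* illustrative values */
	enum { RES_USAGE, RES_LIMIT, RES_MAX_USAGE, RES_FAILCNT };

	#define MEMFILE_PRIVATE(type, attr)	(((type) << 16) | (attr))
	#define MEMFILE_TYPE(val)		((val) >> 16)
	#define MEMFILE_ATTR(val)		((val) & 0xffff)

	int main(void)
	{
		int priv = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT);

		assert(MEMFILE_TYPE(priv) == _MEMSWAP);
		assert(MEMFILE_ATTR(priv) == RES_LIMIT);
		printf("private=%#x -> type=%d attr=%d\n",
		       priv, MEMFILE_TYPE(priv), MEMFILE_ATTR(priv));
		return 0;
	}
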
894/* 1789/*
895 * The user of this function is... 1790 * The user of this function is...
@@ -899,15 +1794,22 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
899 const char *buffer) 1794 const char *buffer)
900{ 1795{
901 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 1796 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
1797 int type, name;
902 unsigned long long val; 1798 unsigned long long val;
903 int ret; 1799 int ret;
904 1800
905 switch (cft->private) { 1801 type = MEMFILE_TYPE(cft->private);
1802 name = MEMFILE_ATTR(cft->private);
1803 switch (name) {
906 case RES_LIMIT: 1804 case RES_LIMIT:
907 /* This function does all necessary parse...reuse it */ 1805 /* This function does all necessary parse...reuse it */
908 ret = res_counter_memparse_write_strategy(buffer, &val); 1806 ret = res_counter_memparse_write_strategy(buffer, &val);
909 if (!ret) 1807 if (ret)
1808 break;
1809 if (type == _MEM)
910 ret = mem_cgroup_resize_limit(memcg, val); 1810 ret = mem_cgroup_resize_limit(memcg, val);
1811 else
1812 ret = mem_cgroup_resize_memsw_limit(memcg, val);
911 break; 1813 break;
912 default: 1814 default:
913 ret = -EINVAL; /* should be BUG() ? */ 1815 ret = -EINVAL; /* should be BUG() ? */
@@ -916,27 +1818,59 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
916 return ret; 1818 return ret;
917} 1819}
918 1820
1821static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
1822 unsigned long long *mem_limit, unsigned long long *memsw_limit)
1823{
1824 struct cgroup *cgroup;
1825 unsigned long long min_limit, min_memsw_limit, tmp;
1826
1827 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1828 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1829 cgroup = memcg->css.cgroup;
1830 if (!memcg->use_hierarchy)
1831 goto out;
1832
1833 while (cgroup->parent) {
1834 cgroup = cgroup->parent;
1835 memcg = mem_cgroup_from_cont(cgroup);
1836 if (!memcg->use_hierarchy)
1837 break;
1838 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
1839 min_limit = min(min_limit, tmp);
1840 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1841 min_memsw_limit = min(min_memsw_limit, tmp);
1842 }
1843out:
1844 *mem_limit = min_limit;
1845 *memsw_limit = min_memsw_limit;
1846 return;
1847}
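
memcg_get_hierarchical_limit() above reports the tightest limit along the ancestor chain, stopping before it counts the first ancestor that opted out of the hierarchy. A small standalone model of that walk; struct node is an invented stand-in for the cgroup/memcg pair.

	#include <stdio.h>

	struct node {
		struct node *parent;
		int use_hierarchy;
		unsigned long long limit;
	};

	static unsigned long long effective_limit(struct node *n)
	{
		unsigned long long min = n->limit;

		if (!n->use_hierarchy)
			return min;
		for (n = n->parent; n; n = n->parent) {
			if (!n->use_hierarchy)	/* stop before counting it */
				break;
			if (n->limit < min)
				min = n->limit;
		}
		return min;
	}

	int main(void)
	{
		struct node root = { NULL,  1, 400 };
		struct node mid  = { &root, 1, 100 };
		struct node leaf = { &mid,  1, 300 };

		printf("effective limit: %llu\n", effective_limit(&leaf)); /* 100 */
		return 0;
	}
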
1848
919static int mem_cgroup_reset(struct cgroup *cont, unsigned int event) 1849static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
920{ 1850{
921 struct mem_cgroup *mem; 1851 struct mem_cgroup *mem;
1852 int type, name;
922 1853
923 mem = mem_cgroup_from_cont(cont); 1854 mem = mem_cgroup_from_cont(cont);
924 switch (event) { 1855 type = MEMFILE_TYPE(event);
1856 name = MEMFILE_ATTR(event);
1857 switch (name) {
925 case RES_MAX_USAGE: 1858 case RES_MAX_USAGE:
926 res_counter_reset_max(&mem->res); 1859 if (type == _MEM)
1860 res_counter_reset_max(&mem->res);
1861 else
1862 res_counter_reset_max(&mem->memsw);
927 break; 1863 break;
928 case RES_FAILCNT: 1864 case RES_FAILCNT:
929 res_counter_reset_failcnt(&mem->res); 1865 if (type == _MEM)
1866 res_counter_reset_failcnt(&mem->res);
1867 else
1868 res_counter_reset_failcnt(&mem->memsw);
930 break; 1869 break;
931 } 1870 }
932 return 0; 1871 return 0;
933} 1872}
934 1873
935static int mem_force_empty_write(struct cgroup *cont, unsigned int event)
936{
937 return mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
938}
939
940static const struct mem_cgroup_stat_desc { 1874static const struct mem_cgroup_stat_desc {
941 const char *msg; 1875 const char *msg;
942 u64 unit; 1876 u64 unit;
@@ -985,43 +1919,163 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
985 cb->fill(cb, "unevictable", unevictable * PAGE_SIZE); 1919 cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
986 1920
987 } 1921 }
1922 {
1923 unsigned long long limit, memsw_limit;
1924 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
1925 cb->fill(cb, "hierarchical_memory_limit", limit);
1926 if (do_swap_account)
1927 cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
1928 }
1929
1930#ifdef CONFIG_DEBUG_VM
1931 cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
1932
1933 {
1934 int nid, zid;
1935 struct mem_cgroup_per_zone *mz;
1936 unsigned long recent_rotated[2] = {0, 0};
1937 unsigned long recent_scanned[2] = {0, 0};
1938
1939 for_each_online_node(nid)
1940 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1941 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
1942
1943 recent_rotated[0] +=
1944 mz->reclaim_stat.recent_rotated[0];
1945 recent_rotated[1] +=
1946 mz->reclaim_stat.recent_rotated[1];
1947 recent_scanned[0] +=
1948 mz->reclaim_stat.recent_scanned[0];
1949 recent_scanned[1] +=
1950 mz->reclaim_stat.recent_scanned[1];
1951 }
1952 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
1953 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
1954 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
1955 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
1956 }
1957#endif
1958
1959 return 0;
1960}
1961
1962static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
1963{
1964 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
1965
1966 return get_swappiness(memcg);
1967}
1968
1969static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
1970 u64 val)
1971{
1972 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
1973 struct mem_cgroup *parent;
1974 if (val > 100)
1975 return -EINVAL;
1976
1977 if (cgrp->parent == NULL)
1978 return -EINVAL;
1979
1980 parent = mem_cgroup_from_cont(cgrp->parent);
 1981 /* Under hierarchy, only a root with no children may set this value */
1982 if ((parent->use_hierarchy) ||
1983 (memcg->use_hierarchy && !list_empty(&cgrp->children)))
1984 return -EINVAL;
1985
1986 spin_lock(&memcg->reclaim_param_lock);
1987 memcg->swappiness = val;
1988 spin_unlock(&memcg->reclaim_param_lock);
1989
988 return 0; 1990 return 0;
989} 1991}
990 1992
1993
991static struct cftype mem_cgroup_files[] = { 1994static struct cftype mem_cgroup_files[] = {
992 { 1995 {
993 .name = "usage_in_bytes", 1996 .name = "usage_in_bytes",
994 .private = RES_USAGE, 1997 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
995 .read_u64 = mem_cgroup_read, 1998 .read_u64 = mem_cgroup_read,
996 }, 1999 },
997 { 2000 {
998 .name = "max_usage_in_bytes", 2001 .name = "max_usage_in_bytes",
999 .private = RES_MAX_USAGE, 2002 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
1000 .trigger = mem_cgroup_reset, 2003 .trigger = mem_cgroup_reset,
1001 .read_u64 = mem_cgroup_read, 2004 .read_u64 = mem_cgroup_read,
1002 }, 2005 },
1003 { 2006 {
1004 .name = "limit_in_bytes", 2007 .name = "limit_in_bytes",
1005 .private = RES_LIMIT, 2008 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
1006 .write_string = mem_cgroup_write, 2009 .write_string = mem_cgroup_write,
1007 .read_u64 = mem_cgroup_read, 2010 .read_u64 = mem_cgroup_read,
1008 }, 2011 },
1009 { 2012 {
1010 .name = "failcnt", 2013 .name = "failcnt",
1011 .private = RES_FAILCNT, 2014 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
1012 .trigger = mem_cgroup_reset, 2015 .trigger = mem_cgroup_reset,
1013 .read_u64 = mem_cgroup_read, 2016 .read_u64 = mem_cgroup_read,
1014 }, 2017 },
1015 { 2018 {
2019 .name = "stat",
2020 .read_map = mem_control_stat_show,
2021 },
2022 {
1016 .name = "force_empty", 2023 .name = "force_empty",
1017 .trigger = mem_force_empty_write, 2024 .trigger = mem_cgroup_force_empty_write,
1018 }, 2025 },
1019 { 2026 {
1020 .name = "stat", 2027 .name = "use_hierarchy",
1021 .read_map = mem_control_stat_show, 2028 .write_u64 = mem_cgroup_hierarchy_write,
2029 .read_u64 = mem_cgroup_hierarchy_read,
2030 },
2031 {
2032 .name = "swappiness",
2033 .read_u64 = mem_cgroup_swappiness_read,
2034 .write_u64 = mem_cgroup_swappiness_write,
1022 }, 2035 },
1023}; 2036};
1024 2037
2038#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2039static struct cftype memsw_cgroup_files[] = {
2040 {
2041 .name = "memsw.usage_in_bytes",
2042 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
2043 .read_u64 = mem_cgroup_read,
2044 },
2045 {
2046 .name = "memsw.max_usage_in_bytes",
2047 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
2048 .trigger = mem_cgroup_reset,
2049 .read_u64 = mem_cgroup_read,
2050 },
2051 {
2052 .name = "memsw.limit_in_bytes",
2053 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
2054 .write_string = mem_cgroup_write,
2055 .read_u64 = mem_cgroup_read,
2056 },
2057 {
2058 .name = "memsw.failcnt",
2059 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
2060 .trigger = mem_cgroup_reset,
2061 .read_u64 = mem_cgroup_read,
2062 },
2063};
2064
2065static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2066{
2067 if (!do_swap_account)
2068 return 0;
2069 return cgroup_add_files(cont, ss, memsw_cgroup_files,
2070 ARRAY_SIZE(memsw_cgroup_files));
2071};
2072#else
2073static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2074{
2075 return 0;
2076}
2077#endif
2078
1025static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) 2079static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
1026{ 2080{
1027 struct mem_cgroup_per_node *pn; 2081 struct mem_cgroup_per_node *pn;
@@ -1047,7 +2101,6 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
1047 2101
1048 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 2102 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
1049 mz = &pn->zoneinfo[zone]; 2103 mz = &pn->zoneinfo[zone];
1050 spin_lock_init(&mz->lru_lock);
1051 for_each_lru(l) 2104 for_each_lru(l)
1052 INIT_LIST_HEAD(&mz->lists[l]); 2105 INIT_LIST_HEAD(&mz->lists[l]);
1053 } 2106 }
@@ -1059,55 +2112,113 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
1059 kfree(mem->info.nodeinfo[node]); 2112 kfree(mem->info.nodeinfo[node]);
1060} 2113}
1061 2114
2115static int mem_cgroup_size(void)
2116{
2117 int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
2118 return sizeof(struct mem_cgroup) + cpustat_size;
2119}
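
mem_cgroup_size() above works because struct mem_cgroup_stat now ends in a zero-length cpustat[] array, so a single allocation of the base struct plus nr_cpu_ids stat blocks replaces the old fixed NR_CPUS-sized array. A userspace sketch of the same trick (zero-length arrays are a GNU C extension, matching kernel usage; all names are illustrative):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct stat_cpu { long count[4]; };	/* models mem_cgroup_stat_cpu */

	struct memcg_model {
		long other_fields;
		struct stat_cpu cpustat[0];	/* zero-length trailing array */
	};

	static int nr_cpu_ids = 8;		/* stub; the kernel sets this */

	int main(void)
	{
		size_t size = sizeof(struct memcg_model)
			    + nr_cpu_ids * sizeof(struct stat_cpu);
		struct memcg_model *m = malloc(size);

		memset(m, 0, size);		/* as mem_cgroup_alloc() does */
		m->cpustat[nr_cpu_ids - 1].count[0] = 42;	/* last CPU's block */
		printf("one allocation of %zu bytes\n", size);
		free(m);
		return 0;
	}
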
2120
1062static struct mem_cgroup *mem_cgroup_alloc(void) 2121static struct mem_cgroup *mem_cgroup_alloc(void)
1063{ 2122{
1064 struct mem_cgroup *mem; 2123 struct mem_cgroup *mem;
2124 int size = mem_cgroup_size();
1065 2125
1066 if (sizeof(*mem) < PAGE_SIZE) 2126 if (size < PAGE_SIZE)
1067 mem = kmalloc(sizeof(*mem), GFP_KERNEL); 2127 mem = kmalloc(size, GFP_KERNEL);
1068 else 2128 else
1069 mem = vmalloc(sizeof(*mem)); 2129 mem = vmalloc(size);
1070 2130
1071 if (mem) 2131 if (mem)
1072 memset(mem, 0, sizeof(*mem)); 2132 memset(mem, 0, size);
1073 return mem; 2133 return mem;
1074} 2134}
1075 2135
1076static void mem_cgroup_free(struct mem_cgroup *mem) 2136/*
2137 * At destroying mem_cgroup, references from swap_cgroup can remain.
2138 * (scanning all at force_empty is too costly...)
2139 *
2140 * Instead of clearing all references at force_empty, we remember
2141 * the number of reference from swap_cgroup and free mem_cgroup when
2142 * it goes down to 0.
2143 *
2144 * Removal of cgroup itself succeeds regardless of refs from swap.
2145 */
2146
2147static void __mem_cgroup_free(struct mem_cgroup *mem)
1077{ 2148{
1078 if (sizeof(*mem) < PAGE_SIZE) 2149 int node;
2150
2151 for_each_node_state(node, N_POSSIBLE)
2152 free_mem_cgroup_per_zone_info(mem, node);
2153
2154 if (mem_cgroup_size() < PAGE_SIZE)
1079 kfree(mem); 2155 kfree(mem);
1080 else 2156 else
1081 vfree(mem); 2157 vfree(mem);
1082} 2158}
1083 2159
2160static void mem_cgroup_get(struct mem_cgroup *mem)
2161{
2162 atomic_inc(&mem->refcnt);
2163}
2164
2165static void mem_cgroup_put(struct mem_cgroup *mem)
2166{
2167 if (atomic_dec_and_test(&mem->refcnt))
2168 __mem_cgroup_free(mem);
2169}
2170
2171
2172#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2173static void __init enable_swap_cgroup(void)
2174{
2175 if (!mem_cgroup_disabled() && really_do_swap_account)
2176 do_swap_account = 1;
2177}
2178#else
2179static void __init enable_swap_cgroup(void)
2180{
2181}
2182#endif
1084 2183
1085static struct cgroup_subsys_state * 2184static struct cgroup_subsys_state *
1086mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) 2185mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
1087{ 2186{
1088 struct mem_cgroup *mem; 2187 struct mem_cgroup *mem, *parent;
1089 int node; 2188 int node;
1090 2189
1091 if (unlikely((cont->parent) == NULL)) { 2190 mem = mem_cgroup_alloc();
1092 mem = &init_mem_cgroup; 2191 if (!mem)
1093 } else { 2192 return ERR_PTR(-ENOMEM);
1094 mem = mem_cgroup_alloc();
1095 if (!mem)
1096 return ERR_PTR(-ENOMEM);
1097 }
1098
1099 res_counter_init(&mem->res);
1100 2193
1101 for_each_node_state(node, N_POSSIBLE) 2194 for_each_node_state(node, N_POSSIBLE)
1102 if (alloc_mem_cgroup_per_zone_info(mem, node)) 2195 if (alloc_mem_cgroup_per_zone_info(mem, node))
1103 goto free_out; 2196 goto free_out;
2197 /* root ? */
2198 if (cont->parent == NULL) {
2199 enable_swap_cgroup();
2200 parent = NULL;
2201 } else {
2202 parent = mem_cgroup_from_cont(cont->parent);
2203 mem->use_hierarchy = parent->use_hierarchy;
2204 }
1104 2205
2206 if (parent && parent->use_hierarchy) {
2207 res_counter_init(&mem->res, &parent->res);
2208 res_counter_init(&mem->memsw, &parent->memsw);
2209 } else {
2210 res_counter_init(&mem->res, NULL);
2211 res_counter_init(&mem->memsw, NULL);
2212 }
2213 mem->last_scanned_child = NULL;
2214 spin_lock_init(&mem->reclaim_param_lock);
2215
2216 if (parent)
2217 mem->swappiness = get_swappiness(parent);
2218 atomic_set(&mem->refcnt, 1);
1105 return &mem->css; 2219 return &mem->css;
1106free_out: 2220free_out:
1107 for_each_node_state(node, N_POSSIBLE) 2221 __mem_cgroup_free(mem);
1108 free_mem_cgroup_per_zone_info(mem, node);
1109 if (cont->parent != NULL)
1110 mem_cgroup_free(mem);
1111 return ERR_PTR(-ENOMEM); 2222 return ERR_PTR(-ENOMEM);
1112} 2223}
1113 2224
@@ -1115,26 +2226,26 @@ static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
1115 struct cgroup *cont) 2226 struct cgroup *cont)
1116{ 2227{
1117 struct mem_cgroup *mem = mem_cgroup_from_cont(cont); 2228 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1118 mem_cgroup_force_empty(mem); 2229 mem_cgroup_force_empty(mem, false);
1119} 2230}
1120 2231
1121static void mem_cgroup_destroy(struct cgroup_subsys *ss, 2232static void mem_cgroup_destroy(struct cgroup_subsys *ss,
1122 struct cgroup *cont) 2233 struct cgroup *cont)
1123{ 2234{
1124 int node; 2235 mem_cgroup_put(mem_cgroup_from_cont(cont));
1125 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1126
1127 for_each_node_state(node, N_POSSIBLE)
1128 free_mem_cgroup_per_zone_info(mem, node);
1129
1130 mem_cgroup_free(mem_cgroup_from_cont(cont));
1131} 2236}
1132 2237
1133static int mem_cgroup_populate(struct cgroup_subsys *ss, 2238static int mem_cgroup_populate(struct cgroup_subsys *ss,
1134 struct cgroup *cont) 2239 struct cgroup *cont)
1135{ 2240{
1136 return cgroup_add_files(cont, ss, mem_cgroup_files, 2241 int ret;
1137 ARRAY_SIZE(mem_cgroup_files)); 2242
2243 ret = cgroup_add_files(cont, ss, mem_cgroup_files,
2244 ARRAY_SIZE(mem_cgroup_files));
2245
2246 if (!ret)
2247 ret = register_memsw_files(cont, ss);
2248 return ret;
1138} 2249}
1139 2250
1140static void mem_cgroup_move_task(struct cgroup_subsys *ss, 2251static void mem_cgroup_move_task(struct cgroup_subsys *ss,
@@ -1142,25 +2253,12 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
1142 struct cgroup *old_cont, 2253 struct cgroup *old_cont,
1143 struct task_struct *p) 2254 struct task_struct *p)
1144{ 2255{
1145 struct mm_struct *mm; 2256 mutex_lock(&memcg_tasklist);
1146 struct mem_cgroup *mem, *old_mem;
1147
1148 mm = get_task_mm(p);
1149 if (mm == NULL)
1150 return;
1151
1152 mem = mem_cgroup_from_cont(cont);
1153 old_mem = mem_cgroup_from_cont(old_cont);
1154
1155 /* 2257 /*
1156 * Only thread group leaders are allowed to migrate, the mm_struct is 2258 * FIXME: It's better to move charges of this process from old
1157 * in effect owned by the leader 2259 * memcg to new memcg. But it's just on TODO-List now.
1158 */ 2260 */
1159 if (!thread_group_leader(p)) 2261 mutex_unlock(&memcg_tasklist);
1160 goto out;
1161
1162out:
1163 mmput(mm);
1164} 2262}
1165 2263
1166struct cgroup_subsys mem_cgroup_subsys = { 2264struct cgroup_subsys mem_cgroup_subsys = {
@@ -1173,3 +2271,13 @@ struct cgroup_subsys mem_cgroup_subsys = {
1173 .attach = mem_cgroup_move_task, 2271 .attach = mem_cgroup_move_task,
1174 .early_init = 0, 2272 .early_init = 0,
1175}; 2273};
2274
2275#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2276
2277static int __init disable_swap_account(char *s)
2278{
2279 really_do_swap_account = 0;
2280 return 1;
2281}
2282__setup("noswapaccount", disable_swap_account);
2283#endif
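
Taken together with enable_swap_cgroup() above: when CONFIG_CGROUP_MEM_RES_CTLR_SWAP is built in, do_swap_account is latched on at root-cgroup creation, and booting with noswapaccount on the kernel command line clears really_do_swap_account first, so everything guarded by do_swap_account (the memsw.* control files, the swapon-time swap_cgroup maps, and the record/lookup helpers) stays inert.
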
diff --git a/mm/memory.c b/mm/memory.c
index 3f8fa06b963b..e009ce870859 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2000,7 +2000,7 @@ gotten:
2000 cow_user_page(new_page, old_page, address, vma); 2000 cow_user_page(new_page, old_page, address, vma);
2001 __SetPageUptodate(new_page); 2001 __SetPageUptodate(new_page);
2002 2002
2003 if (mem_cgroup_charge(new_page, mm, GFP_KERNEL)) 2003 if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
2004 goto oom_free_new; 2004 goto oom_free_new;
2005 2005
2006 /* 2006 /*
@@ -2392,6 +2392,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2392 struct page *page; 2392 struct page *page;
2393 swp_entry_t entry; 2393 swp_entry_t entry;
2394 pte_t pte; 2394 pte_t pte;
2395 struct mem_cgroup *ptr = NULL;
2395 int ret = 0; 2396 int ret = 0;
2396 2397
2397 if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) 2398 if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
@@ -2430,7 +2431,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2430 lock_page(page); 2431 lock_page(page);
2431 delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 2432 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2432 2433
2433 if (mem_cgroup_charge(page, mm, GFP_KERNEL)) { 2434 if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
2434 ret = VM_FAULT_OOM; 2435 ret = VM_FAULT_OOM;
2435 unlock_page(page); 2436 unlock_page(page);
2436 goto out; 2437 goto out;
@@ -2448,7 +2449,19 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2448 goto out_nomap; 2449 goto out_nomap;
2449 } 2450 }
2450 2451
2451 /* The page isn't present yet, go ahead with the fault. */ 2452 /*
2453 * The page isn't present yet, go ahead with the fault.
2454 *
2455 * Be careful about the sequence of operations here.
2456 * To get its accounting right, reuse_swap_page() must be called
2457 * while the page is counted on swap but not yet in mapcount i.e.
2458 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
2459 * must be called after the swap_free(), or it will never succeed.
2460 * Because delete_from_swap_page() may be called by reuse_swap_page(),
2461 * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry
2462 * in page->private. In this case, a record in swap_cgroup is silently
2463 * discarded at swap_free().
2464 */
2452 2465
2453 inc_mm_counter(mm, anon_rss); 2466 inc_mm_counter(mm, anon_rss);
2454 pte = mk_pte(page, vma->vm_page_prot); 2467 pte = mk_pte(page, vma->vm_page_prot);
@@ -2456,10 +2469,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2456 pte = maybe_mkwrite(pte_mkdirty(pte), vma); 2469 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
2457 write_access = 0; 2470 write_access = 0;
2458 } 2471 }
2459
2460 flush_icache_page(vma, page); 2472 flush_icache_page(vma, page);
2461 set_pte_at(mm, address, page_table, pte); 2473 set_pte_at(mm, address, page_table, pte);
2462 page_add_anon_rmap(page, vma, address); 2474 page_add_anon_rmap(page, vma, address);
2475 /* It's better to call commit-charge after rmap is established */
2476 mem_cgroup_commit_charge_swapin(page, ptr);
2463 2477
2464 swap_free(entry); 2478 swap_free(entry);
2465 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) 2479 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
@@ -2480,7 +2494,7 @@ unlock:
2480out: 2494out:
2481 return ret; 2495 return ret;
2482out_nomap: 2496out_nomap:
2483 mem_cgroup_uncharge_page(page); 2497 mem_cgroup_cancel_charge_swapin(ptr);
2484 pte_unmap_unlock(page_table, ptl); 2498 pte_unmap_unlock(page_table, ptl);
2485 unlock_page(page); 2499 unlock_page(page);
2486 page_cache_release(page); 2500 page_cache_release(page);
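
The hunks above convert do_swap_page() to a three-step charge protocol: mem_cgroup_try_charge_swapin() reserves the charge and returns the owning memcg through a pointer, mem_cgroup_commit_charge_swapin() finalizes it once the rmap exists, and mem_cgroup_cancel_charge_swapin() rolls it back on the pte_same() failure path. A userspace sketch of the protocol's shape; the bodies are stand-ins, not the kernel implementation.

	#include <stdio.h>

	struct memcg { long usage; };

	/* Reserve the charge; hand the owning memcg back to the caller.
	 * A real -ENOMEM would leave *ptr untouched. */
	static int try_charge(struct memcg *m, struct memcg **ptr)
	{
		m->usage++;
		*ptr = m;
		return 0;
	}

	/* On success there is nothing to undo: the charge belongs to the page. */
	static void commit_charge(struct memcg *ptr) { (void)ptr; }

	/* Failure path: undo the reservation. */
	static void cancel_charge(struct memcg *ptr)
	{
		if (ptr)
			ptr->usage--;
	}

	int main(void)
	{
		struct memcg m = { 0 }, *ptr = NULL;
		int pte_changed = 1;	/* model the pte_same() race */

		if (try_charge(&m, &ptr))
			return 1;
		if (pte_changed)
			cancel_charge(ptr);	/* the out_nomap path */
		else
			commit_charge(ptr);	/* after page_add_anon_rmap() */
		printf("outstanding charge: %ld\n", m.usage);	/* 0 */
		return 0;
	}
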
@@ -2510,7 +2524,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
2510 goto oom; 2524 goto oom;
2511 __SetPageUptodate(page); 2525 __SetPageUptodate(page);
2512 2526
2513 if (mem_cgroup_charge(page, mm, GFP_KERNEL)) 2527 if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
2514 goto oom_free_page; 2528 goto oom_free_page;
2515 2529
2516 entry = mk_pte(page, vma->vm_page_prot); 2530 entry = mk_pte(page, vma->vm_page_prot);
@@ -2601,7 +2615,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2601 ret = VM_FAULT_OOM; 2615 ret = VM_FAULT_OOM;
2602 goto out; 2616 goto out;
2603 } 2617 }
2604 if (mem_cgroup_charge(page, mm, GFP_KERNEL)) { 2618 if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
2605 ret = VM_FAULT_OOM; 2619 ret = VM_FAULT_OOM;
2606 page_cache_release(page); 2620 page_cache_release(page);
2607 goto out; 2621 goto out;
diff --git a/mm/migrate.c b/mm/migrate.c
index 55373983c9c6..a30ea5fcf9f1 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -121,20 +121,6 @@ static void remove_migration_pte(struct vm_area_struct *vma,
121 if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old) 121 if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
122 goto out; 122 goto out;
123 123
124 /*
125 * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge.
126 * Failure is not an option here: we're now expected to remove every
127 * migration pte, and will cause crashes otherwise. Normally this
128 * is not an issue: mem_cgroup_prepare_migration bumped up the old
129 * page_cgroup count for safety, that's now attached to the new page,
130 * so this charge should just be another incrementation of the count,
131 * to keep in balance with rmap.c's mem_cgroup_uncharging. But if
132 * there's been a force_empty, those reference counts may no longer
133 * be reliable, and this charge can actually fail: oh well, we don't
134 * make the situation any worse by proceeding as if it had succeeded.
135 */
136 mem_cgroup_charge(new, mm, GFP_ATOMIC);
137
138 get_page(new); 124 get_page(new);
139 pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); 125 pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
140 if (is_write_migration_entry(entry)) 126 if (is_write_migration_entry(entry))
@@ -378,9 +364,6 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
378 anon = PageAnon(page); 364 anon = PageAnon(page);
379 page->mapping = NULL; 365 page->mapping = NULL;
380 366
381 if (!anon) /* This page was removed from radix-tree. */
382 mem_cgroup_uncharge_cache_page(page);
383
384 /* 367 /*
385 * If any waiters have accumulated on the new page then 368 * If any waiters have accumulated on the new page then
386 * wake them up. 369 * wake them up.
@@ -614,6 +597,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
614 struct page *newpage = get_new_page(page, private, &result); 597 struct page *newpage = get_new_page(page, private, &result);
615 int rcu_locked = 0; 598 int rcu_locked = 0;
616 int charge = 0; 599 int charge = 0;
600 struct mem_cgroup *mem;
617 601
618 if (!newpage) 602 if (!newpage)
619 return -ENOMEM; 603 return -ENOMEM;
@@ -623,24 +607,26 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
623 goto move_newpage; 607 goto move_newpage;
624 } 608 }
625 609
626 charge = mem_cgroup_prepare_migration(page, newpage);
627 if (charge == -ENOMEM) {
628 rc = -ENOMEM;
629 goto move_newpage;
630 }
631 /* prepare cgroup just returns 0 or -ENOMEM */ 610 /* prepare cgroup just returns 0 or -ENOMEM */
632 BUG_ON(charge);
633
634 rc = -EAGAIN; 611 rc = -EAGAIN;
612
635 if (!trylock_page(page)) { 613 if (!trylock_page(page)) {
636 if (!force) 614 if (!force)
637 goto move_newpage; 615 goto move_newpage;
638 lock_page(page); 616 lock_page(page);
639 } 617 }
640 618
619 /* charge against new page */
620 charge = mem_cgroup_prepare_migration(page, &mem);
621 if (charge == -ENOMEM) {
622 rc = -ENOMEM;
623 goto unlock;
624 }
625 BUG_ON(charge);
626
641 if (PageWriteback(page)) { 627 if (PageWriteback(page)) {
642 if (!force) 628 if (!force)
643 goto unlock; 629 goto uncharge;
644 wait_on_page_writeback(page); 630 wait_on_page_writeback(page);
645 } 631 }
646 /* 632 /*
@@ -693,7 +679,9 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
693rcu_unlock: 679rcu_unlock:
694 if (rcu_locked) 680 if (rcu_locked)
695 rcu_read_unlock(); 681 rcu_read_unlock();
696 682uncharge:
683 if (!charge)
684 mem_cgroup_end_migration(mem, page, newpage);
697unlock: 685unlock:
698 unlock_page(page); 686 unlock_page(page);
699 687
@@ -709,8 +697,6 @@ unlock:
709 } 697 }
710 698
711move_newpage: 699move_newpage:
712 if (!charge)
713 mem_cgroup_end_migration(newpage);
714 700
715 /* 701 /*
716 * Move the new page to the LRU. If migration was not successful 702 * Move the new page to the LRU. If migration was not successful
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 6b9e758c98a5..40ba05061a4f 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -429,7 +429,6 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
429 unsigned long points = 0; 429 unsigned long points = 0;
430 struct task_struct *p; 430 struct task_struct *p;
431 431
432 cgroup_lock();
433 read_lock(&tasklist_lock); 432 read_lock(&tasklist_lock);
434retry: 433retry:
435 p = select_bad_process(&points, mem); 434 p = select_bad_process(&points, mem);
@@ -444,7 +443,6 @@ retry:
444 goto retry; 443 goto retry;
445out: 444out:
446 read_unlock(&tasklist_lock); 445 read_unlock(&tasklist_lock);
447 cgroup_unlock();
448} 446}
449#endif 447#endif
450 448
@@ -560,6 +558,13 @@ void pagefault_out_of_memory(void)
560 /* Got some memory back in the last second. */ 558 /* Got some memory back in the last second. */
561 return; 559 return;
562 560
561 /*
 562 * If this is from a memcg, the OOM killer has already been invoked
 563 * and it is not worth going for a system-wide OOM.
564 */
565 if (mem_cgroup_oom_called(current))
566 goto rest_and_return;
567
563 if (sysctl_panic_on_oom) 568 if (sysctl_panic_on_oom)
564 panic("out of memory from page fault. panic_on_oom is selected.\n"); 569 panic("out of memory from page fault. panic_on_oom is selected.\n");
565 570
@@ -571,6 +576,7 @@ void pagefault_out_of_memory(void)
571 * Give "p" a good chance of killing itself before we 576 * Give "p" a good chance of killing itself before we
572 * retry to allocate memory. 577 * retry to allocate memory.
573 */ 578 */
579rest_and_return:
574 if (!test_thread_flag(TIF_MEMDIE)) 580 if (!test_thread_flag(TIF_MEMDIE))
575 schedule_timeout_uninterruptible(1); 581 schedule_timeout_uninterruptible(1);
576} 582}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7bf22e045318..5675b3073854 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3523,10 +3523,10 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3523 INIT_LIST_HEAD(&zone->lru[l].list); 3523 INIT_LIST_HEAD(&zone->lru[l].list);
3524 zone->lru[l].nr_scan = 0; 3524 zone->lru[l].nr_scan = 0;
3525 } 3525 }
3526 zone->recent_rotated[0] = 0; 3526 zone->reclaim_stat.recent_rotated[0] = 0;
3527 zone->recent_rotated[1] = 0; 3527 zone->reclaim_stat.recent_rotated[1] = 0;
3528 zone->recent_scanned[0] = 0; 3528 zone->reclaim_stat.recent_scanned[0] = 0;
3529 zone->recent_scanned[1] = 0; 3529 zone->reclaim_stat.recent_scanned[1] = 0;
3530 zap_zone_vm_stats(zone); 3530 zap_zone_vm_stats(zone);
3531 zone->flags = 0; 3531 zone->flags = 0;
3532 if (!size) 3532 if (!size)
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index d6507a660ed6..7006a11350c8 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -8,6 +8,7 @@
8#include <linux/memory.h> 8#include <linux/memory.h>
9#include <linux/vmalloc.h> 9#include <linux/vmalloc.h>
10#include <linux/cgroup.h> 10#include <linux/cgroup.h>
11#include <linux/swapops.h>
11 12
12static void __meminit 13static void __meminit
13__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn) 14__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
@@ -15,6 +16,7 @@ __init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
15 pc->flags = 0; 16 pc->flags = 0;
16 pc->mem_cgroup = NULL; 17 pc->mem_cgroup = NULL;
17 pc->page = pfn_to_page(pfn); 18 pc->page = pfn_to_page(pfn);
19 INIT_LIST_HEAD(&pc->lru);
18} 20}
19static unsigned long total_usage; 21static unsigned long total_usage;
20 22
@@ -72,7 +74,7 @@ void __init page_cgroup_init(void)
72 74
73 int nid, fail; 75 int nid, fail;
74 76
75 if (mem_cgroup_subsys.disabled) 77 if (mem_cgroup_disabled())
76 return; 78 return;
77 79
78 for_each_online_node(nid) { 80 for_each_online_node(nid) {
@@ -103,13 +105,11 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
103/* __alloc_bootmem...() is protected by !slab_available() */ 105/* __alloc_bootmem...() is protected by !slab_available() */
104static int __init_refok init_section_page_cgroup(unsigned long pfn) 106static int __init_refok init_section_page_cgroup(unsigned long pfn)
105{ 107{
106 struct mem_section *section; 108 struct mem_section *section = __pfn_to_section(pfn);
107 struct page_cgroup *base, *pc; 109 struct page_cgroup *base, *pc;
108 unsigned long table_size; 110 unsigned long table_size;
109 int nid, index; 111 int nid, index;
110 112
111 section = __pfn_to_section(pfn);
112
113 if (!section->page_cgroup) { 113 if (!section->page_cgroup) {
114 nid = page_to_nid(pfn_to_page(pfn)); 114 nid = page_to_nid(pfn_to_page(pfn));
115 table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION; 115 table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
@@ -145,7 +145,6 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
145 __init_page_cgroup(pc, pfn + index); 145 __init_page_cgroup(pc, pfn + index);
146 } 146 }
147 147
148 section = __pfn_to_section(pfn);
149 section->page_cgroup = base - pfn; 148 section->page_cgroup = base - pfn;
150 total_usage += table_size; 149 total_usage += table_size;
151 return 0; 150 return 0;
@@ -248,7 +247,7 @@ void __init page_cgroup_init(void)
248 unsigned long pfn; 247 unsigned long pfn;
249 int fail = 0; 248 int fail = 0;
250 249
251 if (mem_cgroup_subsys.disabled) 250 if (mem_cgroup_disabled())
252 return; 251 return;
253 252
254 for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) { 253 for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
@@ -273,3 +272,199 @@ void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
273} 272}
274 273
275#endif 274#endif
275
276
277#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
278
279static DEFINE_MUTEX(swap_cgroup_mutex);
280struct swap_cgroup_ctrl {
281 struct page **map;
282 unsigned long length;
283};
284
285struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
286
287/*
 288 * These 8 bytes seem big... maybe we can reduce this once we can use an "id"
 289 * for the cgroup rather than a pointer.
290 */
291struct swap_cgroup {
292 struct mem_cgroup *val;
293};
294#define SC_PER_PAGE (PAGE_SIZE/sizeof(struct swap_cgroup))
295#define SC_POS_MASK (SC_PER_PAGE - 1)
296
297/*
298 * SwapCgroup implements "lookup" and "exchange" operations.
299 * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge
300 * against SwapCache. At swap_free(), this is accessed directly from swap.
301 *
 302 * This means:
 303 * - there is no race in "exchange" when we are accessed via SwapCache,
 304 * because the SwapCache (and its swp_entry) is under lock.
 305 * - when called via swap_free(), there is no user of the entry and no race.
 306 * So we don't need a lock around "exchange".
307 *
308 * TODO: we can push these buffers out to HIGHMEM.
309 */
310
311/*
312 * allocate buffer for swap_cgroup.
313 */
314static int swap_cgroup_prepare(int type)
315{
316 struct page *page;
317 struct swap_cgroup_ctrl *ctrl;
318 unsigned long idx, max;
319
320 if (!do_swap_account)
321 return 0;
322 ctrl = &swap_cgroup_ctrl[type];
323
324 for (idx = 0; idx < ctrl->length; idx++) {
325 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
326 if (!page)
327 goto not_enough_page;
328 ctrl->map[idx] = page;
329 }
330 return 0;
331not_enough_page:
332 max = idx;
333 for (idx = 0; idx < max; idx++)
334 __free_page(ctrl->map[idx]);
335
336 return -ENOMEM;
337}
338
339/**
340 * swap_cgroup_record - record mem_cgroup for this swp_entry.
341 * @ent: swap entry to be recorded into
342 * @mem: mem_cgroup to be recorded
343 *
 344 * Returns the old value on success, NULL on failure.
 345 * (Of course, the old value can be NULL.)
346 */
347struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem)
348{
349 int type = swp_type(ent);
350 unsigned long offset = swp_offset(ent);
351 unsigned long idx = offset / SC_PER_PAGE;
352 unsigned long pos = offset & SC_POS_MASK;
353 struct swap_cgroup_ctrl *ctrl;
354 struct page *mappage;
355 struct swap_cgroup *sc;
356 struct mem_cgroup *old;
357
358 if (!do_swap_account)
359 return NULL;
360
361 ctrl = &swap_cgroup_ctrl[type];
362
363 mappage = ctrl->map[idx];
364 sc = page_address(mappage);
365 sc += pos;
366 old = sc->val;
367 sc->val = mem;
368
369 return old;
370}
371
372/**
373 * lookup_swap_cgroup - lookup mem_cgroup tied to swap entry
374 * @ent: swap entry to be looked up.
375 *
 376 * Returns a pointer to the mem_cgroup on success, NULL on failure.
377 */
378struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent)
379{
380 int type = swp_type(ent);
381 unsigned long offset = swp_offset(ent);
382 unsigned long idx = offset / SC_PER_PAGE;
383 unsigned long pos = offset & SC_POS_MASK;
384 struct swap_cgroup_ctrl *ctrl;
385 struct page *mappage;
386 struct swap_cgroup *sc;
387 struct mem_cgroup *ret;
388
389 if (!do_swap_account)
390 return NULL;
391
392 ctrl = &swap_cgroup_ctrl[type];
393 mappage = ctrl->map[idx];
394 sc = page_address(mappage);
395 sc += pos;
396 ret = sc->val;
397 return ret;
398}
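
swap_cgroup_record() and lookup_swap_cgroup() share one address computation: the swap offset selects a page of the vmalloc'd map (offset / SC_PER_PAGE) and a slot inside it (offset & SC_POS_MASK, valid because SC_PER_PAGE is a power of two). A standalone model with calloc() standing in for alloc_page():

	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SIZE	4096UL
	struct swap_cgroup { void *val; };	/* one record per swap slot */
	#define SC_PER_PAGE	(PAGE_SIZE / sizeof(struct swap_cgroup))
	#define SC_POS_MASK	(SC_PER_PAGE - 1)

	static struct swap_cgroup *map[4];	/* models ctrl->map: 4 "pages" */

	static struct swap_cgroup *lookup(unsigned long offset)
	{
		unsigned long idx = offset / SC_PER_PAGE;	/* which page */
		unsigned long pos = offset & SC_POS_MASK;	/* slot inside */

		return &map[idx][pos];
	}

	int main(void)
	{
		unsigned long offset = SC_PER_PAGE + 7;	/* page 1, slot 7 */
		unsigned long i;
		int memcg_stub;

		for (i = 0; i < 4; i++)		/* like swap_cgroup_prepare() */
			map[i] = calloc(SC_PER_PAGE, sizeof(struct swap_cgroup));

		lookup(offset)->val = &memcg_stub;	/* swap_cgroup_record() */
		printf("lookup hit: %d\n", lookup(offset)->val == &memcg_stub);
		return 0;
	}
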
399
400int swap_cgroup_swapon(int type, unsigned long max_pages)
401{
402 void *array;
403 unsigned long array_size;
404 unsigned long length;
405 struct swap_cgroup_ctrl *ctrl;
406
407 if (!do_swap_account)
408 return 0;
409
410 length = ((max_pages/SC_PER_PAGE) + 1);
411 array_size = length * sizeof(void *);
412
413 array = vmalloc(array_size);
414 if (!array)
415 goto nomem;
416
417 memset(array, 0, array_size);
418 ctrl = &swap_cgroup_ctrl[type];
419 mutex_lock(&swap_cgroup_mutex);
420 ctrl->length = length;
421 ctrl->map = array;
422 if (swap_cgroup_prepare(type)) {
423 /* memory shortage */
424 ctrl->map = NULL;
425 ctrl->length = 0;
426 vfree(array);
427 mutex_unlock(&swap_cgroup_mutex);
428 goto nomem;
429 }
430 mutex_unlock(&swap_cgroup_mutex);
431
432 printk(KERN_INFO
433 "swap_cgroup: uses %ld bytes of vmalloc for pointer array space"
434 " and %ld bytes to hold mem_cgroup pointers on swap\n",
435 array_size, length * PAGE_SIZE);
436 printk(KERN_INFO
437 "swap_cgroup can be disabled by noswapaccount boot option.\n");
438
439 return 0;
440nomem:
441 printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
442 printk(KERN_INFO
443 "swap_cgroup can be disabled by noswapaccount boot option\n");
444 return -ENOMEM;
445}
446
447void swap_cgroup_swapoff(int type)
448{
449 int i;
450 struct swap_cgroup_ctrl *ctrl;
451
452 if (!do_swap_account)
453 return;
454
455 mutex_lock(&swap_cgroup_mutex);
456 ctrl = &swap_cgroup_ctrl[type];
457 if (ctrl->map) {
458 for (i = 0; i < ctrl->length; i++) {
459 struct page *page = ctrl->map[i];
460 if (page)
461 __free_page(page);
462 }
463 vfree(ctrl->map);
464 ctrl->map = NULL;
465 ctrl->length = 0;
466 }
467 mutex_unlock(&swap_cgroup_mutex);
468}
469
470#endif
diff --git a/mm/shmem.c b/mm/shmem.c
index 5941f9801363..5d0de96c9789 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -928,7 +928,11 @@ found:
928 error = 1; 928 error = 1;
929 if (!inode) 929 if (!inode)
930 goto out; 930 goto out;
931 /* Precharge page using GFP_KERNEL while we can wait */ 931 /*
932 * Charge page using GFP_KERNEL while we can wait.
 933 * Charged back to the user (not to the caller) when swap accounting is used.
934 * add_to_page_cache() will be called with GFP_NOWAIT.
935 */
932 error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); 936 error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
933 if (error) 937 if (error)
934 goto out; 938 goto out;
@@ -1320,15 +1324,19 @@ repeat:
1320 } else { 1324 } else {
1321 shmem_swp_unmap(entry); 1325 shmem_swp_unmap(entry);
1322 spin_unlock(&info->lock); 1326 spin_unlock(&info->lock);
1323 unlock_page(swappage);
1324 page_cache_release(swappage);
1325 if (error == -ENOMEM) { 1327 if (error == -ENOMEM) {
1326 /* allow reclaim from this memory cgroup */ 1328 /* allow reclaim from this memory cgroup */
1327 error = mem_cgroup_shrink_usage(current->mm, 1329 error = mem_cgroup_shrink_usage(swappage,
1330 current->mm,
1328 gfp); 1331 gfp);
1329 if (error) 1332 if (error) {
1333 unlock_page(swappage);
1334 page_cache_release(swappage);
1330 goto failed; 1335 goto failed;
1336 }
1331 } 1337 }
1338 unlock_page(swappage);
1339 page_cache_release(swappage);
1332 goto repeat; 1340 goto repeat;
1333 } 1341 }
1334 } else if (sgp == SGP_READ && !filepage) { 1342 } else if (sgp == SGP_READ && !filepage) {
@@ -1379,7 +1387,7 @@ repeat:
1379 1387
1380 /* Precharge page while we can wait, compensate after */ 1388 /* Precharge page while we can wait, compensate after */
1381 error = mem_cgroup_cache_charge(filepage, current->mm, 1389 error = mem_cgroup_cache_charge(filepage, current->mm,
1382 gfp & ~__GFP_HIGHMEM); 1390 GFP_KERNEL);
1383 if (error) { 1391 if (error) {
1384 page_cache_release(filepage); 1392 page_cache_release(filepage);
1385 shmem_unacct_blocks(info->flags, 1); 1393 shmem_unacct_blocks(info->flags, 1);
diff --git a/mm/swap.c b/mm/swap.c
index ba2c0e8b8b54..8adb9feb61e1 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -151,6 +151,26 @@ void rotate_reclaimable_page(struct page *page)
151 } 151 }
152} 152}
153 153
154static void update_page_reclaim_stat(struct zone *zone, struct page *page,
155 int file, int rotated)
156{
157 struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
158 struct zone_reclaim_stat *memcg_reclaim_stat;
159
160 memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
161
162 reclaim_stat->recent_scanned[file]++;
163 if (rotated)
164 reclaim_stat->recent_rotated[file]++;
165
166 if (!memcg_reclaim_stat)
167 return;
168
169 memcg_reclaim_stat->recent_scanned[file]++;
170 if (rotated)
171 memcg_reclaim_stat->recent_rotated[file]++;
172}
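
update_page_reclaim_stat() above books every scanned (and possibly rotated) page twice, once in the zone's reclaim stats and once in the owning memcg's, so that global and cgroup-targeted reclaim can each balance anon against file pressure. A minimal model of that dual bookkeeping; the types are invented for illustration.

	#include <stdio.h>

	struct reclaim_stat {
		unsigned long recent_scanned[2];	/* [0]=anon, [1]=file */
		unsigned long recent_rotated[2];
	};

	static void update_stat(struct reclaim_stat *zone_stat,
				struct reclaim_stat *memcg_stat, /* may be NULL */
				int file, int rotated)
	{
		zone_stat->recent_scanned[file]++;
		if (rotated)
			zone_stat->recent_rotated[file]++;

		if (!memcg_stat)	/* page not owned by any memcg */
			return;
		memcg_stat->recent_scanned[file]++;
		if (rotated)
			memcg_stat->recent_rotated[file]++;
	}

	int main(void)
	{
		struct reclaim_stat zone = { {0, 0}, {0, 0} };
		struct reclaim_stat memcg = { {0, 0}, {0, 0} };

		update_stat(&zone, &memcg, 1, 1);	/* rotated file page */
		update_stat(&zone, NULL, 0, 0);		/* un-owned anon page */
		printf("zone file rotations: %lu\n", zone.recent_rotated[1]);
		return 0;
	}
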
173
154/* 174/*
155 * FIXME: speed this up? 175 * FIXME: speed this up?
156 */ 176 */
@@ -168,10 +188,8 @@ void activate_page(struct page *page)
168 lru += LRU_ACTIVE; 188 lru += LRU_ACTIVE;
169 add_page_to_lru_list(zone, page, lru); 189 add_page_to_lru_list(zone, page, lru);
170 __count_vm_event(PGACTIVATE); 190 __count_vm_event(PGACTIVATE);
171 mem_cgroup_move_lists(page, lru);
172 191
173 zone->recent_rotated[!!file]++; 192 update_page_reclaim_stat(zone, page, !!file, 1);
174 zone->recent_scanned[!!file]++;
175 } 193 }
176 spin_unlock_irq(&zone->lru_lock); 194 spin_unlock_irq(&zone->lru_lock);
177} 195}
@@ -386,12 +404,14 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
386{ 404{
387 int i; 405 int i;
388 struct zone *zone = NULL; 406 struct zone *zone = NULL;
407
389 VM_BUG_ON(is_unevictable_lru(lru)); 408 VM_BUG_ON(is_unevictable_lru(lru));
390 409
391 for (i = 0; i < pagevec_count(pvec); i++) { 410 for (i = 0; i < pagevec_count(pvec); i++) {
392 struct page *page = pvec->pages[i]; 411 struct page *page = pvec->pages[i];
393 struct zone *pagezone = page_zone(page); 412 struct zone *pagezone = page_zone(page);
394 int file; 413 int file;
414 int active;
395 415
396 if (pagezone != zone) { 416 if (pagezone != zone) {
397 if (zone) 417 if (zone)
@@ -403,12 +423,11 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
403 VM_BUG_ON(PageUnevictable(page)); 423 VM_BUG_ON(PageUnevictable(page));
404 VM_BUG_ON(PageLRU(page)); 424 VM_BUG_ON(PageLRU(page));
405 SetPageLRU(page); 425 SetPageLRU(page);
426 active = is_active_lru(lru);
406 file = is_file_lru(lru); 427 file = is_file_lru(lru);
407 zone->recent_scanned[file]++; 428 if (active)
408 if (is_active_lru(lru)) {
409 SetPageActive(page); 429 SetPageActive(page);
410 zone->recent_rotated[file]++; 430 update_page_reclaim_stat(zone, page, file, active);
411 }
412 add_page_to_lru_list(zone, page, lru); 431 add_page_to_lru_list(zone, page, lru);
413 } 432 }
414 if (zone) 433 if (zone)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 81c825f67a7f..3ecea98ecb45 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -17,6 +17,7 @@
17#include <linux/backing-dev.h> 17#include <linux/backing-dev.h>
18#include <linux/pagevec.h> 18#include <linux/pagevec.h>
19#include <linux/migrate.h> 19#include <linux/migrate.h>
20#include <linux/page_cgroup.h>
20 21
21#include <asm/pgtable.h> 22#include <asm/pgtable.h>
22 23
@@ -108,6 +109,8 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
108 */ 109 */
109void __delete_from_swap_cache(struct page *page) 110void __delete_from_swap_cache(struct page *page)
110{ 111{
112 swp_entry_t ent = {.val = page_private(page)};
113
111 VM_BUG_ON(!PageLocked(page)); 114 VM_BUG_ON(!PageLocked(page));
112 VM_BUG_ON(!PageSwapCache(page)); 115 VM_BUG_ON(!PageSwapCache(page));
113 VM_BUG_ON(PageWriteback(page)); 116 VM_BUG_ON(PageWriteback(page));
@@ -118,6 +121,7 @@ void __delete_from_swap_cache(struct page *page)
118 total_swapcache_pages--; 121 total_swapcache_pages--;
119 __dec_zone_page_state(page, NR_FILE_PAGES); 122 __dec_zone_page_state(page, NR_FILE_PAGES);
120 INC_CACHE_INFO(del_total); 123 INC_CACHE_INFO(del_total);
124 mem_cgroup_uncharge_swapcache(page, ent);
121} 125}
122 126
123/** 127/**
diff --git a/mm/swapfile.c b/mm/swapfile.c
index eec5ca758a23..da422c47e2ee 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -33,6 +33,7 @@
33#include <asm/pgtable.h> 33#include <asm/pgtable.h>
34#include <asm/tlbflush.h> 34#include <asm/tlbflush.h>
35#include <linux/swapops.h> 35#include <linux/swapops.h>
36#include <linux/page_cgroup.h>
36 37
37static DEFINE_SPINLOCK(swap_lock); 38static DEFINE_SPINLOCK(swap_lock);
38static unsigned int nr_swapfiles; 39static unsigned int nr_swapfiles;
@@ -470,8 +471,9 @@ out:
470 return NULL; 471 return NULL;
471} 472}
472 473
473static int swap_entry_free(struct swap_info_struct *p, unsigned long offset) 474static int swap_entry_free(struct swap_info_struct *p, swp_entry_t ent)
474{ 475{
476 unsigned long offset = swp_offset(ent);
475 int count = p->swap_map[offset]; 477 int count = p->swap_map[offset];
476 478
477 if (count < SWAP_MAP_MAX) { 479 if (count < SWAP_MAP_MAX) {
@@ -486,6 +488,7 @@ static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
486 swap_list.next = p - swap_info; 488 swap_list.next = p - swap_info;
487 nr_swap_pages++; 489 nr_swap_pages++;
488 p->inuse_pages--; 490 p->inuse_pages--;
491 mem_cgroup_uncharge_swap(ent);
489 } 492 }
490 } 493 }
491 return count; 494 return count;
@@ -501,7 +504,7 @@ void swap_free(swp_entry_t entry)
501 504
502 p = swap_info_get(entry); 505 p = swap_info_get(entry);
503 if (p) { 506 if (p) {
504 swap_entry_free(p, swp_offset(entry)); 507 swap_entry_free(p, entry);
505 spin_unlock(&swap_lock); 508 spin_unlock(&swap_lock);
506 } 509 }
507} 510}
@@ -581,7 +584,7 @@ int free_swap_and_cache(swp_entry_t entry)
581 584
582 p = swap_info_get(entry); 585 p = swap_info_get(entry);
583 if (p) { 586 if (p) {
584 if (swap_entry_free(p, swp_offset(entry)) == 1) { 587 if (swap_entry_free(p, entry) == 1) {
585 page = find_get_page(&swapper_space, entry.val); 588 page = find_get_page(&swapper_space, entry.val);
586 if (page && !trylock_page(page)) { 589 if (page && !trylock_page(page)) {
587 page_cache_release(page); 590 page_cache_release(page);
@@ -690,17 +693,18 @@ unsigned int count_swap_pages(int type, int free)
690static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, 693static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
691 unsigned long addr, swp_entry_t entry, struct page *page) 694 unsigned long addr, swp_entry_t entry, struct page *page)
692{ 695{
696 struct mem_cgroup *ptr = NULL;
693 spinlock_t *ptl; 697 spinlock_t *ptl;
694 pte_t *pte; 698 pte_t *pte;
695 int ret = 1; 699 int ret = 1;
696 700
697 if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL)) 701 if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr))
698 ret = -ENOMEM; 702 ret = -ENOMEM;
699 703
700 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 704 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
701 if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) { 705 if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
702 if (ret > 0) 706 if (ret > 0)
703 mem_cgroup_uncharge_page(page); 707 mem_cgroup_cancel_charge_swapin(ptr);
704 ret = 0; 708 ret = 0;
705 goto out; 709 goto out;
706 } 710 }
@@ -710,6 +714,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
710 set_pte_at(vma->vm_mm, addr, pte, 714 set_pte_at(vma->vm_mm, addr, pte,
711 pte_mkold(mk_pte(page, vma->vm_page_prot))); 715 pte_mkold(mk_pte(page, vma->vm_page_prot)));
712 page_add_anon_rmap(page, vma, addr); 716 page_add_anon_rmap(page, vma, addr);
717 mem_cgroup_commit_charge_swapin(page, ptr);
713 swap_free(entry); 718 swap_free(entry);
714 /* 719 /*
715 * Move the page to the active list so it is not 720 * Move the page to the active list so it is not
@@ -1492,6 +1497,9 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
1492 spin_unlock(&swap_lock); 1497 spin_unlock(&swap_lock);
1493 mutex_unlock(&swapon_mutex); 1498 mutex_unlock(&swapon_mutex);
1494 vfree(swap_map); 1499 vfree(swap_map);
 1500 /* Destroy swap accounting information */
1501 swap_cgroup_swapoff(type);
1502
1495 inode = mapping->host; 1503 inode = mapping->host;
1496 if (S_ISBLK(inode->i_mode)) { 1504 if (S_ISBLK(inode->i_mode)) {
1497 struct block_device *bdev = I_BDEV(inode); 1505 struct block_device *bdev = I_BDEV(inode);
@@ -1809,6 +1817,11 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
1809 } 1817 }
1810 swap_map[page_nr] = SWAP_MAP_BAD; 1818 swap_map[page_nr] = SWAP_MAP_BAD;
1811 } 1819 }
1820
1821 error = swap_cgroup_swapon(type, maxpages);
1822 if (error)
1823 goto bad_swap;
1824
1812 nr_good_pages = swap_header->info.last_page - 1825 nr_good_pages = swap_header->info.last_page -
1813 swap_header->info.nr_badpages - 1826 swap_header->info.nr_badpages -
1814 1 /* header page */; 1827 1 /* header page */;
@@ -1880,6 +1893,7 @@ bad_swap:
1880 bd_release(bdev); 1893 bd_release(bdev);
1881 } 1894 }
1882 destroy_swap_extents(p); 1895 destroy_swap_extents(p);
1896 swap_cgroup_swapoff(type);
1883bad_swap_2: 1897bad_swap_2:
1884 spin_lock(&swap_lock); 1898 spin_lock(&swap_lock);
1885 p->swap_file = NULL; 1899 p->swap_file = NULL;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b07c48b09a93..9a27c44aa327 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -125,11 +125,30 @@ static LIST_HEAD(shrinker_list);
125static DECLARE_RWSEM(shrinker_rwsem); 125static DECLARE_RWSEM(shrinker_rwsem);
126 126
127#ifdef CONFIG_CGROUP_MEM_RES_CTLR 127#ifdef CONFIG_CGROUP_MEM_RES_CTLR
128#define scan_global_lru(sc) (!(sc)->mem_cgroup) 128#define scanning_global_lru(sc) (!(sc)->mem_cgroup)
129#else 129#else
130#define scan_global_lru(sc) (1) 130#define scanning_global_lru(sc) (1)
131#endif 131#endif
132 132
133static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
134 struct scan_control *sc)
135{
136 if (!scanning_global_lru(sc))
137 return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
138
139 return &zone->reclaim_stat;
140}
141
142static unsigned long zone_nr_pages(struct zone *zone, struct scan_control *sc,
143 enum lru_list lru)
144{
145 if (!scanning_global_lru(sc))
146 return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
147
148 return zone_page_state(zone, NR_LRU_BASE + lru);
149}
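
These two helpers are what let the shrink_* paths stay common: when sc->mem_cgroup is set the scan is memcg-targeted and both the reclaim stats and the LRU sizes come from the cgroup, otherwise from the zone itself. A compact model of the selection; every type here is a stand-in.

	#include <stdio.h>

	struct stats { unsigned long nr[2]; };	/* [0]=anon, [1]=file */

	struct zone_model { struct stats stat; };
	struct memcg_model { struct stats stat; };
	struct scan_control_model { struct memcg_model *mem_cgroup; };

	static struct stats *pick_stats(struct zone_model *z,
					struct scan_control_model *sc)
	{
		if (sc->mem_cgroup)	/* i.e. !scanning_global_lru(sc) */
			return &sc->mem_cgroup->stat;
		return &z->stat;
	}

	int main(void)
	{
		struct zone_model z = { { { 10, 20 } } };
		struct memcg_model mc = { { { 1, 2 } } };
		struct scan_control_model global = { NULL };
		struct scan_control_model targeted = { &mc };

		printf("global file: %lu\n", pick_stats(&z, &global)->nr[1]);
		printf("memcg  file: %lu\n", pick_stats(&z, &targeted)->nr[1]);
		return 0;
	}
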
150
151
133/* 152/*
134 * Add a shrinker callback to be called from the vm 153 * Add a shrinker callback to be called from the vm
135 */ 154 */
@@ -512,7 +531,6 @@ redo:
512 lru = LRU_UNEVICTABLE; 531 lru = LRU_UNEVICTABLE;
513 add_page_to_unevictable_list(page); 532 add_page_to_unevictable_list(page);
514 } 533 }
515 mem_cgroup_move_lists(page, lru);
516 534
517 /* 535 /*
518 * page's status can change while we move it among lru. If an evictable 536 * page's status can change while we move it among lru. If an evictable
@@ -547,7 +565,6 @@ void putback_lru_page(struct page *page)
547 565
548 lru = !!TestClearPageActive(page) + page_is_file_cache(page); 566 lru = !!TestClearPageActive(page) + page_is_file_cache(page);
549 lru_cache_add_lru(page, lru); 567 lru_cache_add_lru(page, lru);
550 mem_cgroup_move_lists(page, lru);
551 put_page(page); 568 put_page(page);
552} 569}
553#endif /* CONFIG_UNEVICTABLE_LRU */ 570#endif /* CONFIG_UNEVICTABLE_LRU */
@@ -813,6 +830,7 @@ int __isolate_lru_page(struct page *page, int mode, int file)
813 return ret; 830 return ret;
814 831
815 ret = -EBUSY; 832 ret = -EBUSY;
833
816 if (likely(get_page_unless_zero(page))) { 834 if (likely(get_page_unless_zero(page))) {
817 /* 835 /*
818 * Be careful not to clear PageLRU until after we're 836 * Be careful not to clear PageLRU until after we're
@@ -821,6 +839,7 @@ int __isolate_lru_page(struct page *page, int mode, int file)
821 */ 839 */
822 ClearPageLRU(page); 840 ClearPageLRU(page);
823 ret = 0; 841 ret = 0;
842 mem_cgroup_del_lru(page);
824 } 843 }
825 844
826 return ret; 845 return ret;
@@ -1029,6 +1048,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
1029 struct pagevec pvec; 1048 struct pagevec pvec;
1030 unsigned long nr_scanned = 0; 1049 unsigned long nr_scanned = 0;
1031 unsigned long nr_reclaimed = 0; 1050 unsigned long nr_reclaimed = 0;
1051 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1032 1052
1033 pagevec_init(&pvec, 1); 1053 pagevec_init(&pvec, 1);
1034 1054
@@ -1070,13 +1090,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
1070 __mod_zone_page_state(zone, NR_INACTIVE_ANON, 1090 __mod_zone_page_state(zone, NR_INACTIVE_ANON,
1071 -count[LRU_INACTIVE_ANON]); 1091 -count[LRU_INACTIVE_ANON]);
1072 1092
1073 if (scan_global_lru(sc)) { 1093 if (scanning_global_lru(sc))
1074 zone->pages_scanned += nr_scan; 1094 zone->pages_scanned += nr_scan;
1075 zone->recent_scanned[0] += count[LRU_INACTIVE_ANON]; 1095
1076 zone->recent_scanned[0] += count[LRU_ACTIVE_ANON]; 1096 reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
1077 zone->recent_scanned[1] += count[LRU_INACTIVE_FILE]; 1097 reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
1078 zone->recent_scanned[1] += count[LRU_ACTIVE_FILE]; 1098 reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE];
1079 } 1099 reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE];
1100
1080 spin_unlock_irq(&zone->lru_lock); 1101 spin_unlock_irq(&zone->lru_lock);
1081 1102
1082 nr_scanned += nr_scan; 1103 nr_scanned += nr_scan;
@@ -1108,7 +1129,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
1108 if (current_is_kswapd()) { 1129 if (current_is_kswapd()) {
1109 __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan); 1130 __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
1110 __count_vm_events(KSWAPD_STEAL, nr_freed); 1131 __count_vm_events(KSWAPD_STEAL, nr_freed);
1111 } else if (scan_global_lru(sc)) 1132 } else if (scanning_global_lru(sc))
1112 __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan); 1133 __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
1113 1134
1114 __count_zone_vm_events(PGSTEAL, zone, nr_freed); 1135 __count_zone_vm_events(PGSTEAL, zone, nr_freed);
@@ -1134,10 +1155,9 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
1134 SetPageLRU(page); 1155 SetPageLRU(page);
1135 lru = page_lru(page); 1156 lru = page_lru(page);
1136 add_page_to_lru_list(zone, page, lru); 1157 add_page_to_lru_list(zone, page, lru);
1137 mem_cgroup_move_lists(page, lru); 1158 if (PageActive(page)) {
1138 if (PageActive(page) && scan_global_lru(sc)) {
1139 int file = !!page_is_file_cache(page); 1159 int file = !!page_is_file_cache(page);
1140 zone->recent_rotated[file]++; 1160 reclaim_stat->recent_rotated[file]++;
1141 } 1161 }
1142 if (!pagevec_add(&pvec, page)) { 1162 if (!pagevec_add(&pvec, page)) {
1143 spin_unlock_irq(&zone->lru_lock); 1163 spin_unlock_irq(&zone->lru_lock);
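
With reclaim_stat obtained once from get_reclaim_stat(), the rotation accounting above now runs for memcg reclaim too, and it keeps the [0]=anon / [1]=file split via the !!page_is_file_cache() index. A compact model of just that bookkeeping; the page_model fields and sample pages are invented:

    #include <stdio.h>

    struct zone_reclaim_stat { unsigned long recent_rotated[2]; };
    struct page_model { int active, file; };    /* invented */

    /* only pages still active when put back count as "rotated" */
    static void putback(struct zone_reclaim_stat *stat, struct page_model *p)
    {
        if (p->active)
            stat->recent_rotated[!!p->file]++;
    }

    int main(void)
    {
        struct zone_reclaim_stat stat = {0};
        struct page_model pages[] = { {1, 0}, {1, 1}, {0, 1} };

        for (int i = 0; i < 3; i++)
            putback(&stat, &pages[i]);
        printf("anon=%lu file=%lu\n",
               stat.recent_rotated[0], stat.recent_rotated[1]); /* 1 1 */
        return 0;
    }
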
@@ -1197,6 +1217,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1197 struct page *page; 1217 struct page *page;
1198 struct pagevec pvec; 1218 struct pagevec pvec;
1199 enum lru_list lru; 1219 enum lru_list lru;
1220 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1200 1221
1201 lru_add_drain(); 1222 lru_add_drain();
1202 spin_lock_irq(&zone->lru_lock); 1223 spin_lock_irq(&zone->lru_lock);
@@ -1207,10 +1228,10 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1207 * zone->pages_scanned is used to detect the zone's OOM; 1228 * zone->pages_scanned is used to detect the zone's OOM;
1208 * mem_cgroup tracks nr_scan by itself. 1229 * mem_cgroup tracks nr_scan by itself.
1209 */ 1230 */
1210 if (scan_global_lru(sc)) { 1231 if (scanning_global_lru(sc)) {
1211 zone->pages_scanned += pgscanned; 1232 zone->pages_scanned += pgscanned;
1212 zone->recent_scanned[!!file] += pgmoved;
1213 } 1233 }
1234 reclaim_stat->recent_scanned[!!file] += pgmoved;
1214 1235
1215 if (file) 1236 if (file)
1216 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved); 1237 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
@@ -1251,8 +1272,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1251 * This helps balance scan pressure between file and anonymous 1272 * This helps balance scan pressure between file and anonymous
1252 * pages in get_scan_ratio. 1273 * pages in get_scan_ratio.
1253 */ 1274 */
1254 if (scan_global_lru(sc)) 1275 reclaim_stat->recent_rotated[!!file] += pgmoved;
1255 zone->recent_rotated[!!file] += pgmoved;
1256 1276
1257 while (!list_empty(&l_inactive)) { 1277 while (!list_empty(&l_inactive)) {
1258 page = lru_to_page(&l_inactive); 1278 page = lru_to_page(&l_inactive);
@@ -1263,7 +1283,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1263 ClearPageActive(page); 1283 ClearPageActive(page);
1264 1284
1265 list_move(&page->lru, &zone->lru[lru].list); 1285 list_move(&page->lru, &zone->lru[lru].list);
1266 mem_cgroup_move_lists(page, lru); 1286 mem_cgroup_add_lru_list(page, lru);
1267 pgmoved++; 1287 pgmoved++;
1268 if (!pagevec_add(&pvec, page)) { 1288 if (!pagevec_add(&pvec, page)) {
1269 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved); 1289 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
@@ -1292,6 +1312,38 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1292 pagevec_release(&pvec); 1312 pagevec_release(&pvec);
1293} 1313}
1294 1314
1315static int inactive_anon_is_low_global(struct zone *zone)
1316{
1317 unsigned long active, inactive;
1318
1319 active = zone_page_state(zone, NR_ACTIVE_ANON);
1320 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1321
1322 if (inactive * zone->inactive_ratio < active)
1323 return 1;
1324
1325 return 0;
1326}
1327
1328/**
1329 * inactive_anon_is_low - check if anonymous pages need to be deactivated
1330 * @zone: zone to check
1331 * @sc: scan control of this context
1332 *
1333 * Returns true if the zone does not have enough inactive anon pages,
1334 * meaning some active anon pages need to be deactivated.
1335 */
1336static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
1337{
1338 int low;
1339
1340 if (scanning_global_lru(sc))
1341 low = inactive_anon_is_low_global(zone);
1342 else
1343 low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
1344 return low;
1345}
1346
1295static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 1347static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1296 struct zone *zone, struct scan_control *sc, int priority) 1348 struct zone *zone, struct scan_control *sc, int priority)
1297{ 1349{
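
inactive_anon_is_low() reduces to one comparison: the anon lists are "low" when inactive * inactive_ratio < active, i.e. the zone (or cgroup) wants at least 1/inactive_ratio of its anon pages on the inactive list. The same test on bare numbers; the figures and the inactive_ratio value are made up for illustration:

    #include <stdio.h>

    static int anon_is_low(unsigned long active, unsigned long inactive,
                           unsigned int inactive_ratio)
    {
        return inactive * inactive_ratio < active;
    }

    int main(void)
    {
        /* ratio 3: want inactive >= active / 3 */
        printf("%d\n", anon_is_low(700, 200, 3)); /* 600 < 700 -> 1: deactivate */
        printf("%d\n", anon_is_low(700, 300, 3)); /* 900 >= 700 -> 0: balanced  */
        return 0;
    }
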
@@ -1302,8 +1354,7 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1302 return 0; 1354 return 0;
1303 } 1355 }
1304 1356
1305 if (lru == LRU_ACTIVE_ANON && 1357 if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
1306 (!scan_global_lru(sc) || inactive_anon_is_low(zone))) {
1307 shrink_active_list(nr_to_scan, zone, sc, priority, file); 1358 shrink_active_list(nr_to_scan, zone, sc, priority, file);
1308 return 0; 1359 return 0;
1309 } 1360 }
@@ -1325,6 +1376,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
1325 unsigned long anon, file, free; 1376 unsigned long anon, file, free;
1326 unsigned long anon_prio, file_prio; 1377 unsigned long anon_prio, file_prio;
1327 unsigned long ap, fp; 1378 unsigned long ap, fp;
1379 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1328 1380
1329 /* If we have no swap space, do not bother scanning anon pages. */ 1381 /* If we have no swap space, do not bother scanning anon pages. */
1330 if (nr_swap_pages <= 0) { 1382 if (nr_swap_pages <= 0) {
@@ -1333,17 +1385,20 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
1333 return; 1385 return;
1334 } 1386 }
1335 1387
1336 anon = zone_page_state(zone, NR_ACTIVE_ANON) + 1388 anon = zone_nr_pages(zone, sc, LRU_ACTIVE_ANON) +
1337 zone_page_state(zone, NR_INACTIVE_ANON); 1389 zone_nr_pages(zone, sc, LRU_INACTIVE_ANON);
1338 file = zone_page_state(zone, NR_ACTIVE_FILE) + 1390 file = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) +
1339 zone_page_state(zone, NR_INACTIVE_FILE); 1391 zone_nr_pages(zone, sc, LRU_INACTIVE_FILE);
1340 free = zone_page_state(zone, NR_FREE_PAGES); 1392
1341 1393 if (scanning_global_lru(sc)) {
1342 /* If we have very few page cache pages, force-scan anon pages. */ 1394 free = zone_page_state(zone, NR_FREE_PAGES);
1343 if (unlikely(file + free <= zone->pages_high)) { 1395 /* If we have very few page cache pages,
1344 percent[0] = 100; 1396 force-scan anon pages. */
1345 percent[1] = 0; 1397 if (unlikely(file + free <= zone->pages_high)) {
1346 return; 1398 percent[0] = 100;
1399 percent[1] = 0;
1400 return;
1401 }
1347 } 1402 }
1348 1403
1349 /* 1404 /*
@@ -1357,17 +1412,17 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
1357 * 1412 *
1358 * anon in [0], file in [1] 1413 * anon in [0], file in [1]
1359 */ 1414 */
1360 if (unlikely(zone->recent_scanned[0] > anon / 4)) { 1415 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
1361 spin_lock_irq(&zone->lru_lock); 1416 spin_lock_irq(&zone->lru_lock);
1362 zone->recent_scanned[0] /= 2; 1417 reclaim_stat->recent_scanned[0] /= 2;
1363 zone->recent_rotated[0] /= 2; 1418 reclaim_stat->recent_rotated[0] /= 2;
1364 spin_unlock_irq(&zone->lru_lock); 1419 spin_unlock_irq(&zone->lru_lock);
1365 } 1420 }
1366 1421
1367 if (unlikely(zone->recent_scanned[1] > file / 4)) { 1422 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
1368 spin_lock_irq(&zone->lru_lock); 1423 spin_lock_irq(&zone->lru_lock);
1369 zone->recent_scanned[1] /= 2; 1424 reclaim_stat->recent_scanned[1] /= 2;
1370 zone->recent_rotated[1] /= 2; 1425 reclaim_stat->recent_rotated[1] /= 2;
1371 spin_unlock_irq(&zone->lru_lock); 1426 spin_unlock_irq(&zone->lru_lock);
1372 } 1427 }
1373 1428
@@ -1383,11 +1438,11 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
1383 * proportional to the fraction of recently scanned pages on 1438 * proportional to the fraction of recently scanned pages on
1384 * each list that were recently referenced and in active use. 1439 * each list that were recently referenced and in active use.
1385 */ 1440 */
1386 ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1); 1441 ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
1387 ap /= zone->recent_rotated[0] + 1; 1442 ap /= reclaim_stat->recent_rotated[0] + 1;
1388 1443
1389 fp = (file_prio + 1) * (zone->recent_scanned[1] + 1); 1444 fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
1390 fp /= zone->recent_rotated[1] + 1; 1445 fp /= reclaim_stat->recent_rotated[1] + 1;
1391 1446
1392 /* Normalize to percentages */ 1447 /* Normalize to percentages */
1393 percent[0] = 100 * ap / (ap + fp + 1); 1448 percent[0] = 100 * ap / (ap + fp + 1);
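
The ap/fp arithmetic above decides how reclaim pressure is split: each side's weight grows with its recently scanned count and shrinks with its recently rotated (still-referenced) count, scaled by swappiness. The exact formula, lifted out so the effect of the counters is visible; the sample counter values are invented:

    #include <stdio.h>

    static void scan_ratio(unsigned int swappiness,
                           const unsigned long scanned[2],
                           const unsigned long rotated[2],
                           unsigned long percent[2])
    {
        unsigned long anon_prio = swappiness;
        unsigned long file_prio = 200 - swappiness;
        unsigned long ap, fp;

        ap = (anon_prio + 1) * (scanned[0] + 1);
        ap /= rotated[0] + 1;
        fp = (file_prio + 1) * (scanned[1] + 1);
        fp /= rotated[1] + 1;

        percent[0] = 100 * ap / (ap + fp + 1);  /* anon */
        percent[1] = 100 - percent[0];          /* file */
    }

    int main(void)
    {
        /* anon rotated a lot (hot), file hardly at all (cold) */
        unsigned long scanned[2] = { 100, 100 };
        unsigned long rotated[2] = { 50, 10 };
        unsigned long percent[2];

        scan_ratio(60, scanned, rotated, percent);
        printf("anon %lu%%, file %lu%%\n", percent[0], percent[1]);
        return 0;
    }

With the default swappiness of 60 this prints anon 8%, file 92%: the cold file list absorbs most of the pressure.
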
@@ -1411,30 +1466,23 @@ static void shrink_zone(int priority, struct zone *zone,
1411 get_scan_ratio(zone, sc, percent); 1466 get_scan_ratio(zone, sc, percent);
1412 1467
1413 for_each_evictable_lru(l) { 1468 for_each_evictable_lru(l) {
1414 if (scan_global_lru(sc)) { 1469 int file = is_file_lru(l);
1415 int file = is_file_lru(l); 1470 int scan;
1416 int scan; 1471
1417 1472 scan = zone_page_state(zone, NR_LRU_BASE + l);
1418 scan = zone_page_state(zone, NR_LRU_BASE + l); 1473 if (priority) {
1419 if (priority) { 1474 scan >>= priority;
1420 scan >>= priority; 1475 scan = (scan * percent[file]) / 100;
1421 scan = (scan * percent[file]) / 100; 1476 }
1422 } 1477 if (scanning_global_lru(sc)) {
1423 zone->lru[l].nr_scan += scan; 1478 zone->lru[l].nr_scan += scan;
1424 nr[l] = zone->lru[l].nr_scan; 1479 nr[l] = zone->lru[l].nr_scan;
1425 if (nr[l] >= swap_cluster_max) 1480 if (nr[l] >= swap_cluster_max)
1426 zone->lru[l].nr_scan = 0; 1481 zone->lru[l].nr_scan = 0;
1427 else 1482 else
1428 nr[l] = 0; 1483 nr[l] = 0;
1429 } else { 1484 } else
1430 /* 1485 nr[l] = scan;
1431 * This reclaim occurs not because zone memory shortage
1432 * but because memory controller hits its limit.
1433 * Don't modify zone reclaim related data.
1434 */
1435 nr[l] = mem_cgroup_calc_reclaim(sc->mem_cgroup, zone,
1436 priority, l);
1437 }
1438 } 1486 }
1439 1487
1440 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 1488 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
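
The unified loop above computes each list's target as lru_size >> priority, scaled by the get_scan_ratio() percentage; only the global path then accumulates sub-batch amounts in zone->lru[l].nr_scan until they reach swap_cluster_max, so tiny quanta are deferred rather than scanned immediately. A numeric walk-through with invented sizes:

    #include <stdio.h>

    int main(void)
    {
        unsigned long lru_size = 10000;     /* pages on this list */
        unsigned long percent = 40;         /* from get_scan_ratio() */
        unsigned long swap_cluster_max = 32;
        unsigned long nr_scan = 0;          /* zone->lru[l].nr_scan */

        for (int priority = 12; priority >= 6; priority--) {
            unsigned long scan = (lru_size >> priority) * percent / 100;

            nr_scan += scan;                /* global LRU: accumulate */
            if (nr_scan >= swap_cluster_max) {
                printf("priority %2d: scan batch of %lu\n", priority, nr_scan);
                nr_scan = 0;
            } else {
                printf("priority %2d: defer (have %lu)\n", priority, nr_scan);
            }
        }
        return 0;
    }

Memcg reclaim takes the else branch of the hunk instead and uses the computed value directly, since it keeps its own scan counts.
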
@@ -1467,9 +1515,7 @@ static void shrink_zone(int priority, struct zone *zone,
1467 * Even if we did not try to evict anon pages at all, we want to 1515 * Even if we did not try to evict anon pages at all, we want to
1468 * rebalance the anon lru active/inactive ratio. 1516 * rebalance the anon lru active/inactive ratio.
1469 */ 1517 */
1470 if (!scan_global_lru(sc) || inactive_anon_is_low(zone)) 1518 if (inactive_anon_is_low(zone, sc))
1471 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
1472 else if (!scan_global_lru(sc))
1473 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0); 1519 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
1474 1520
1475 throttle_vm_writeout(sc->gfp_mask); 1521 throttle_vm_writeout(sc->gfp_mask);
@@ -1504,7 +1550,7 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
1504 * Take care that memory controller reclaim has little 1550 * Take care that memory controller reclaim has little
1505 * influence on the global LRU. 1551 * influence on the global LRU.
1506 */ 1552 */
1507 if (scan_global_lru(sc)) { 1553 if (scanning_global_lru(sc)) {
1508 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1554 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1509 continue; 1555 continue;
1510 note_zone_scanning_priority(zone, priority); 1556 note_zone_scanning_priority(zone, priority);
@@ -1557,12 +1603,12 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1557 1603
1558 delayacct_freepages_start(); 1604 delayacct_freepages_start();
1559 1605
1560 if (scan_global_lru(sc)) 1606 if (scanning_global_lru(sc))
1561 count_vm_event(ALLOCSTALL); 1607 count_vm_event(ALLOCSTALL);
1562 /* 1608 /*
1563 * mem_cgroup will not do shrink_slab. 1609 * mem_cgroup will not do shrink_slab.
1564 */ 1610 */
1565 if (scan_global_lru(sc)) { 1611 if (scanning_global_lru(sc)) {
1566 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1612 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1567 1613
1568 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1614 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
@@ -1581,7 +1627,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1581 * Don't shrink slabs when reclaiming memory from 1627 * Don't shrink slabs when reclaiming memory from
1582 * over limit cgroups 1628 * over limit cgroups
1583 */ 1629 */
1584 if (scan_global_lru(sc)) { 1630 if (scanning_global_lru(sc)) {
1585 shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages); 1631 shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
1586 if (reclaim_state) { 1632 if (reclaim_state) {
1587 sc->nr_reclaimed += reclaim_state->reclaimed_slab; 1633 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
@@ -1612,7 +1658,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1612 congestion_wait(WRITE, HZ/10); 1658 congestion_wait(WRITE, HZ/10);
1613 } 1659 }
1614 /* top priority shrink_zones still had more to do? don't OOM, then */ 1660 /* top priority shrink_zones still had more to do? don't OOM, then */
1615 if (!sc->all_unreclaimable && scan_global_lru(sc)) 1661 if (!sc->all_unreclaimable && scanning_global_lru(sc))
1616 ret = sc->nr_reclaimed; 1662 ret = sc->nr_reclaimed;
1617out: 1663out:
1618 /* 1664 /*
@@ -1625,7 +1671,7 @@ out:
1625 if (priority < 0) 1671 if (priority < 0)
1626 priority = 0; 1672 priority = 0;
1627 1673
1628 if (scan_global_lru(sc)) { 1674 if (scanning_global_lru(sc)) {
1629 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1675 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1630 1676
1631 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1677 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
@@ -1661,19 +1707,24 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
1661#ifdef CONFIG_CGROUP_MEM_RES_CTLR 1707#ifdef CONFIG_CGROUP_MEM_RES_CTLR
1662 1708
1663unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, 1709unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
1664 gfp_t gfp_mask) 1710 gfp_t gfp_mask,
1711 bool noswap,
1712 unsigned int swappiness)
1665{ 1713{
1666 struct scan_control sc = { 1714 struct scan_control sc = {
1667 .may_writepage = !laptop_mode, 1715 .may_writepage = !laptop_mode,
1668 .may_swap = 1, 1716 .may_swap = 1,
1669 .swap_cluster_max = SWAP_CLUSTER_MAX, 1717 .swap_cluster_max = SWAP_CLUSTER_MAX,
1670 .swappiness = vm_swappiness, 1718 .swappiness = swappiness,
1671 .order = 0, 1719 .order = 0,
1672 .mem_cgroup = mem_cont, 1720 .mem_cgroup = mem_cont,
1673 .isolate_pages = mem_cgroup_isolate_pages, 1721 .isolate_pages = mem_cgroup_isolate_pages,
1674 }; 1722 };
1675 struct zonelist *zonelist; 1723 struct zonelist *zonelist;
1676 1724
1725 if (noswap)
1726 sc.may_swap = 0;
1727
1677 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 1728 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
1678 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 1729 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
1679 zonelist = NODE_DATA(numa_node_id())->node_zonelists; 1730 zonelist = NODE_DATA(numa_node_id())->node_zonelists;
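
try_to_free_mem_cgroup_pages() now takes two policy knobs from the cgroup: noswap force-clears may_swap (for the mem+swap limit case, where swapping pages out cannot reduce usage), and swappiness replaces the global vm_swappiness. A sketch of how the two land in scan_control; the struct is reduced to the two fields of interest:

    #include <stdbool.h>
    #include <stdio.h>

    struct sc_model {                   /* reduced scan_control */
        int may_swap;
        unsigned int swappiness;
    };

    static struct sc_model setup_sc(bool noswap, unsigned int swappiness)
    {
        struct sc_model sc = { .may_swap = 1, .swappiness = swappiness };

        if (noswap)                     /* swap cannot relieve memsw pressure */
            sc.may_swap = 0;
        return sc;
    }

    int main(void)
    {
        struct sc_model a = setup_sc(false, 60);
        struct sc_model b = setup_sc(true, 60);

        printf("may_swap %d/%d, swappiness %u\n",
               a.may_swap, b.may_swap, a.swappiness);
        return 0;
    }
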
@@ -1761,7 +1812,7 @@ loop_again:
1761 * Do some background aging of the anon list, to give 1812 * Do some background aging of the anon list, to give
1762 * pages a chance to be referenced before reclaiming. 1813 * pages a chance to be referenced before reclaiming.
1763 */ 1814 */
1764 if (inactive_anon_is_low(zone)) 1815 if (inactive_anon_is_low(zone, &sc))
1765 shrink_active_list(SWAP_CLUSTER_MAX, zone, 1816 shrink_active_list(SWAP_CLUSTER_MAX, zone,
1766 &sc, priority, 0); 1817 &sc, priority, 0);
1767 1818
@@ -2404,6 +2455,7 @@ retry:
2404 2455
2405 __dec_zone_state(zone, NR_UNEVICTABLE); 2456 __dec_zone_state(zone, NR_UNEVICTABLE);
2406 list_move(&page->lru, &zone->lru[l].list); 2457 list_move(&page->lru, &zone->lru[l].list);
2458 mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
2407 __inc_zone_state(zone, NR_INACTIVE_ANON + l); 2459 __inc_zone_state(zone, NR_INACTIVE_ANON + l);
2408 __count_vm_event(UNEVICTABLE_PGRESCUED); 2460 __count_vm_event(UNEVICTABLE_PGRESCUED);
2409 } else { 2461 } else {
@@ -2412,6 +2464,7 @@ retry:
2412 */ 2464 */
2413 SetPageUnevictable(page); 2465 SetPageUnevictable(page);
2414 list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list); 2466 list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
2467 mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
2415 if (page_evictable(page, NULL)) 2468 if (page_evictable(page, NULL))
2416 goto retry; 2469 goto retry;
2417 } 2470 }
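
mem_cgroup_move_lists() here names both the source and the destination list: the memcg keeps a per-list page count, so rescuing a page off the unevictable list must decrement LRU_UNEVICTABLE before crediting the target list. A toy model of that two-argument move; the counter array is invented:

    #include <stdio.h>

    enum lru_list {
        LRU_INACTIVE_ANON, LRU_ACTIVE_ANON,
        LRU_INACTIVE_FILE, LRU_ACTIVE_FILE,
        LRU_UNEVICTABLE, NR_LRU_LISTS
    };

    static unsigned long memcg_count[NR_LRU_LISTS];

    /* both lists must be named so each per-list count stays exact */
    static void move_lists(enum lru_list from, enum lru_list to)
    {
        memcg_count[from]--;
        memcg_count[to]++;
    }

    int main(void)
    {
        memcg_count[LRU_UNEVICTABLE] = 1;   /* one rescued page */
        move_lists(LRU_UNEVICTABLE, LRU_INACTIVE_ANON);
        printf("unevictable=%lu inactive_anon=%lu\n",
               memcg_count[LRU_UNEVICTABLE],
               memcg_count[LRU_INACTIVE_ANON]);
        return 0;
    }
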