author		KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>	2008-10-18 23:28:11 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-20 11:52:39 -0400
commit		c05555b572921c464d064d9267f7f7bc06d424fa (patch)
tree		48d4dbc315eeb4c851c6c484ef75bc816360d510 /mm/memcontrol.c
parent		addb9efebb2ee2202d324e75b593b39868528f68 (diff)
memcg: atomic ops for page_cgroup->flags
This patch makes page_cgroup->flags be modified only with atomic ops and
defines functions (and macros) to access it. This atomic handling of the
flags field is a prerequisite for the coming changes to the memory
resource controller.

Most of the flags in this patch are for LRU placement and are modified
under mz->lru_lock, but we'll soon add other flags that are not for LRU.
For example, we'll place a LOCK bit in the flags field. We need atomic
operations so the LRU bits can be modified without holding LOCK.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
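The pattern in isolation, as a minimal userspace sketch (not kernel code:
GCC/Clang __atomic builtins stand in for the kernel's set_bit()/clear_bit()/
test_bit(), and the enum mirrors the one the patch introduces):

/*
 * Userspace sketch only. The kernel bitops are atomic read-modify-write
 * operations on one bit of an unsigned long, so one bit of pc->flags can
 * be flipped safely while another bit of the same word is changed by a
 * context that shares no lock with us.
 */
#include <stdio.h>

enum { PCG_CACHE, PCG_ACTIVE, PCG_FILE, PCG_UNEVICTABLE };

static void set_bit_atomic(int nr, unsigned long *addr)
{
	__atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED);
}

static void clear_bit_atomic(int nr, unsigned long *addr)
{
	__atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
}

static int test_bit_atomic(int nr, unsigned long *addr)
{
	return (__atomic_load_n(addr, __ATOMIC_RELAXED) >> nr) & 1;
}

int main(void)
{
	unsigned long flags = 0;

	/* A plain "flags |= 1UL << PCG_ACTIVE" is a non-atomic
	 * load-modify-store; a concurrent "flags |= 1UL << PCG_CACHE"
	 * could be lost. The atomic versions cannot lose updates. */
	set_bit_atomic(PCG_CACHE, &flags);
	set_bit_atomic(PCG_ACTIVE, &flags);
	clear_bit_atomic(PCG_ACTIVE, &flags);
	printf("cache=%d active=%d\n",
	       test_bit_atomic(PCG_CACHE, &flags),
	       test_bit_atomic(PCG_ACTIVE, &flags));
	return 0;
}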
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	122
1 file changed, 82 insertions(+), 40 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 10846b9656aa..031682e7ef0c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -157,12 +157,46 @@ struct page_cgroup {
 	struct list_head lru;		/* per cgroup LRU list */
 	struct page *page;
 	struct mem_cgroup *mem_cgroup;
-	int flags;
+	unsigned long flags;
+};
+
+enum {
+	/* flags for mem_cgroup */
+	PCG_CACHE, /* charged as cache */
+	/* flags for LRU placement */
+	PCG_ACTIVE, /* page is active in this cgroup */
+	PCG_FILE, /* page is file system backed */
+	PCG_UNEVICTABLE, /* page is unevictableable */
 };
-#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
-#define PAGE_CGROUP_FLAG_ACTIVE	(0x2)	/* page is active in this cgroup */
-#define PAGE_CGROUP_FLAG_FILE	(0x4)	/* page is file system backed */
-#define PAGE_CGROUP_FLAG_UNEVICTABLE	(0x8)	/* page is unevictableable */
+
+#define TESTPCGFLAG(uname, lname)			\
+static inline int PageCgroup##uname(struct page_cgroup *pc)	\
+	{ return test_bit(PCG_##lname, &pc->flags); }
+
+#define SETPCGFLAG(uname, lname)			\
+static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
+	{ set_bit(PCG_##lname, &pc->flags); }
+
+#define CLEARPCGFLAG(uname, lname)			\
+static inline void ClearPageCgroup##uname(struct page_cgroup *pc)	\
+	{ clear_bit(PCG_##lname, &pc->flags); }
+
+
+/* Cache flag is set only once (at allocation) */
+TESTPCGFLAG(Cache, CACHE)
+
+/* LRU management flags (from global-lru definition) */
+TESTPCGFLAG(File, FILE)
+SETPCGFLAG(File, FILE)
+CLEARPCGFLAG(File, FILE)
+
+TESTPCGFLAG(Active, ACTIVE)
+SETPCGFLAG(Active, ACTIVE)
+CLEARPCGFLAG(Active, ACTIVE)
+
+TESTPCGFLAG(Unevictable, UNEVICTABLE)
+SETPCGFLAG(Unevictable, UNEVICTABLE)
+CLEARPCGFLAG(Unevictable, UNEVICTABLE)
 
 static int page_cgroup_nid(struct page_cgroup *pc)
 {
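Each TESTPCGFLAG/SETPCGFLAG/CLEARPCGFLAG invocation above generates a
one-line inline helper. For example, the Active trio expands (modulo
whitespace) to:

static inline int PageCgroupActive(struct page_cgroup *pc)
	{ return test_bit(PCG_ACTIVE, &pc->flags); }

static inline void SetPageCgroupActive(struct page_cgroup *pc)
	{ set_bit(PCG_ACTIVE, &pc->flags); }

static inline void ClearPageCgroupActive(struct page_cgroup *pc)
	{ clear_bit(PCG_ACTIVE, &pc->flags); }

Note that Cache gets only a TEST accessor: per the comment above, the flag
is set once at charge time (via pcg_default_flags, introduced below) and
never toggled afterwards.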
@@ -177,15 +211,25 @@ static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
 enum charge_type {
 	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
 	MEM_CGROUP_CHARGE_TYPE_MAPPED,
-	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
 	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
+	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
+	NR_CHARGE_TYPE,
+};
+
+static const unsigned long
+pcg_default_flags[NR_CHARGE_TYPE] = {
+	((1 << PCG_CACHE) | (1 << PCG_FILE)),
+	((1 << PCG_ACTIVE)),
+	((1 << PCG_ACTIVE) | (1 << PCG_CACHE)),
+	0,
 };
 
 /*
  * Always modified under lru lock. Then, not necessary to preempt_disable()
  */
-static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
-					bool charge)
+static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
+					 struct page_cgroup *pc,
+					 bool charge)
 {
 	int val = (charge)? 1 : -1;
 	struct mem_cgroup_stat *stat = &mem->stat;
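pcg_default_flags[] is indexed by enum charge_type, which is why this hunk
also moves MEM_CGROUP_CHARGE_TYPE_FORCE behind SHMEM: the three types that
actually charge pages take indices 0-2, while FORCE (used only by
force_empty, an uncharge path) gets an all-zero placeholder. Spelled out:

/*
 *   MEM_CGROUP_CHARGE_TYPE_CACHE  (0) -> (1 << PCG_CACHE) | (1 << PCG_FILE)
 *   MEM_CGROUP_CHARGE_TYPE_MAPPED (1) -> (1 << PCG_ACTIVE)
 *   MEM_CGROUP_CHARGE_TYPE_SHMEM  (2) -> (1 << PCG_ACTIVE) | (1 << PCG_CACHE)
 *   MEM_CGROUP_CHARGE_TYPE_FORCE  (3) -> 0 (placeholder; never charges)
 */
pc->flags = pcg_default_flags[ctype];	/* replaces the if/else ladder below */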
@@ -194,7 +238,7 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
 	VM_BUG_ON(!irqs_disabled());
 
 	cpustat = &stat->cpustat[smp_processor_id()];
-	if (flags & PAGE_CGROUP_FLAG_CACHE)
+	if (PageCgroupCache(pc))
 		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
 	else
 		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
@@ -295,18 +339,18 @@ static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
 {
 	int lru = LRU_BASE;
 
-	if (pc->flags & PAGE_CGROUP_FLAG_UNEVICTABLE)
+	if (PageCgroupUnevictable(pc))
 		lru = LRU_UNEVICTABLE;
 	else {
-		if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE)
+		if (PageCgroupActive(pc))
 			lru += LRU_ACTIVE;
-		if (pc->flags & PAGE_CGROUP_FLAG_FILE)
+		if (PageCgroupFile(pc))
 			lru += LRU_FILE;
 	}
 
 	MEM_CGROUP_ZSTAT(mz, lru) -= 1;
 
-	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
+	mem_cgroup_charge_statistics(pc->mem_cgroup, pc, false);
 	list_del(&pc->lru);
 }
 
@@ -315,27 +359,27 @@ static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
 {
 	int lru = LRU_BASE;
 
-	if (pc->flags & PAGE_CGROUP_FLAG_UNEVICTABLE)
+	if (PageCgroupUnevictable(pc))
 		lru = LRU_UNEVICTABLE;
 	else {
-		if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE)
+		if (PageCgroupActive(pc))
 			lru += LRU_ACTIVE;
-		if (pc->flags & PAGE_CGROUP_FLAG_FILE)
+		if (PageCgroupFile(pc))
 			lru += LRU_FILE;
 	}
 
 	MEM_CGROUP_ZSTAT(mz, lru) += 1;
 	list_add(&pc->lru, &mz->lists[lru]);
 
-	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
+	mem_cgroup_charge_statistics(pc->mem_cgroup, pc, true);
 }
 
 static void __mem_cgroup_move_lists(struct page_cgroup *pc, enum lru_list lru)
 {
 	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
-	int active = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
-	int file = pc->flags & PAGE_CGROUP_FLAG_FILE;
-	int unevictable = pc->flags & PAGE_CGROUP_FLAG_UNEVICTABLE;
+	int active = PageCgroupActive(pc);
+	int file = PageCgroupFile(pc);
+	int unevictable = PageCgroupUnevictable(pc);
 	enum lru_list from = unevictable ? LRU_UNEVICTABLE :
 				(LRU_FILE * !!file + !!active);
 
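The computation of "from" leans on the global LRU enum layout of this
kernel series (LRU_BASE == 0, LRU_ACTIVE == 1, LRU_FILE == 2), so the
expression LRU_FILE * !!file + !!active covers all four evictable lists:

/*
 *   file=0 active=0 -> 0  LRU_INACTIVE_ANON
 *   file=0 active=1 -> 1  LRU_ACTIVE_ANON
 *   file=1 active=0 -> 2  LRU_INACTIVE_FILE
 *   file=1 active=1 -> 3  LRU_ACTIVE_FILE
 *   unevictable     ->    LRU_UNEVICTABLE (handled by the ternary)
 */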
@@ -343,16 +387,20 @@ static void __mem_cgroup_move_lists(struct page_cgroup *pc, enum lru_list lru)
 		return;
 
 	MEM_CGROUP_ZSTAT(mz, from) -= 1;
-
+	/*
+	 * However this is done under mz->lru_lock, another flags, which
+	 * are not related to LRU, will be modified from out-of-lock.
+	 * We have to use atomic set/clear flags.
+	 */
 	if (is_unevictable_lru(lru)) {
-		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
-		pc->flags |= PAGE_CGROUP_FLAG_UNEVICTABLE;
+		ClearPageCgroupActive(pc);
+		SetPageCgroupUnevictable(pc);
 	} else {
 		if (is_active_lru(lru))
-			pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
+			SetPageCgroupActive(pc);
 		else
-			pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
-		pc->flags &= ~PAGE_CGROUP_FLAG_UNEVICTABLE;
+			ClearPageCgroupActive(pc);
+		ClearPageCgroupUnevictable(pc);
 	}
 
 	MEM_CGROUP_ZSTAT(mz, lru) += 1;
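The new comment is the core of the patch. The interleaving the atomic
bitops prevent looks roughly like this (PCG_LOCK stands for the
hypothetical non-LRU bit the changelog promises as a follow-up):

/*
 *   CPU0 (holds mz->lru_lock)           CPU1 (takes no lru_lock)
 *   tmp = pc->flags;
 *                                       tmp2 = pc->flags;
 *   tmp |= 1UL << PCG_ACTIVE;           tmp2 |= 1UL << PCG_LOCK;
 *   pc->flags = tmp;
 *                                       pc->flags = tmp2;  <- PCG_ACTIVE lost
 *
 * With set_bit()/clear_bit(), each update is a single atomic RMW on the
 * whole word, so neither writer can overwrite the other's bit.
 */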
@@ -589,16 +637,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	 * If a page is accounted as a page cache, insert to inactive list.
 	 * If anon, insert to active list.
 	 */
-	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE) {
-		pc->flags = PAGE_CGROUP_FLAG_CACHE;
-		if (page_is_file_cache(page))
-			pc->flags |= PAGE_CGROUP_FLAG_FILE;
-		else
-			pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
-	} else if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
-		pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
-	else /* MEM_CGROUP_CHARGE_TYPE_SHMEM */
-		pc->flags = PAGE_CGROUP_FLAG_CACHE | PAGE_CGROUP_FLAG_ACTIVE;
+	pc->flags = pcg_default_flags[ctype];
 
 	lock_page_cgroup(page);
 	if (unlikely(page_get_page_cgroup(page))) {
@@ -677,8 +716,12 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 	if (unlikely(!mm))
 		mm = &init_mm;
 
-	return mem_cgroup_charge_common(page, mm, gfp_mask,
+	if (page_is_file_cache(page))
+		return mem_cgroup_charge_common(page, mm, gfp_mask,
 				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
+	else
+		return mem_cgroup_charge_common(page, mm, gfp_mask,
+				MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
 }
 
 /*
@@ -706,8 +749,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	VM_BUG_ON(pc->page != page);
 
 	if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
-	    && ((pc->flags & PAGE_CGROUP_FLAG_CACHE)
-		|| page_mapped(page)))
+	    && ((PageCgroupCache(pc) || page_mapped(page))))
 		goto unlock;
 
 	mz = page_cgroup_zoneinfo(pc);
@@ -758,7 +800,7 @@ int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
 	if (pc) {
 		mem = pc->mem_cgroup;
 		css_get(&mem->css);
-		if (pc->flags & PAGE_CGROUP_FLAG_CACHE) {
+		if (PageCgroupCache(pc)) {
 			if (page_is_file_cache(page))
 				ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 			else