author     Christoph Lameter <cl@linux-foundation.org>      2008-10-18 23:26:14 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-10-20 11:50:25 -0400
commit     b69408e88bd86b98feb7b9a38fd865e1ddb29827 (patch)
tree       b19277c29fe624870ba776cc6ada59928cd2796d /mm/memcontrol.c
parent     62695a84eb8f2e718bf4dfb21700afaa7a08e0ea (diff)
vmscan: Use an indexed array for LRU variables
Currently we are defining explicit variables for the inactive and active
list. An indexed array can be more generic and avoid repeating similar
code in several places in the reclaim code.

We are saving a few bytes in terms of code size:

Before:
   text    data     bss     dec     hex filename
4097753  573120 4092484 8763357  85b7dd vmlinux

After:
   text    data     bss     dec     hex filename
4097729  573120 4092484 8763333  85b7c5 vmlinux

Having an easy way to add new lru lists may ease future work on the
reclaim code.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
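The change is easy to see in miniature. The sketch below is a small, self-contained userspace program (illustrative only, not kernel code) showing the pattern the patch switches to: a single array indexed by an lru_list enum replaces separate active/inactive variables, and callers walk every list with a for_each_lru()-style loop. The enum values and the for_each_lru() macro here are simplified stand-ins for the kernel's own definitions (the real ones live in include/linux/mmzone.h and the newly included linux/mm_inline.h), and struct zone_lru_stats plus the numbers are invented for the example.

/*
 * Illustrative userspace sketch only: simplified stand-ins for the
 * kernel's enum lru_list, NR_LRU_LISTS and for_each_lru(); the struct
 * and the page counts are made up for the example.
 */
#include <stdio.h>

enum lru_list {
        LRU_INACTIVE,           /* index 0 */
        LRU_ACTIVE,             /* index 1 */
        NR_LRU_LISTS            /* array size; grows if lists are added */
};

/* analogue of the kernel's for_each_lru() iterator */
#define for_each_lru(l) for ((l) = 0; (l) < NR_LRU_LISTS; (l)++)

struct zone_lru_stats {
        /* one slot per list replaces separate nr_active/nr_inactive fields */
        unsigned long count[NR_LRU_LISTS];
};

int main(void)
{
        struct zone_lru_stats z = {
                .count = { [LRU_INACTIVE] = 40, [LRU_ACTIVE] = 100 }
        };
        enum lru_list l;

        /* one loop covers every list; no per-list code to duplicate */
        for_each_lru(l)
                printf("lru %d: %lu pages\n", (int)l, z.count[l]);

        return 0;
}

The same shape is what lets mem_cgroup_force_empty() in the diff below collapse two hard-coded calls into one for_each_lru() loop, and it is why adding further LRU lists later should not require touching each call site.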
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c | 115
1 file changed, 43 insertions(+), 72 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 36896f3eb7f5..c0cbd7790c51 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -32,6 +32,7 @@
 #include <linux/fs.h>
 #include <linux/seq_file.h>
 #include <linux/vmalloc.h>
+#include <linux/mm_inline.h>
 
 #include <asm/uaccess.h>
 
@@ -85,22 +86,13 @@ static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
 /*
  * per-zone information in memory controller.
  */
-
-enum mem_cgroup_zstat_index {
-        MEM_CGROUP_ZSTAT_ACTIVE,
-        MEM_CGROUP_ZSTAT_INACTIVE,
-
-        NR_MEM_CGROUP_ZSTAT,
-};
-
 struct mem_cgroup_per_zone {
         /*
          * spin_lock to protect the per cgroup LRU
          */
         spinlock_t lru_lock;
-        struct list_head active_list;
-        struct list_head inactive_list;
-        unsigned long count[NR_MEM_CGROUP_ZSTAT];
+        struct list_head lists[NR_LRU_LISTS];
+        unsigned long count[NR_LRU_LISTS];
 };
 /* Macro for accessing counter */
 #define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)])
@@ -227,7 +219,7 @@ page_cgroup_zoneinfo(struct page_cgroup *pc)
 }
 
 static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
-                                enum mem_cgroup_zstat_index idx)
+                                enum lru_list idx)
 {
         int nid, zid;
         struct mem_cgroup_per_zone *mz;
@@ -297,11 +289,9 @@ static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
                         struct page_cgroup *pc)
 {
         int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
+        int lru = !!from;
 
-        if (from)
-                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
-        else
-                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
+        MEM_CGROUP_ZSTAT(mz, lru) -= 1;
 
         mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
         list_del(&pc->lru);
@@ -310,37 +300,35 @@ static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
 static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
                         struct page_cgroup *pc)
 {
-        int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
+        int lru = LRU_INACTIVE;
+
+        if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE)
+                lru += LRU_ACTIVE;
+
+        MEM_CGROUP_ZSTAT(mz, lru) += 1;
+        list_add(&pc->lru, &mz->lists[lru]);
 
-        if (!to) {
-                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
-                list_add(&pc->lru, &mz->inactive_list);
-        } else {
-                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
-                list_add(&pc->lru, &mz->active_list);
-        }
         mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
 }
 
 static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
 {
-        int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
         struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
+        int lru = LRU_INACTIVE;
 
-        if (from)
-                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
-        else
-                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
+        if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE)
+                lru += LRU_ACTIVE;
 
-        if (active) {
-                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
+        MEM_CGROUP_ZSTAT(mz, lru) -= 1;
+
+        if (active)
                 pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
-                list_move(&pc->lru, &mz->active_list);
-        } else {
-                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
+        else
                 pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
-                list_move(&pc->lru, &mz->inactive_list);
-        }
+
+        lru = !!active;
+        MEM_CGROUP_ZSTAT(mz, lru) += 1;
+        list_move(&pc->lru, &mz->lists[lru]);
 }
 
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
@@ -412,8 +400,8 @@ long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
 {
         unsigned long active, inactive;
         /* active and inactive are the number of pages. 'long' is ok.*/
-        active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
-        inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
+        active = mem_cgroup_get_all_zonestat(mem, LRU_ACTIVE);
+        inactive = mem_cgroup_get_all_zonestat(mem, LRU_INACTIVE);
         return (long) (active / (inactive + 1));
 }
 
@@ -444,28 +432,17 @@ void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
  * (see include/linux/mmzone.h)
  */
 
-long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
-                                struct zone *zone, int priority)
+long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
+                                int priority, enum lru_list lru)
 {
-        long nr_active;
+        long nr_pages;
         int nid = zone->zone_pgdat->node_id;
         int zid = zone_idx(zone);
         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
 
-        nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
-        return (nr_active >> priority);
-}
-
-long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
-                                struct zone *zone, int priority)
-{
-        long nr_inactive;
-        int nid = zone->zone_pgdat->node_id;
-        int zid = zone_idx(zone);
-        struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
+        nr_pages = MEM_CGROUP_ZSTAT(mz, lru);
 
-        nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
-        return (nr_inactive >> priority);
+        return (nr_pages >> priority);
 }
 
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
@@ -484,14 +461,11 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
         int nid = z->zone_pgdat->node_id;
         int zid = zone_idx(z);
         struct mem_cgroup_per_zone *mz;
+        int lru = !!active;
 
         BUG_ON(!mem_cont);
         mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
-        if (active)
-                src = &mz->active_list;
-        else
-                src = &mz->inactive_list;
-
+        src = &mz->lists[lru];
 
         spin_lock(&mz->lru_lock);
         scan = 0;
@@ -863,7 +837,7 @@ int mem_cgroup_resize_limit(struct mem_cgroup *memcg, unsigned long long val)
 #define FORCE_UNCHARGE_BATCH (128)
 static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
                             struct mem_cgroup_per_zone *mz,
-                            int active)
+                            enum lru_list lru)
 {
         struct page_cgroup *pc;
         struct page *page;
@@ -871,10 +845,7 @@ static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
         unsigned long flags;
         struct list_head *list;
 
-        if (active)
-                list = &mz->active_list;
-        else
-                list = &mz->inactive_list;
+        list = &mz->lists[lru];
 
         spin_lock_irqsave(&mz->lru_lock, flags);
         while (!list_empty(list)) {
@@ -922,11 +893,10 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem)
                 for_each_node_state(node, N_POSSIBLE)
                         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
                                 struct mem_cgroup_per_zone *mz;
+                                enum lru_list l;
                                 mz = mem_cgroup_zoneinfo(mem, node, zid);
-                                /* drop all page_cgroup in active_list */
-                                mem_cgroup_force_empty_list(mem, mz, 1);
-                                /* drop all page_cgroup in inactive_list */
-                                mem_cgroup_force_empty_list(mem, mz, 0);
+                                for_each_lru(l)
+                                        mem_cgroup_force_empty_list(mem, mz, l);
                         }
         }
         ret = 0;
@@ -1015,9 +985,9 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
                 unsigned long active, inactive;
 
                 inactive = mem_cgroup_get_all_zonestat(mem_cont,
-                                                MEM_CGROUP_ZSTAT_INACTIVE);
+                                                LRU_INACTIVE);
                 active = mem_cgroup_get_all_zonestat(mem_cont,
-                                                MEM_CGROUP_ZSTAT_ACTIVE);
+                                                LRU_ACTIVE);
                 cb->fill(cb, "active", (active) * PAGE_SIZE);
                 cb->fill(cb, "inactive", (inactive) * PAGE_SIZE);
         }
@@ -1062,6 +1032,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 {
         struct mem_cgroup_per_node *pn;
         struct mem_cgroup_per_zone *mz;
+        enum lru_list l;
         int zone, tmp = node;
         /*
          * This routine is called against possible nodes.
@@ -1082,9 +1053,9 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 
         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
                 mz = &pn->zoneinfo[zone];
-                INIT_LIST_HEAD(&mz->active_list);
-                INIT_LIST_HEAD(&mz->inactive_list);
                 spin_lock_init(&mz->lru_lock);
+                for_each_lru(l)
+                        INIT_LIST_HEAD(&mz->lists[l]);
         }
         return 0;
 }