diff options
| author | Balbir Singh <balbir@linux.vnet.ibm.com> | 2009-09-23 18:56:37 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-24 10:20:59 -0400 |
| commit | f64c3f54940d6929a2b6dcffaab942bd62be2e66 (patch) | |
| tree | 7b3587700b08639970580be6c87f36df80ca8c74 /kernel | |
| parent | 296c81d89f4f14269f7346f81442910158c0a83a (diff) | |
memory controller: soft limit organize cgroups
Organize cgroups over soft limit in a RB-Tree
Introduce an RB-Tree for storing memory cgroups that are over their soft
limit. The overall goal is to
1. Add a memory cgroup to the RB-Tree when the soft limit is exceeded.
We are careful about updates; updates take place only after a particular
time interval has passed
2. We remove the node from the RB-Tree when the usage goes below the soft
limit
The next set of patches will exploit the RB-Tree to get the group that is
over its soft limit by the largest amount and reclaim from it, when we
face memory contention.
[hugh.dickins@tiscali.co.uk: CONFIG_CGROUP_MEM_RES_CTLR=y CONFIG_PREEMPT=y fails to boot]
Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Jiri Slaby <jirislaby@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/res_counter.c | 18 |
1 file changed, 16 insertions, 2 deletions
diff --git a/kernel/res_counter.c b/kernel/res_counter.c index bcdabf37c40b..88faec23e833 100644 --- a/kernel/res_counter.c +++ b/kernel/res_counter.c | |||
| @@ -37,17 +37,27 @@ int res_counter_charge_locked(struct res_counter *counter, unsigned long val) | |||
| 37 | } | 37 | } |
| 38 | 38 | ||
| 39 | int res_counter_charge(struct res_counter *counter, unsigned long val, | 39 | int res_counter_charge(struct res_counter *counter, unsigned long val, |
| 40 | struct res_counter **limit_fail_at) | 40 | struct res_counter **limit_fail_at, |
| 41 | struct res_counter **soft_limit_fail_at) | ||
| 41 | { | 42 | { |
| 42 | int ret; | 43 | int ret; |
| 43 | unsigned long flags; | 44 | unsigned long flags; |
| 44 | struct res_counter *c, *u; | 45 | struct res_counter *c, *u; |
| 45 | 46 | ||
| 46 | *limit_fail_at = NULL; | 47 | *limit_fail_at = NULL; |
| 48 | if (soft_limit_fail_at) | ||
| 49 | *soft_limit_fail_at = NULL; | ||
| 47 | local_irq_save(flags); | 50 | local_irq_save(flags); |
| 48 | for (c = counter; c != NULL; c = c->parent) { | 51 | for (c = counter; c != NULL; c = c->parent) { |
| 49 | spin_lock(&c->lock); | 52 | spin_lock(&c->lock); |
| 50 | ret = res_counter_charge_locked(c, val); | 53 | ret = res_counter_charge_locked(c, val); |
| 54 | /* | ||
| 55 | * With soft limits, we return the highest ancestor | ||
| 56 | * that exceeds its soft limit | ||
| 57 | */ | ||
| 58 | if (soft_limit_fail_at && | ||
| 59 | !res_counter_soft_limit_check_locked(c)) | ||
| 60 | *soft_limit_fail_at = c; | ||
| 51 | spin_unlock(&c->lock); | 61 | spin_unlock(&c->lock); |
| 52 | if (ret < 0) { | 62 | if (ret < 0) { |
| 53 | *limit_fail_at = c; | 63 | *limit_fail_at = c; |
| @@ -75,7 +85,8 @@ void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val) | |||
| 75 | counter->usage -= val; | 85 | counter->usage -= val; |
| 76 | } | 86 | } |
| 77 | 87 | ||
| 78 | void res_counter_uncharge(struct res_counter *counter, unsigned long val) | 88 | void res_counter_uncharge(struct res_counter *counter, unsigned long val, |
| 89 | bool *was_soft_limit_excess) | ||
| 79 | { | 90 | { |
| 80 | unsigned long flags; | 91 | unsigned long flags; |
| 81 | struct res_counter *c; | 92 | struct res_counter *c; |
| @@ -83,6 +94,9 @@ void res_counter_uncharge(struct res_counter *counter, unsigned long val) | |||
| 83 | local_irq_save(flags); | 94 | local_irq_save(flags); |
| 84 | for (c = counter; c != NULL; c = c->parent) { | 95 | for (c = counter; c != NULL; c = c->parent) { |
| 85 | spin_lock(&c->lock); | 96 | spin_lock(&c->lock); |
| 97 | if (was_soft_limit_excess) | ||
| 98 | *was_soft_limit_excess = | ||
| 99 | !res_counter_soft_limit_check_locked(c); | ||
| 86 | res_counter_uncharge_locked(c, val); | 100 | res_counter_uncharge_locked(c, val); |
| 87 | spin_unlock(&c->lock); | 101 | spin_unlock(&c->lock); |
| 88 | } | 102 | } |
