path: root/mm/memcontrol.c
author    Kirill A. Shutemov <kirill@shutemov.name>  2010-05-26 17:42:46 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-05-27 12:12:44 -0400
commit    907860ed381a31b0102f362df67c1c5cae6ef050 (patch)
tree      4f47a6fe898b1f45da505fc0c27d98e66d42aa46 /mm/memcontrol.c
parent    ac39cf8cb86c45eeac6a592ce0d58f9021a97235 (diff)
cgroups: make cftype.unregister_event() void-returning
Since we are unable to handle an error returned by cftype.unregister_event() properly, let's make the callback void-returning.

mem_cgroup_unregister_event() has been rewritten to be a "never fail" function. In mem_cgroup_usage_register_event() we save the old buffer for the thresholds array and reuse it in mem_cgroup_usage_unregister_event() to avoid allocation.

Signed-off-by: Kirill A. Shutemov <kirill@shutemov.name>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Phil Carmody <ext-phil.2.carmody@nokia.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Paul Menage <menage@google.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
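The mechanism described above can be illustrated outside the kernel. The sketch below is a simplified, hypothetical example of the same pattern: allocate only on the registration path (which may fail) and keep the displaced array as a spare, so the removal path only swaps buffers and can never return -ENOMEM. The names threshold_array, registry, register_threshold and unregister_threshold are invented for illustration and are not from memcontrol.c; RCU protection and locking are omitted.

/*
 * Illustrative sketch only -- not the kernel code from this commit.  It shows
 * the same idea in plain C: keep a preallocated "spare" array so the removal
 * path never allocates and so can never fail.  All names here are
 * hypothetical; RCU and locking are omitted.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct threshold_array {
	size_t size;
	int entries[];			/* flexible array member */
};

struct registry {
	struct threshold_array *active;	/* currently published array */
	struct threshold_array *spare;	/* preallocated buffer for removal */
};

/* Registration allocates, so it is the only path allowed to fail. */
static int register_threshold(struct registry *r, int value)
{
	size_t old_size = r->active ? r->active->size : 0;
	struct threshold_array *new_arr;

	new_arr = malloc(sizeof(*new_arr) + (old_size + 1) * sizeof(int));
	if (!new_arr)
		return -1;		/* -ENOMEM in kernel terms */

	if (old_size)
		memcpy(new_arr->entries, r->active->entries,
		       old_size * sizeof(int));
	new_arr->entries[old_size] = value;
	new_arr->size = old_size + 1;

	/* Keep the displaced array as the spare: it holds size - 1 entries. */
	free(r->spare);
	r->spare = r->active;
	r->active = new_arr;
	return 0;
}

/*
 * Removal reuses the spare buffer and cannot fail.  Like the kernel code, it
 * assumes the value being removed was actually registered.
 */
static void unregister_threshold(struct registry *r, int value)
{
	struct threshold_array *new_arr;
	size_t i, remaining = 0;

	if (!r->active)
		return;

	for (i = 0; i < r->active->size; i++)
		if (r->active->entries[i] != value)
			remaining++;

	if (!remaining) {
		/* Nothing left: drop the spare and publish NULL. */
		free(r->spare);
		new_arr = NULL;
	} else {
		/* Reuse the spare buffer: no allocation, no failure path. */
		new_arr = r->spare;
		new_arr->size = 0;
		for (i = 0; i < r->active->size; i++)
			if (r->active->entries[i] != value)
				new_arr->entries[new_arr->size++] =
					r->active->entries[i];
	}

	/* The displaced active array becomes the new spare. */
	r->spare = r->active;
	r->active = new_arr;
}

int main(void)
{
	struct registry r = { NULL, NULL };

	if (register_threshold(&r, 10) || register_threshold(&r, 20))
		return 1;
	unregister_threshold(&r, 10);	/* never fails */
	printf("%zu entries remain\n", r.active ? r.active->size : 0);
	free(r.active);
	free(r.spare);
	return 0;
}

The cost, mirrored in the patch by the __thresholds and __memsw_thresholds fields, is that one spare array per threshold type stays allocated between operations; in exchange, cftype.unregister_event() has no failure mode and can safely be made void-returning.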
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c | 65
1 file changed, 41 insertions(+), 24 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index df1234c0dac3..a4172a861b30 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -226,9 +226,19 @@ struct mem_cgroup {
 	/* thresholds for memory usage. RCU-protected */
 	struct mem_cgroup_threshold_ary *thresholds;
 
+	/*
+	 * Preallocated buffer to be used in mem_cgroup_unregister_event()
+	 * to make it "never fail".
+	 * It must be able to store at least thresholds->size - 1 entries.
+	 */
+	struct mem_cgroup_threshold_ary *__thresholds;
+
 	/* thresholds for mem+swap usage. RCU-protected */
 	struct mem_cgroup_threshold_ary *memsw_thresholds;
 
+	/* the same as __thresholds, but for memsw_thresholds */
+	struct mem_cgroup_threshold_ary *__memsw_thresholds;
+
 	/* For oom notifier event fd */
 	struct list_head oom_notify;
 
@@ -3604,17 +3614,27 @@ static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
 	else
 		rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new);
 
-	/* To be sure that nobody uses thresholds before freeing it */
+	/* To be sure that nobody uses thresholds */
 	synchronize_rcu();
 
-	kfree(thresholds);
+	/*
+	 * Free old preallocated buffer and use thresholds as new
+	 * preallocated buffer.
+	 */
+	if (type == _MEM) {
+		kfree(memcg->__thresholds);
+		memcg->__thresholds = thresholds;
+	} else {
+		kfree(memcg->__memsw_thresholds);
+		memcg->__memsw_thresholds = thresholds;
+	}
 unlock:
 	mutex_unlock(&memcg->thresholds_lock);
 
 	return ret;
 }
 
-static int mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
+static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 	struct cftype *cft, struct eventfd_ctx *eventfd)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
@@ -3622,7 +3642,7 @@ static int mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 	int type = MEMFILE_TYPE(cft->private);
 	u64 usage;
 	int size = 0;
-	int i, j, ret = 0;
+	int i, j;
 
 	mutex_lock(&memcg->thresholds_lock);
 	if (type == _MEM)
@@ -3649,20 +3669,19 @@ static int mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 			size++;
 	}
 
+	/* Use preallocated buffer for new array of thresholds */
+	if (type == _MEM)
+		thresholds_new = memcg->__thresholds;
+	else
+		thresholds_new = memcg->__memsw_thresholds;
+
 	/* Set thresholds array to NULL if we don't have thresholds */
 	if (!size) {
+		kfree(thresholds_new);
 		thresholds_new = NULL;
-		goto assign;
+		goto swap_buffers;
 	}
 
-	/* Allocate memory for new array of thresholds */
-	thresholds_new = kmalloc(sizeof(*thresholds_new) +
-			size * sizeof(struct mem_cgroup_threshold),
-			GFP_KERNEL);
-	if (!thresholds_new) {
-		ret = -ENOMEM;
-		goto unlock;
-	}
 	thresholds_new->size = size;
 
 	/* Copy thresholds and find current threshold */
@@ -3683,20 +3702,20 @@ static int mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 		j++;
 	}
 
-assign:
-	if (type == _MEM)
+swap_buffers:
+	/* Swap thresholds array and preallocated buffer */
+	if (type == _MEM) {
+		memcg->__thresholds = thresholds;
 		rcu_assign_pointer(memcg->thresholds, thresholds_new);
-	else
+	} else {
+		memcg->__memsw_thresholds = thresholds;
 		rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new);
+	}
 
-	/* To be sure that nobody uses thresholds before freeing it */
+	/* To be sure that nobody uses thresholds */
 	synchronize_rcu();
 
-	kfree(thresholds);
-unlock:
 	mutex_unlock(&memcg->thresholds_lock);
-
-	return ret;
 }
 
 static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
@@ -3724,7 +3743,7 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
 	return 0;
 }
 
-static int mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
+static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
 	struct cftype *cft, struct eventfd_ctx *eventfd)
 {
 	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
@@ -3743,8 +3762,6 @@ static int mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
 	}
 
 	mutex_unlock(&memcg_oom_mutex);
-
-	return 0;
 }
 
 static int mem_cgroup_oom_control_read(struct cgroup *cgrp,