author    Phil Carmody <ext-phil.2.carmody@nokia.com>  2010-05-26 17:42:42 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-05-27 12:12:43 -0400
commit    5407a56257b6ade44fd9bcac972c99845b7413cd (patch)
tree      833a55856af25fe9be5e8a871d89f43677eac512 /mm/memcontrol.c
parent    df64f81bb1e01cbef967a96642dacf208acb7e72 (diff)
mm: remove unnecessary use of atomic
The bottom 4 hunks are atomically changing memory to which there are no
aliases as it's freshly allocated, so there's no need to use atomic
operations.

The other hunks are just atomic_read and atomic_set, and do not involve
any read-modify-write.  The use of atomic_{read,set} doesn't prevent a
read/write or write/write race, so if a race were possible (I'm not
saying one is), then it would still be there even with atomic_set.

See: http://digitalvampire.org/blog/index.php/2007/05/13/atomic-cargo-cults/

Signed-off-by: Phil Carmody <ext-phil.2.carmody@nokia.com>
Acked-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
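As an illustrative aside (not part of the commit): a minimal kernel-style
sketch of the read-modify-write race the message describes.  The counter
and the two helpers are hypothetical names; only atomic_read(),
atomic_set() and atomic_inc() are real <linux/atomic.h> API.

    #include <linux/atomic.h>

    static atomic_t counter = ATOMIC_INIT(0);

    /* Hypothetical helper: the read and the write are each atomic,
     * but the pair is not.  Two CPUs can both read 5 and both write
     * 6, losing an increment -- the kind of race that atomic_{read,set}
     * cannot prevent. */
    void racy_increment(void)
    {
            int v = atomic_read(&counter);
            atomic_set(&counter, v + 1);
    }

    /* Closing such a race needs a genuine read-modify-write primitive. */
    void safe_increment(void)
    {
            atomic_inc(&counter);
    }

The hunks below, by contrast, touch current_threshold only where no
concurrent read-modify-write exists, which is why plain accesses suffice.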
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index be5f478351bd..93b0239bc34d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -152,7 +152,7 @@ struct mem_cgroup_threshold {
 /* For threshold */
 struct mem_cgroup_threshold_ary {
 	/* An array index points to threshold just below usage. */
-	atomic_t current_threshold;
+	int current_threshold;
 	/* Size of entries[] */
 	unsigned int size;
 	/* Array of thresholds */
@@ -3412,7 +3412,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
 	 * If it's not true, a threshold was crossed after last
 	 * call of __mem_cgroup_threshold().
 	 */
-	i = atomic_read(&t->current_threshold);
+	i = t->current_threshold;
 
 	/*
 	 * Iterate backward over array of thresholds starting from
@@ -3436,7 +3436,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
 		eventfd_signal(t->entries[i].eventfd, 1);
 
 	/* Update current_threshold */
-	atomic_set(&t->current_threshold, i - 1);
+	t->current_threshold = i - 1;
 unlock:
 	rcu_read_unlock();
 }
@@ -3528,7 +3528,7 @@ static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
 			compare_thresholds, NULL);
 
 	/* Find current threshold */
-	atomic_set(&thresholds_new->current_threshold, -1);
+	thresholds_new->current_threshold = -1;
 	for (i = 0; i < size; i++) {
 		if (thresholds_new->entries[i].threshold < usage) {
 			/*
@@ -3536,7 +3536,7 @@ static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
 			 * until rcu_assign_pointer(), so it's safe to increment
 			 * it here.
 			 */
-			atomic_inc(&thresholds_new->current_threshold);
+			++thresholds_new->current_threshold;
 		}
 	}
 
@@ -3607,7 +3607,7 @@ static int mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 	thresholds_new->size = size;
 
 	/* Copy thresholds and find current threshold */
-	atomic_set(&thresholds_new->current_threshold, -1);
+	thresholds_new->current_threshold = -1;
 	for (i = 0, j = 0; i < thresholds->size; i++) {
 		if (thresholds->entries[i].eventfd == eventfd)
 			continue;
@@ -3619,7 +3619,7 @@ static int mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 			 * until rcu_assign_pointer(), so it's safe to increment
 			 * it here.
 			 */
-			atomic_inc(&thresholds_new->current_threshold);
+			++thresholds_new->current_threshold;
 		}
 		j++;
 	}
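For the "freshly allocated" hunks, a hedged sketch of the
publish-after-init pattern they rely on.  struct thresholds, active and
replace_thresholds are hypothetical reduced names, not the memcg code:
the point is only that plain stores are safe because no other CPU can
reach the object until rcu_assign_pointer() publishes it, and
rcu_assign_pointer() orders the initialising stores before the pointer
becomes visible to RCU readers.

    #include <linux/slab.h>
    #include <linux/rcupdate.h>

    /* Hypothetical reduced type for illustration only. */
    struct thresholds {
            int current_threshold;
    };

    static struct thresholds __rcu *active;

    static int replace_thresholds(void)
    {
            struct thresholds *new;

            new = kmalloc(sizeof(*new), GFP_KERNEL);
            if (!new)
                    return -ENOMEM;

            /* Plain store: the object has no aliases yet, so no atomic
             * op is needed, mirroring the hunks above. */
            new->current_threshold = -1;

            /* Publication point: orders the store above before any RCU
             * reader can observe the new pointer. */
            rcu_assign_pointer(active, new);
            return 0;
    }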