diff options
author | Michal Hocko <mhocko@suse.cz> | 2011-07-26 19:08:24 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-07-26 19:49:42 -0400 |
commit | 1af8efe965676ab30d6c8a5b1fccc9229f339a3b (patch) | |
tree | 832cfd92cae498d9af1687bccc77344143df97a1 /mm | |
parent | 79dfdaccd1d5b40ff7cf4a35a0e63696ebb78b4d (diff) |
memcg: change memcg_oom_mutex to spinlock
memcg_oom_mutex is used to protect memcg OOM path and eventfd interface
for oom_control. None of the critical sections which it protects sleep
(eventfd_signal works from atomic context and the rest are simple linked
list resp. oom_lock atomic operations).
Mutex is also too heavyweight for those code paths because it triggers a
lot of scheduling. It also makes convoying effects more visible
when we have a big number of oom killing because we take the lock
multiple times during mem_cgroup_handle_oom so we have multiple places
where many processes can sleep.
Signed-off-by: Michal Hocko <mhocko@suse.cz>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memcontrol.c | 22 |
1 files changed, 11 insertions, 11 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 95d6c256b54c..c0b065ec1571 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -1725,7 +1725,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, | |||
1725 | /* | 1725 | /* |
1726 | * Check OOM-Killer is already running under our hierarchy. | 1726 | * Check OOM-Killer is already running under our hierarchy. |
1727 | * If someone is running, return false. | 1727 | * If someone is running, return false. |
1728 | * Has to be called with memcg_oom_mutex | 1728 | * Has to be called with memcg_oom_lock |
1729 | */ | 1729 | */ |
1730 | static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) | 1730 | static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) |
1731 | { | 1731 | { |
@@ -1770,7 +1770,7 @@ done: | |||
1770 | } | 1770 | } |
1771 | 1771 | ||
1772 | /* | 1772 | /* |
1773 | * Has to be called with memcg_oom_mutex | 1773 | * Has to be called with memcg_oom_lock |
1774 | */ | 1774 | */ |
1775 | static int mem_cgroup_oom_unlock(struct mem_cgroup *mem) | 1775 | static int mem_cgroup_oom_unlock(struct mem_cgroup *mem) |
1776 | { | 1776 | { |
@@ -1802,7 +1802,7 @@ static void mem_cgroup_unmark_under_oom(struct mem_cgroup *mem) | |||
1802 | atomic_add_unless(&iter->under_oom, -1, 0); | 1802 | atomic_add_unless(&iter->under_oom, -1, 0); |
1803 | } | 1803 | } |
1804 | 1804 | ||
1805 | static DEFINE_MUTEX(memcg_oom_mutex); | 1805 | static DEFINE_SPINLOCK(memcg_oom_lock); |
1806 | static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); | 1806 | static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); |
1807 | 1807 | ||
1808 | struct oom_wait_info { | 1808 | struct oom_wait_info { |
@@ -1864,7 +1864,7 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask) | |||
1864 | mem_cgroup_mark_under_oom(mem); | 1864 | mem_cgroup_mark_under_oom(mem); |
1865 | 1865 | ||
1866 | /* At first, try to OOM lock hierarchy under mem.*/ | 1866 | /* At first, try to OOM lock hierarchy under mem.*/ |
1867 | mutex_lock(&memcg_oom_mutex); | 1867 | spin_lock(&memcg_oom_lock); |
1868 | locked = mem_cgroup_oom_lock(mem); | 1868 | locked = mem_cgroup_oom_lock(mem); |
1869 | /* | 1869 | /* |
1870 | * Even if signal_pending(), we can't quit charge() loop without | 1870 | * Even if signal_pending(), we can't quit charge() loop without |
@@ -1876,7 +1876,7 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask) | |||
1876 | need_to_kill = false; | 1876 | need_to_kill = false; |
1877 | if (locked) | 1877 | if (locked) |
1878 | mem_cgroup_oom_notify(mem); | 1878 | mem_cgroup_oom_notify(mem); |
1879 | mutex_unlock(&memcg_oom_mutex); | 1879 | spin_unlock(&memcg_oom_lock); |
1880 | 1880 | ||
1881 | if (need_to_kill) { | 1881 | if (need_to_kill) { |
1882 | finish_wait(&memcg_oom_waitq, &owait.wait); | 1882 | finish_wait(&memcg_oom_waitq, &owait.wait); |
@@ -1885,11 +1885,11 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask) | |||
1885 | schedule(); | 1885 | schedule(); |
1886 | finish_wait(&memcg_oom_waitq, &owait.wait); | 1886 | finish_wait(&memcg_oom_waitq, &owait.wait); |
1887 | } | 1887 | } |
1888 | mutex_lock(&memcg_oom_mutex); | 1888 | spin_lock(&memcg_oom_lock); |
1889 | if (locked) | 1889 | if (locked) |
1890 | mem_cgroup_oom_unlock(mem); | 1890 | mem_cgroup_oom_unlock(mem); |
1891 | memcg_wakeup_oom(mem); | 1891 | memcg_wakeup_oom(mem); |
1892 | mutex_unlock(&memcg_oom_mutex); | 1892 | spin_unlock(&memcg_oom_lock); |
1893 | 1893 | ||
1894 | mem_cgroup_unmark_under_oom(mem); | 1894 | mem_cgroup_unmark_under_oom(mem); |
1895 | 1895 | ||
@@ -4553,7 +4553,7 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp, | |||
4553 | if (!event) | 4553 | if (!event) |
4554 | return -ENOMEM; | 4554 | return -ENOMEM; |
4555 | 4555 | ||
4556 | mutex_lock(&memcg_oom_mutex); | 4556 | spin_lock(&memcg_oom_lock); |
4557 | 4557 | ||
4558 | event->eventfd = eventfd; | 4558 | event->eventfd = eventfd; |
4559 | list_add(&event->list, &memcg->oom_notify); | 4559 | list_add(&event->list, &memcg->oom_notify); |
@@ -4561,7 +4561,7 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp, | |||
4561 | /* already in OOM ? */ | 4561 | /* already in OOM ? */ |
4562 | if (atomic_read(&memcg->under_oom)) | 4562 | if (atomic_read(&memcg->under_oom)) |
4563 | eventfd_signal(eventfd, 1); | 4563 | eventfd_signal(eventfd, 1); |
4564 | mutex_unlock(&memcg_oom_mutex); | 4564 | spin_unlock(&memcg_oom_lock); |
4565 | 4565 | ||
4566 | return 0; | 4566 | return 0; |
4567 | } | 4567 | } |
@@ -4575,7 +4575,7 @@ static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp, | |||
4575 | 4575 | ||
4576 | BUG_ON(type != _OOM_TYPE); | 4576 | BUG_ON(type != _OOM_TYPE); |
4577 | 4577 | ||
4578 | mutex_lock(&memcg_oom_mutex); | 4578 | spin_lock(&memcg_oom_lock); |
4579 | 4579 | ||
4580 | list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) { | 4580 | list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) { |
4581 | if (ev->eventfd == eventfd) { | 4581 | if (ev->eventfd == eventfd) { |
@@ -4584,7 +4584,7 @@ static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp, | |||
4584 | } | 4584 | } |
4585 | } | 4585 | } |
4586 | 4586 | ||
4587 | mutex_unlock(&memcg_oom_mutex); | 4587 | spin_unlock(&memcg_oom_lock); |
4588 | } | 4588 | } |
4589 | 4589 | ||
4590 | static int mem_cgroup_oom_control_read(struct cgroup *cgrp, | 4590 | static int mem_cgroup_oom_control_read(struct cgroup *cgrp, |