aboutsummaryrefslogtreecommitdiffstats
path: root/mm/mempolicy.c
diff options
context:
space:
mode:
authorDavid Rientjes <rientjes@google.com>2010-08-09 20:18:52 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2010-08-09 23:44:56 -0400
commit6f48d0ebd907ae419387f27b602ee98870cfa7bb (patch)
tree355bd8b616f5a78d8adabe5b9631d7aad970dbaa /mm/mempolicy.c
parent5e9d834a0e0c0485dfa487281ab9650fc37a3bb5 (diff)
oom: select task from tasklist for mempolicy ooms
The oom killer presently kills current whenever there is no more memory free or reclaimable on its mempolicy's nodes. There is no guarantee that current is a memory-hogging task or that killing it will free any substantial amount of memory, however. In such situations, it is better to scan the tasklist for tasks that are allowed to allocate on current's set of nodes and kill the task with the highest badness() score. This ensures that the most memory-hogging task, or the one configured by the user with /proc/pid/oom_adj, is always selected in such scenarios. Signed-off-by: David Rientjes <rientjes@google.com> Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--mm/mempolicy.c44
1 files changed, 44 insertions, 0 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 5bc0a96beb51..8a73708d59bb 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1712,6 +1712,50 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask)
1712} 1712}
1713#endif 1713#endif
1714 1714
1715/*
1716 * mempolicy_nodemask_intersects
1717 *
1718 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1719 * policy. Otherwise, check for intersection between mask and the policy
1720 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1721 * policy, always return true since it may allocate elsewhere on fallback.
1722 *
1723 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1724 */
1725bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1726 const nodemask_t *mask)
1727{
1728 struct mempolicy *mempolicy;
1729 bool ret = true;
1730
1731 if (!mask)
1732 return ret;
1733 task_lock(tsk);
1734 mempolicy = tsk->mempolicy;
1735 if (!mempolicy)
1736 goto out;
1737
1738 switch (mempolicy->mode) {
1739 case MPOL_PREFERRED:
1740 /*
1741 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
1742 * allocate from, they may fallback to other nodes when oom.
1743 * Thus, it's possible for tsk to have allocated memory from
1744 * nodes in mask.
1745 */
1746 break;
1747 case MPOL_BIND:
1748 case MPOL_INTERLEAVE:
1749 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1750 break;
1751 default:
1752 BUG();
1753 }
1754out:
1755 task_unlock(tsk);
1756 return ret;
1757}
1758
1715/* Allocate a page in interleaved policy. 1759/* Allocate a page in interleaved policy.
1716 Own path because it needs to do special accounting. */ 1760 Own path because it needs to do special accounting. */
1717static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 1761static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,