path: root/mm/hugetlb.c
author	David Rientjes <rientjes@google.com>	2009-12-14 20:58:38 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 11:53:13 -0500
commit	bad44b5be84cf3bb1ff900bec02ee61e1993328c (patch)
tree	edd9a9cc2f482ca8684c9e64abe4b977a5525615 /mm/hugetlb.c
parent	39da08cb074cf19cb249832a2a955dfb28837e65 (diff)
mm: add gfp flags for NODEMASK_ALLOC slab allocations
Objects passed to NODEMASK_ALLOC() are relatively small in size and are backed by slab caches that are not of large order, traditionally never greater than PAGE_ALLOC_COSTLY_ORDER.

Thus, using GFP_KERNEL for these allocations on large machines when CONFIG_NODES_SHIFT > 8 will cause the page allocator to loop endlessly in the allocation attempt, each time invoking direct reclaim and, when that fails, the oom killer.

This is of particular interest when using NODEMASK_ALLOC() from a mempolicy context (either directly in mm/mempolicy.c or in the mempolicy-constrained hugetlb allocations), since the oom killer always kills current when allocations are constrained by mempolicies. So for all present use cases in the kernel, current would end up being oom killed when direct reclaim fails. That would allow the NODEMASK_ALLOC() to succeed, but current would have sacrificed itself upon returning.

This patch adds gfp flags to NODEMASK_ALLOC() to pass to kmalloc() when CONFIG_NODES_SHIFT > 8; the parameter is a nop on other configurations. All current use cases, either directly from hugetlb code or indirectly via NODEMASK_SCRATCH(), OR in __GFP_NORETRY to avoid direct reclaim and the oom killer when the slab allocator needs to allocate additional pages.

The side effect of this change is that all current use cases of either NODEMASK_ALLOC() or NODEMASK_SCRATCH() need appropriate -ENOMEM handling when the allocation fails (which can never happen for CONFIG_NODES_SHIFT <= 8). All current use cases were audited and do have appropriate error handling at this time.

Signed-off-by: David Rientjes <rientjes@google.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Adam Litke <agl@us.ibm.com>
Cc: Andy Whitcroft <apw@canonical.com>
Cc: Eric Whitney <eric.whitney@hp.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
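For reference, a sketch of what the NODEMASK_ALLOC()/NODEMASK_FREE() pair in include/linux/nodemask.h looks like with the new parameter; this is reconstructed from the description above rather than taken from the diff below, so treat the exact spelling and comments as an approximation:

/*
 * Sketch (reconstructed, not part of this diff): NODEMASK_ALLOC() with the
 * new gfp_flags parameter.  When the node count makes nodemask_t too large
 * for the stack, the mask is allocated with kmalloc() using the caller's
 * flags; otherwise the mask lives on the stack and gfp_flags is a nop.
 */
#if NODES_SHIFT > 8
#define NODEMASK_ALLOC(type, name, gfp_flags)	\
			type *name = kmalloc(sizeof(*name), gfp_flags)
#define NODEMASK_FREE(m)			kfree(m)
#else
#define NODEMASK_ALLOC(type, name, gfp_flags)	type _##name, *name = &_##name
#define NODEMASK_FREE(m)			do {} while (0)
#endif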
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	5
1 file changed, 3 insertions(+), 2 deletions(-)
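The changelog also mentions NODEMASK_SCRATCH(), which reaches NODEMASK_ALLOC() indirectly. A sketch of how it forwards GFP_KERNEL | __GFP_NORETRY after this change (again reconstructed from the description, not from this diff):

/*
 * Sketch (reconstructed): NODEMASK_SCRATCH() ORs in __GFP_NORETRY so the
 * slab allocator fails the allocation instead of looping into direct
 * reclaim and the oom killer.
 */
struct nodemask_scratch {
	nodemask_t	mask1;
	nodemask_t	mask2;
};

#define NODEMASK_SCRATCH(x)						\
			NODEMASK_ALLOC(struct nodemask_scratch, x,	\
				       GFP_KERNEL | __GFP_NORETRY)
#define NODEMASK_SCRATCH_FREE(x)	NODEMASK_FREE(x)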
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b4a263512cb7..450493d25572 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1361,7 +1361,7 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
 	int nid;
 	unsigned long count;
 	struct hstate *h;
-	NODEMASK_ALLOC(nodemask_t, nodes_allowed);
+	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
 
 	err = strict_strtoul(buf, 10, &count);
 	if (err)
@@ -1857,7 +1857,8 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
 	proc_doulongvec_minmax(table, write, buffer, length, ppos);
 
 	if (write) {
-		NODEMASK_ALLOC(nodemask_t, nodes_allowed);
+		NODEMASK_ALLOC(nodemask_t, nodes_allowed,
+						GFP_KERNEL | __GFP_NORETRY);
 		if (!(obey_mempolicy &&
 		      init_nodemask_of_mempolicy(nodes_allowed))) {
 			NODEMASK_FREE(nodes_allowed);
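Since NODEMASK_ALLOC() can now return NULL when CONFIG_NODES_SHIFT > 8, every caller needs the -ENOMEM handling the changelog describes. A hypothetical caller showing the pattern; example_set_nodes_allowed() is illustrative only and not part of this patch:

/*
 * Hypothetical caller: NODEMASK_ALLOC() may return NULL now that it uses
 * __GFP_NORETRY, so check the pointer before use and report -ENOMEM.
 */
static int example_set_nodes_allowed(void)
{
	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);

	/* Allocation can only fail when CONFIG_NODES_SHIFT > 8. */
	if (!nodes_allowed)
		return -ENOMEM;

	nodes_clear(*nodes_allowed);
	node_set(0, *nodes_allowed);	/* ... populate and use the mask ... */

	NODEMASK_FREE(nodes_allowed);
	return 0;
}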