Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--	kernel/cpuset.c	55
1 file changed, 2 insertions(+), 53 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index f21ba868f0d1..38f7433c1cd2 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2453,7 +2453,7 @@ static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
 }
 
 /**
- * cpuset_node_allowed_softwall - Can we allocate on a memory node?
+ * cpuset_node_allowed - Can we allocate on a memory node?
  * @node: is this an allowed node?
  * @gfp_mask: memory allocation flags
  *
@@ -2465,13 +2465,6 @@ static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
  * flag, yes.
  * Otherwise, no.
  *
- * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
- * cpuset_node_allowed_hardwall().  Otherwise, cpuset_node_allowed_softwall()
- * might sleep, and might allow a node from an enclosing cpuset.
- *
- * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
- * cpusets, and never sleeps.
- *
  * The __GFP_THISNODE placement logic is really handled elsewhere,
  * by forcibly using a zonelist starting at a specified node, and by
  * (in get_page_from_freelist()) refusing to consider the zones for
@@ -2506,13 +2499,8 @@ static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
  *	TIF_MEMDIE   - any node ok
  *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
  *	GFP_USER     - only nodes in current tasks mems allowed ok.
- *
- * Rule:
- *    Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
- *    pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
- *    the code that might scan up ancestor cpusets and sleep.
  */
-int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
+int __cpuset_node_allowed(int node, gfp_t gfp_mask)
 {
 	struct cpuset *cs;		/* current cpuset ancestors */
 	int allowed;			/* is allocation in zone z allowed? */
@@ -2520,7 +2508,6 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 
 	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
 		return 1;
-	might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
 	if (node_isset(node, current->mems_allowed))
 		return 1;
 	/*
@@ -2547,44 +2534,6 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 	return allowed;
 }
 
-/*
- * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
- * @node: is this an allowed node?
- * @gfp_mask: memory allocation flags
- *
- * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
- * set, yes, we can always allocate.  If node is in our task's mems_allowed,
- * yes.  If the task has been OOM killed and has access to memory reserves as
- * specified by the TIF_MEMDIE flag, yes.
- * Otherwise, no.
- *
- * The __GFP_THISNODE placement logic is really handled elsewhere,
- * by forcibly using a zonelist starting at a specified node, and by
- * (in get_page_from_freelist()) refusing to consider the zones for
- * any node on the zonelist except the first.  By the time any such
- * calls get to this routine, we should just shut up and say 'yes'.
- *
- * Unlike the cpuset_node_allowed_softwall() variant, above,
- * this variant requires that the node be in the current task's
- * mems_allowed or that we're in interrupt.  It does not scan up the
- * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
- * It never sleeps.
- */
-int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
-{
-	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
-		return 1;
-	if (node_isset(node, current->mems_allowed))
-		return 1;
-	/*
-	 * Allow tasks that have access to memory reserves because they have
-	 * been OOM killed to get memory anywhere.
-	 */
-	if (unlikely(test_thread_flag(TIF_MEMDIE)))
-		return 1;
-	return 0;
-}
-
 /**
  * cpuset_mem_spread_node() - On which node to begin search for a file page
  * cpuset_slab_spread_node() - On which node to begin search for a slab page
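For context only, not part of this commit: after the merge, the __GFP_HARDWALL bit in gfp_mask selects which of the two old behaviours the single helper applies. A minimal caller-side sketch under that assumption (the wrapper name may_alloc_on_node() is hypothetical, only __cpuset_node_allowed() comes from this diff):

/*
 * Illustrative sketch, not from this diff: one merged helper replaces
 * both old variants; the caller's gfp_mask picks the semantics.
 */
static bool may_alloc_on_node(int node, gfp_t gfp_mask)
{
	/*
	 * With __GFP_HARDWALL set, only nodes in the task's own
	 * mems_allowed pass (the old *_hardwall() behaviour).
	 * Without it, nodes of the nearest hardwalled ancestor
	 * cpuset are also accepted (the old *_softwall() behaviour).
	 */
	return __cpuset_node_allowed(node, gfp_mask) != 0;
}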