 arch/tile/kernel/setup.c |  4
 arch/x86/mm/numa.c       |  4
 include/linux/nodemask.h | 11
 kernel/cpuset.c          |  8
 lib/Makefile             |  2
 lib/nodemask.c           | 30
 mm/hugetlb.c             |  4
 mm/memcontrol.c          |  4
 mm/mempolicy.c           | 24
 mm/page_isolation.c      |  9
 mm/slab.c                | 13
 11 files changed, 54 insertions(+), 59 deletions(-)
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index a992238e9b58..153020abd2f5 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -962,9 +962,7 @@ static void __init setup_numa_mapping(void)
 		cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]);
 		cpu_2_node[best_cpu] = node;
 		cpumask_clear_cpu(best_cpu, &unbound_cpus);
-		node = next_node(node, default_nodes);
-		if (node == MAX_NUMNODES)
-			node = first_node(default_nodes);
+		node = next_node_in(node, default_nodes);
 	}
 
 	/* Print out node assignments and set defaults for disabled cpus */
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index f70c1ff46125..9c086c57105c 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -617,9 +617,7 @@ static void __init numa_init_array(void)
 		if (early_cpu_to_node(i) != NUMA_NO_NODE)
 			continue;
 		numa_set_node(i, rr);
-		rr = next_node(rr, node_online_map);
-		if (rr == MAX_NUMNODES)
-			rr = first_node(node_online_map);
+		rr = next_node_in(rr, node_online_map);
 	}
 }
 
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 6e85889cf9ab..f746e44d4046 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -43,8 +43,10 @@
  *
  * int first_node(mask)			Number lowest set bit, or MAX_NUMNODES
  * int next_node(node, mask)		Next node past 'node', or MAX_NUMNODES
+ * int next_node_in(node, mask)	Next node past 'node', or wrap to first,
+ *					or MAX_NUMNODES
  * int first_unset_node(mask)		First node not set in mask, or
- *					MAX_NUMNODES.
+ *					MAX_NUMNODES
  *
  * nodemask_t nodemask_of_node(node)	Return nodemask with bit 'node' set
  * NODE_MASK_ALL			Initializer - all bits set
@@ -259,6 +261,13 @@ static inline int __next_node(int n, const nodemask_t *srcp)
 	return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
 }
 
+/*
+ * Find the next present node in src, starting after node n, wrapping around to
+ * the first node in src if needed. Returns MAX_NUMNODES if src is empty.
+ */
+#define next_node_in(n, src) __next_node_in((n), &(src))
+int __next_node_in(int node, const nodemask_t *srcp);
+
 static inline void init_nodemask_of_node(nodemask_t *mask, int node)
 {
 	nodes_clear(*mask);
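
The header comment above pins down the semantics: next_node_in() behaves like next_node() except that it wraps around to the first node in the mask instead of stopping, and it still returns MAX_NUMNODES when the mask is empty. Below is a minimal userspace model of that wrap-around behaviour, not kernel code: a plain unsigned long stands in for nodemask_t, the hand-rolled scans stand in for find_next_bit(), and every *_model name is a hypothetical stand-in for the corresponding kernel helper.

#include <stdio.h>

#define MAX_NUMNODES 64

/* Next set bit strictly after n, or MAX_NUMNODES -- models __next_node(). */
static int next_node_model(int n, unsigned long mask)
{
	for (int i = n + 1; i < MAX_NUMNODES; i++)
		if (mask & (1UL << i))
			return i;
	return MAX_NUMNODES;
}

/* Lowest set bit, or MAX_NUMNODES -- models __first_node(). */
static int first_node_model(unsigned long mask)
{
	return next_node_model(-1, mask);
}

/* Wrap to the first node; MAX_NUMNODES only if the mask is empty --
 * models __next_node_in() from lib/nodemask.c below. */
static int next_node_in_model(int n, unsigned long mask)
{
	int ret = next_node_model(n, mask);

	if (ret == MAX_NUMNODES)
		ret = first_node_model(mask);
	return ret;
}

int main(void)
{
	unsigned long online = 1UL << 0 | 1UL << 2 | 1UL << 5;	/* nodes {0, 2, 5} */
	int node = 0;

	for (int i = 0; i < 6; i++) {
		node = next_node_in_model(node, online);
		printf("%d ", node);	/* prints: 2 5 0 2 5 0 */
	}
	printf("\n");
	return 0;
}

The round-robin loop in main() is exactly the pattern the remaining hunks in this patch simplify: callers no longer need the explicit "if the result is MAX_NUMNODES, fall back to first_node()" fixup.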
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1902956baba1..611cc69af8f0 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2591,13 +2591,7 @@ int __cpuset_node_allowed(int node, gfp_t gfp_mask)
 
 static int cpuset_spread_node(int *rotor)
 {
-	int node;
-
-	node = next_node(*rotor, current->mems_allowed);
-	if (node == MAX_NUMNODES)
-		node = first_node(current->mems_allowed);
-	*rotor = node;
-	return node;
+	return *rotor = next_node_in(*rotor, current->mems_allowed);
 }
 
 int cpuset_mem_spread_node(void)
diff --git a/lib/Makefile b/lib/Makefile
index 931396ada5eb..42b69185f963 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -25,7 +25,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 sha1.o md5.o irq_regs.o argv_split.o \
 	 flex_proportions.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
-	 earlycpio.o seq_buf.o nmi_backtrace.o
+	 earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o
 
 obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
 lib-$(CONFIG_MMU) += ioremap.o
diff --git a/lib/nodemask.c b/lib/nodemask.c
new file mode 100644
index 000000000000..e42a5bf44d33
--- /dev/null
+++ b/lib/nodemask.c
@@ -0,0 +1,30 @@
+#include <linux/nodemask.h>
+#include <linux/module.h>
+#include <linux/random.h>
+
+int __next_node_in(int node, const nodemask_t *srcp)
+{
+	int ret = __next_node(node, srcp);
+
+	if (ret == MAX_NUMNODES)
+		ret = __first_node(srcp);
+	return ret;
+}
+EXPORT_SYMBOL(__next_node_in);
+
+#ifdef CONFIG_NUMA
+/*
+ * Return the bit number of a random bit set in the nodemask.
+ * (returns NUMA_NO_NODE if nodemask is empty)
+ */
+int node_random(const nodemask_t *maskp)
+{
+	int w, bit = NUMA_NO_NODE;
+
+	w = nodes_weight(*maskp);
+	if (w)
+		bit = bitmap_ord_to_pos(maskp->bits,
+			get_random_int() % w, MAX_NUMNODES);
+	return bit;
+}
+#endif
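
node_random() moves here unchanged from mm/mempolicy.c (see the mempolicy hunks below) so it lives beside the other nodemask helpers. It picks a uniformly random set node in two steps: draw an ordinal below the mask's weight, then convert that ordinal to a bit position with bitmap_ord_to_pos(). A userspace sketch of that mapping follows; rand() stands in for get_random_int(), the *_model names are hypothetical stand-ins, and __builtin_popcountl() (assuming a GCC/Clang compiler) stands in for nodes_weight().

#include <stdio.h>
#include <stdlib.h>

#define MAX_NUMNODES 64
#define NUMA_NO_NODE  (-1)

/* Position of the ord-th (0-based) set bit -- models bitmap_ord_to_pos(). */
static int ord_to_pos_model(unsigned long mask, int ord)
{
	for (int i = 0; i < MAX_NUMNODES; i++)
		if ((mask & (1UL << i)) && ord-- == 0)
			return i;
	return MAX_NUMNODES;
}

/* Models node_random(): NUMA_NO_NODE for an empty mask, else a
 * uniformly chosen set bit. */
static int node_random_model(unsigned long mask)
{
	int w = __builtin_popcountl(mask);

	if (!w)
		return NUMA_NO_NODE;
	return ord_to_pos_model(mask, rand() % w);
}

int main(void)
{
	unsigned long nodes = 1UL << 1 | 1UL << 4 | 1UL << 7;

	for (int i = 0; i < 8; i++)
		printf("%d ", node_random_model(nodes));	/* only 1, 4, or 7 */
	printf("\n");
	return 0;
}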
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 19d0d08b396f..5856093f9062 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -937,9 +937,7 @@ err:
  */
 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
 {
-	nid = next_node(nid, *nodes_allowed);
-	if (nid == MAX_NUMNODES)
-		nid = first_node(*nodes_allowed);
+	nid = next_node_in(nid, *nodes_allowed);
 	VM_BUG_ON(nid >= MAX_NUMNODES);
 
 	return nid;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index fe787f5c41bd..6740c4c2b550 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1389,9 +1389,7 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
 	mem_cgroup_may_update_nodemask(memcg);
 	node = memcg->last_scanned_node;
 
-	node = next_node(node, memcg->scan_nodes);
-	if (node == MAX_NUMNODES)
-		node = first_node(memcg->scan_nodes);
+	node = next_node_in(node, memcg->scan_nodes);
 	/*
 	 * We call this when we hit limit, not when pages are added to LRU.
 	 * No LRU may hold pages because all pages are UNEVICTABLE or
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 36cc01bc950a..8d369cee0cd6 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -97,7 +97,6 @@
 
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
-#include <linux/random.h>
 
 #include "internal.h"
 
@@ -347,9 +346,7 @@ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
 		BUG();
 
 	if (!node_isset(current->il_next, tmp)) {
-		current->il_next = next_node(current->il_next, tmp);
-		if (current->il_next >= MAX_NUMNODES)
-			current->il_next = first_node(tmp);
+		current->il_next = next_node_in(current->il_next, tmp);
 		if (current->il_next >= MAX_NUMNODES)
 			current->il_next = numa_node_id();
 	}
@@ -1709,9 +1706,7 @@ static unsigned interleave_nodes(struct mempolicy *policy)
 	struct task_struct *me = current;
 
 	nid = me->il_next;
-	next = next_node(nid, policy->v.nodes);
-	if (next >= MAX_NUMNODES)
-		next = first_node(policy->v.nodes);
+	next = next_node_in(nid, policy->v.nodes);
 	if (next < MAX_NUMNODES)
 		me->il_next = next;
 	return nid;
@@ -1805,21 +1800,6 @@ static inline unsigned interleave_nid(struct mempolicy *pol,
 	return interleave_nodes(pol);
 }
 
-/*
- * Return the bit number of a random bit set in the nodemask.
- * (returns NUMA_NO_NODE if nodemask is empty)
- */
-int node_random(const nodemask_t *maskp)
-{
-	int w, bit = NUMA_NO_NODE;
-
-	w = nodes_weight(*maskp);
-	if (w)
-		bit = bitmap_ord_to_pos(maskp->bits,
-			get_random_int() % w, MAX_NUMNODES);
-	return bit;
-}
-
 #ifdef CONFIG_HUGETLBFS
 /*
  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index c4f568206544..67bedd18429c 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -288,13 +288,10 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
 	 * accordance with memory policy of the user process if possible. For
 	 * now as a simple work-around, we use the next node for destination.
 	 */
-	if (PageHuge(page)) {
-		int node = next_online_node(page_to_nid(page));
-		if (node == MAX_NUMNODES)
-			node = first_online_node;
+	if (PageHuge(page))
 		return alloc_huge_page_node(page_hstate(compound_head(page)),
-					    node);
-	}
+					    next_node_in(page_to_nid(page),
+							 node_online_map));
 
 	if (PageHighMem(page))
 		gfp_mask |= __GFP_HIGHMEM;
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -522,22 +522,15 @@ static DEFINE_PER_CPU(unsigned long, slab_reap_node);
 
 static void init_reap_node(int cpu)
 {
-	int node;
-
-	node = next_node(cpu_to_mem(cpu), node_online_map);
-	if (node == MAX_NUMNODES)
-		node = first_node(node_online_map);
-
-	per_cpu(slab_reap_node, cpu) = node;
+	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
+						    node_online_map);
 }
 
 static void next_reap_node(void)
 {
 	int node = __this_cpu_read(slab_reap_node);
 
-	node = next_node(node, node_online_map);
-	if (unlikely(node >= MAX_NUMNODES))
-		node = first_node(node_online_map);
+	node = next_node_in(node, node_online_map);
 	__this_cpu_write(slab_reap_node, node);
 }
 