diff options
author | Alexey Dobriyan <adobriyan@gmail.com> | 2019-03-05 18:48:26 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-03-06 00:07:19 -0500 |
commit | b9726c26dc21b15a2faea96fae3a42f2f7fffdcb (patch) | |
tree | 28c39800b37b56f9cb026a8dfc79e35c6208e2b4 | |
parent | d342a0b38674867ea67fde47b0e1e60ffe9f17a2 (diff) |
numa: make "nr_node_ids" unsigned int
Number of NUMA nodes can't be negative.
This saves a few bytes on x86_64:
add/remove: 0/0 grow/shrink: 4/21 up/down: 27/-265 (-238)
Function old new delta
hv_synic_alloc.cold 88 110 +22
prealloc_shrinker 260 262 +2
bootstrap 249 251 +2
sched_init_numa 1566 1567 +1
show_slab_objects 778 777 -1
s_show 1201 1200 -1
kmem_cache_init 346 345 -1
__alloc_workqueue_key 1146 1145 -1
mem_cgroup_css_alloc 1614 1612 -2
__do_sys_swapon 4702 4699 -3
__list_lru_init 655 651 -4
nic_probe 2379 2374 -5
store_user_store 118 111 -7
red_zone_store 106 99 -7
poison_store 106 99 -7
wq_numa_init 348 338 -10
__kmem_cache_empty 75 65 -10
task_numa_free 186 173 -13
merge_across_nodes_store 351 336 -15
irq_create_affinity_masks 1261 1246 -15
do_numa_crng_init 343 321 -22
task_numa_fault 4760 4737 -23
swapfile_init 179 156 -23
hv_synic_alloc 536 492 -44
apply_wqattrs_prepare 746 695 -51
Link: http://lkml.kernel.org/r/20190201223029.GA15820@avx2
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | arch/arm64/mm/numa.c | 2 | ||||
-rw-r--r-- | arch/powerpc/mm/numa.c | 2 | ||||
-rw-r--r-- | arch/x86/kernel/setup_percpu.c | 2 | ||||
-rw-r--r-- | arch/x86/mm/numa.c | 4 | ||||
-rw-r--r-- | include/linux/nodemask.h | 4 | ||||
-rw-r--r-- | mm/list_lru.c | 3 | ||||
-rw-r--r-- | mm/memcontrol.c | 2 | ||||
-rw-r--r-- | mm/page_alloc.c | 2 | ||||
-rw-r--r-- | mm/slab.c | 3 | ||||
-rw-r--r-- | mm/slub.c | 2 | ||||
-rw-r--r-- | mm/swapfile.c | 2 | ||||
-rw-r--r-- | mm/vmscan.c | 2 |
12 files changed, 14 insertions(+), 16 deletions(-)
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c index ae34e3a1cef1..7a0a555b366a 100644 --- a/arch/arm64/mm/numa.c +++ b/arch/arm64/mm/numa.c | |||
@@ -120,7 +120,7 @@ static void __init setup_node_to_cpumask_map(void) | |||
120 | } | 120 | } |
121 | 121 | ||
122 | /* cpumask_of_node() will now work */ | 122 | /* cpumask_of_node() will now work */ |
123 | pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); | 123 | pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids); |
124 | } | 124 | } |
125 | 125 | ||
126 | /* | 126 | /* |
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 270cefb75cca..df1e11ebbabb 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c | |||
@@ -84,7 +84,7 @@ static void __init setup_node_to_cpumask_map(void) | |||
84 | alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); | 84 | alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); |
85 | 85 | ||
86 | /* cpumask_of_node() will now work */ | 86 | /* cpumask_of_node() will now work */ |
87 | dbg("Node to cpumask map for %d nodes\n", nr_node_ids); | 87 | dbg("Node to cpumask map for %u nodes\n", nr_node_ids); |
88 | } | 88 | } |
89 | 89 | ||
90 | static int __init fake_numa_create_new_node(unsigned long end_pfn, | 90 | static int __init fake_numa_create_new_node(unsigned long end_pfn, |
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index e8796fcd7e5a..13af08827eef 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
@@ -171,7 +171,7 @@ void __init setup_per_cpu_areas(void) | |||
171 | unsigned long delta; | 171 | unsigned long delta; |
172 | int rc; | 172 | int rc; |
173 | 173 | ||
174 | pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%d\n", | 174 | pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%u\n", |
175 | NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); | 175 | NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); |
176 | 176 | ||
177 | /* | 177 | /* |
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 1308f5408bf7..12c1b7a83ed7 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c | |||
@@ -123,7 +123,7 @@ void __init setup_node_to_cpumask_map(void) | |||
123 | alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); | 123 | alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); |
124 | 124 | ||
125 | /* cpumask_of_node() will now work */ | 125 | /* cpumask_of_node() will now work */ |
126 | pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); | 126 | pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids); |
127 | } | 127 | } |
128 | 128 | ||
129 | static int __init numa_add_memblk_to(int nid, u64 start, u64 end, | 129 | static int __init numa_add_memblk_to(int nid, u64 start, u64 end, |
@@ -866,7 +866,7 @@ const struct cpumask *cpumask_of_node(int node) | |||
866 | { | 866 | { |
867 | if (node >= nr_node_ids) { | 867 | if (node >= nr_node_ids) { |
868 | printk(KERN_WARNING | 868 | printk(KERN_WARNING |
869 | "cpumask_of_node(%d): node > nr_node_ids(%d)\n", | 869 | "cpumask_of_node(%d): node > nr_node_ids(%u)\n", |
870 | node, nr_node_ids); | 870 | node, nr_node_ids); |
871 | dump_stack(); | 871 | dump_stack(); |
872 | return cpu_none_mask; | 872 | return cpu_none_mask; |
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index 5a30ad594ccc..962c5e783d50 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h | |||
@@ -444,7 +444,7 @@ static inline int next_memory_node(int nid) | |||
444 | return next_node(nid, node_states[N_MEMORY]); | 444 | return next_node(nid, node_states[N_MEMORY]); |
445 | } | 445 | } |
446 | 446 | ||
447 | extern int nr_node_ids; | 447 | extern unsigned int nr_node_ids; |
448 | extern int nr_online_nodes; | 448 | extern int nr_online_nodes; |
449 | 449 | ||
450 | static inline void node_set_online(int nid) | 450 | static inline void node_set_online(int nid) |
@@ -485,7 +485,7 @@ static inline int num_node_state(enum node_states state) | |||
485 | #define first_online_node 0 | 485 | #define first_online_node 0 |
486 | #define first_memory_node 0 | 486 | #define first_memory_node 0 |
487 | #define next_online_node(nid) (MAX_NUMNODES) | 487 | #define next_online_node(nid) (MAX_NUMNODES) |
488 | #define nr_node_ids 1 | 488 | #define nr_node_ids 1U |
489 | #define nr_online_nodes 1 | 489 | #define nr_online_nodes 1 |
490 | 490 | ||
491 | #define node_set_online(node) node_set_state((node), N_ONLINE) | 491 | #define node_set_online(node) node_set_state((node), N_ONLINE) |
diff --git a/mm/list_lru.c b/mm/list_lru.c index 5b30625fd365..0730bf8ff39f 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c | |||
@@ -601,7 +601,6 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware, | |||
601 | struct lock_class_key *key, struct shrinker *shrinker) | 601 | struct lock_class_key *key, struct shrinker *shrinker) |
602 | { | 602 | { |
603 | int i; | 603 | int i; |
604 | size_t size = sizeof(*lru->node) * nr_node_ids; | ||
605 | int err = -ENOMEM; | 604 | int err = -ENOMEM; |
606 | 605 | ||
607 | #ifdef CONFIG_MEMCG_KMEM | 606 | #ifdef CONFIG_MEMCG_KMEM |
@@ -612,7 +611,7 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware, | |||
612 | #endif | 611 | #endif |
613 | memcg_get_cache_ids(); | 612 | memcg_get_cache_ids(); |
614 | 613 | ||
615 | lru->node = kzalloc(size, GFP_KERNEL); | 614 | lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL); |
616 | if (!lru->node) | 615 | if (!lru->node) |
617 | goto out; | 616 | goto out; |
618 | 617 | ||
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 30bda8d7fb5c..45cd1f84268a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -4429,7 +4429,7 @@ static void mem_cgroup_free(struct mem_cgroup *memcg) | |||
4429 | static struct mem_cgroup *mem_cgroup_alloc(void) | 4429 | static struct mem_cgroup *mem_cgroup_alloc(void) |
4430 | { | 4430 | { |
4431 | struct mem_cgroup *memcg; | 4431 | struct mem_cgroup *memcg; |
4432 | size_t size; | 4432 | unsigned int size; |
4433 | int node; | 4433 | int node; |
4434 | 4434 | ||
4435 | size = sizeof(struct mem_cgroup); | 4435 | size = sizeof(struct mem_cgroup); |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 11a5f50efd97..8df43caf2eb7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -289,7 +289,7 @@ EXPORT_SYMBOL(movable_zone); | |||
289 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ | 289 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ |
290 | 290 | ||
291 | #if MAX_NUMNODES > 1 | 291 | #if MAX_NUMNODES > 1 |
292 | int nr_node_ids __read_mostly = MAX_NUMNODES; | 292 | unsigned int nr_node_ids __read_mostly = MAX_NUMNODES; |
293 | int nr_online_nodes __read_mostly = 1; | 293 | int nr_online_nodes __read_mostly = 1; |
294 | EXPORT_SYMBOL(nr_node_ids); | 294 | EXPORT_SYMBOL(nr_node_ids); |
295 | EXPORT_SYMBOL(nr_online_nodes); | 295 | EXPORT_SYMBOL(nr_online_nodes); |
diff --git a/mm/slab.c b/mm/slab.c --- a/mm/slab.c +++ b/mm/slab.c | |||
@@ -677,12 +677,11 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries, | |||
677 | static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) | 677 | static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) |
678 | { | 678 | { |
679 | struct alien_cache **alc_ptr; | 679 | struct alien_cache **alc_ptr; |
680 | size_t memsize = sizeof(void *) * nr_node_ids; | ||
681 | int i; | 680 | int i; |
682 | 681 | ||
683 | if (limit > 1) | 682 | if (limit > 1) |
684 | limit = 12; | 683 | limit = 12; |
685 | alc_ptr = kzalloc_node(memsize, gfp, node); | 684 | alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node); |
686 | if (!alc_ptr) | 685 | if (!alc_ptr) |
687 | return NULL; | 686 | return NULL; |
688 | 687 | ||
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
@@ -4262,7 +4262,7 @@ void __init kmem_cache_init(void) | |||
4262 | cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, | 4262 | cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, |
4263 | slub_cpu_dead); | 4263 | slub_cpu_dead); |
4264 | 4264 | ||
4265 | pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%d\n", | 4265 | pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", |
4266 | cache_line_size(), | 4266 | cache_line_size(), |
4267 | slub_min_order, slub_max_order, slub_min_objects, | 4267 | slub_min_order, slub_max_order, slub_min_objects, |
4268 | nr_cpu_ids, nr_node_ids); | 4268 | nr_cpu_ids, nr_node_ids); |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 57e9b1b31d55..a14257ac0476 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -2713,7 +2713,7 @@ static struct swap_info_struct *alloc_swap_info(void) | |||
2713 | struct swap_info_struct *p; | 2713 | struct swap_info_struct *p; |
2714 | unsigned int type; | 2714 | unsigned int type; |
2715 | int i; | 2715 | int i; |
2716 | int size = sizeof(*p) + nr_node_ids * sizeof(struct plist_node); | 2716 | unsigned int size = sizeof(*p) + nr_node_ids * sizeof(struct plist_node); |
2717 | 2717 | ||
2718 | p = kvzalloc(size, GFP_KERNEL); | 2718 | p = kvzalloc(size, GFP_KERNEL); |
2719 | if (!p) | 2719 | if (!p) |
diff --git a/mm/vmscan.c b/mm/vmscan.c index 209c2c78a087..e1f7ccdc0a90 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -374,7 +374,7 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone | |||
374 | */ | 374 | */ |
375 | int prealloc_shrinker(struct shrinker *shrinker) | 375 | int prealloc_shrinker(struct shrinker *shrinker) |
376 | { | 376 | { |
377 | size_t size = sizeof(*shrinker->nr_deferred); | 377 | unsigned int size = sizeof(*shrinker->nr_deferred); |
378 | 378 | ||
379 | if (shrinker->flags & SHRINKER_NUMA_AWARE) | 379 | if (shrinker->flags & SHRINKER_NUMA_AWARE) |
380 | size *= nr_node_ids; | 380 | size *= nr_node_ids; |