author		Alexey Dobriyan <adobriyan@gmail.com>	2017-09-08 19:14:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-08 21:26:48 -0400
commit		9b130ad5bb8255ee8534d92d67e12b2a4887eacb
tree		125a5e867be6b0dbd4958ca41426e2c733768110
parent		ac036f9570a2d318b7d8dbbdbf0e269d7cc68cef
treewide: make "nr_cpu_ids" unsigned
First, the number of CPUs can't be negative.
Second, the different signedness leads to suboptimal code in the following
cases:
1)
	kmalloc(nr_cpu_ids * sizeof(X));

"int" has to be sign-extended to size_t.
2)
	while (*pos < nr_cpu_ids)	/* where pos is a loff_t * */

MOVSXD is 1 byte longer than the equivalent MOV.
Other cases exist as well.  Basically, the compiler is told that nr_cpu_ids
can't be negative, which it cannot deduce on its own when the type is "int".
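
For illustration only, case 1 can be reproduced with a few lines of
stand-alone C.  This is a minimal sketch, not part of the patch; "struct foo"
and the function names are invented for the example:

	#include <stddef.h>

	struct foo { long pad[4]; };

	/* Like the old "extern int nr_cpu_ids": the count has to be
	 * sign-extended to size_t (MOVSXD on x86-64) before the size
	 * calculation. */
	size_t bytes_signed(int nr)
	{
		return nr * sizeof(struct foo);
	}

	/* Like the new "extern unsigned int nr_cpu_ids": only a zero
	 * extension is needed, which a plain 32-bit MOV already provides. */
	size_t bytes_unsigned(unsigned int nr)
	{
		return nr * sizeof(struct foo);
	}

Compiling both with "gcc -O2 -S" and diffing the assembly shows the extra
sign-extension in the first function.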
Code savings on allyesconfig kernel: -3KB
add/remove: 0/0 grow/shrink: 25/264 up/down: 261/-3631 (-3370)
function old new delta
coretemp_cpu_online 450 512 +62
rcu_init_one 1234 1272 +38
pci_device_probe 374 399 +25
...
pgdat_reclaimable_pages 628 556 -72
select_fallback_rq 446 369 -77
task_numa_find_cpu 1923 1807 -116
Link: http://lkml.kernel.org/r/20170819114959.GA30580@avx2
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 arch/arm64/kernel/smp.c              | 2 +-
 arch/powerpc/kernel/paca.c           | 2 +-
 arch/powerpc/kernel/setup-common.c   | 2 +-
 arch/powerpc/sysdev/xive/native.c    | 4 ++--
 arch/tile/kernel/setup.c             | 2 +-
 arch/x86/kernel/apic/apic.c          | 2 +-
 arch/x86/kernel/setup_percpu.c       | 2 +-
 arch/x86/kernel/smpboot.c            | 2 +-
 drivers/base/cpu.c                   | 4 ++--
 drivers/scsi/scsi_debug.c            | 2 +-
 include/linux/cpumask.h              | 6 +++---
 kernel/rcu/tree.c                    | 2 +-
 kernel/rcu/tree_plugin.h             | 2 +-
 kernel/sched/topology.c              | 2 +-
 kernel/smp.c                         | 2 +-
 kernel/trace/trace_functions_graph.c | 2 +-
 mm/slub.c                            | 2 +-
 17 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ffe089942ac4..9f7195a5773e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -690,7 +690,7 @@ void __init smp_init_cpus(void)
 				      acpi_parse_gic_cpu_interface, 0);
 
 	if (cpu_count > nr_cpu_ids)
-		pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n",
+		pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n",
 			cpu_count, nr_cpu_ids);
 
 	if (!bootcpu_valid) {
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 70f073d6c3b2..2ff2b8a19f71 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -224,7 +224,7 @@ void __init allocate_pacas(void)
 	paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit));
 	memset(paca, 0, paca_size);
 
-	printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
+	printk(KERN_DEBUG "Allocated %u bytes for %u pacas at %p\n",
 		paca_size, nr_cpu_ids, paca);
 
 	allocate_lppacas(nr_cpu_ids, limit);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 7de73589d8e2..0ac741fae90e 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -551,7 +551,7 @@ void __init smp_setup_cpu_maps(void)
 		if (maxcpus > nr_cpu_ids) {
 			printk(KERN_WARNING
 			       "Partition configured for %d cpus, "
-			       "operating system maximum is %d.\n",
+			       "operating system maximum is %u.\n",
 			       maxcpus, nr_cpu_ids);
 			maxcpus = nr_cpu_ids;
 		} else
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 44f3a25ca630..ebc244b08d67 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -511,13 +511,13 @@ static bool xive_parse_provisioning(struct device_node *np)
 static void xive_native_setup_pools(void)
 {
 	/* Allocate a pool big enough */
-	pr_debug("XIVE: Allocating VP block for pool size %d\n", nr_cpu_ids);
+	pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids);
 
 	xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
 	if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
 		pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");
 
-	pr_debug("XIVE: Pool VPs allocated at 0x%x for %d max CPUs\n",
+	pr_debug("XIVE: Pool VPs allocated at 0x%x for %u max CPUs\n",
 		 xive_pool_vps, nr_cpu_ids);
 }
 
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 443a70bccc1c..6becb96c60a0 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1200,7 +1200,7 @@ static void __init validate_hv(void)
 	 * We use a struct cpumask for this, so it must be big enough.
 	 */
 	if ((smp_height * smp_width) > nr_cpu_ids)
-		early_panic("Hypervisor %d x %d grid too big for Linux NR_CPUS %d\n",
+		early_panic("Hypervisor %d x %d grid too big for Linux NR_CPUS %u\n",
 			    smp_height, smp_width, nr_cpu_ids);
 #endif
 
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 7834f73efbf1..8315e2f517a7 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2097,7 +2097,7 @@ static int allocate_logical_cpuid(int apicid)
 
 	/* Allocate a new cpuid. */
 	if (nr_logical_cpuids >= nr_cpu_ids) {
-		WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %i reached. "
+		WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %u reached. "
 			     "Processor %d/0x%x and the rest are ignored.\n",
 			     nr_cpu_ids, nr_logical_cpuids, apicid);
 		return -EINVAL;
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 6e8fcb6f7e1e..28dafed6c682 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -168,7 +168,7 @@ void __init setup_per_cpu_areas(void)
 	unsigned long delta;
 	int rc;
 
-	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
+	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%d\n",
 		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
 
 	/*
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 54b9e89d4d6b..cd6622c3204e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1461,7 +1461,7 @@ __init void prefill_possible_map(void)
 
 	/* nr_cpu_ids could be reduced via nr_cpus= */
 	if (possible > nr_cpu_ids) {
-		pr_warn("%d Processors exceeds NR_CPUS limit of %d\n",
+		pr_warn("%d Processors exceeds NR_CPUS limit of %u\n",
 			possible, nr_cpu_ids);
 		possible = nr_cpu_ids;
 	}
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 2c3b359b3536..321cd7b4d817 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -256,9 +256,9 @@ static ssize_t print_cpus_offline(struct device *dev,
 			buf[n++] = ',';
 
 		if (nr_cpu_ids == total_cpus-1)
-			n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
+			n += snprintf(&buf[n], len - n, "%u", nr_cpu_ids);
 		else
-			n += snprintf(&buf[n], len - n, "%d-%d",
+			n += snprintf(&buf[n], len - n, "%u-%d",
 				      nr_cpu_ids, total_cpus-1);
 	}
 
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 77a0335eb757..09ba494f8896 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5465,7 +5465,7 @@ static int sdebug_driver_probe(struct device * dev)
 		return error;
 	}
 	if (submit_queues > nr_cpu_ids) {
-		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%d\n",
+		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
 			my_name, submit_queues, nr_cpu_ids);
 		submit_queues = nr_cpu_ids;
 	}
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 4bf4479a3a80..68c5a8290275 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -32,15 +32,15 @@ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
 #define cpumask_pr_args(maskp)		nr_cpu_ids, cpumask_bits(maskp)
 
 #if NR_CPUS == 1
-#define nr_cpu_ids		1
+#define nr_cpu_ids		1U
 #else
-extern int nr_cpu_ids;
+extern unsigned int nr_cpu_ids;
 #endif
 
 #ifdef CONFIG_CPUMASK_OFFSTACK
 /* Assuming NR_CPUS is huge, a runtime limit is more efficient.  Also,
  * not all bits may be allocated. */
-#define nr_cpumask_bits	((unsigned int)nr_cpu_ids)
+#define nr_cpumask_bits	nr_cpu_ids
 #else
 #define nr_cpumask_bits	((unsigned int)NR_CPUS)
 #endif
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 84fe96641b2e..1250e4bd4b85 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -4091,7 +4091,7 @@ static void __init rcu_init_geometry(void)
 	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
 	    nr_cpu_ids == NR_CPUS)
 		return;
-	pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n",
+	pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
 		rcu_fanout_leaf, nr_cpu_ids);
 
 	/*
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 55bde94b9572..e012b9be777e 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -89,7 +89,7 @@ static void __init rcu_bootup_announce_oddness(void)
 	if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
 		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
 	if (nr_cpu_ids != NR_CPUS)
-		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
+		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
 #ifdef CONFIG_RCU_BOOST
 	pr_info("\tRCU priority boosting: priority %d delay %d ms.\n", kthread_prio, CONFIG_RCU_BOOST_DELAY);
 #endif
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 6f7b43982f73..5d0062cc10cb 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -473,7 +473,7 @@ static int __init isolated_cpu_setup(char *str)
 	alloc_bootmem_cpumask_var(&cpu_isolated_map);
 	ret = cpulist_parse(str, cpu_isolated_map);
 	if (ret) {
-		pr_err("sched: Error, all isolcpus= values must be between 0 and %d\n", nr_cpu_ids);
+		pr_err("sched: Error, all isolcpus= values must be between 0 and %u\n", nr_cpu_ids);
 		return 0;
 	}
 	return 1;
diff --git a/kernel/smp.c b/kernel/smp.c
index 81cfca9b4cc3..c94dd85c8d41 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -550,7 +550,7 @@ static int __init maxcpus(char *str)
 early_param("maxcpus", maxcpus);
 
 /* Setup number of possible processor ids */
-int nr_cpu_ids __read_mostly = NR_CPUS;
+unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
 EXPORT_SYMBOL(nr_cpu_ids);
 
 /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d56123cdcc89..b8f1f54731af 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -1543,7 +1543,7 @@ fs_initcall(init_graph_tracefs);
 
 static __init int init_graph_trace(void)
 {
-	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
+	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);
 
 	if (!register_trace_event(&graph_trace_entry_event)) {
 		pr_warn("Warning: could not register graph trace events\n");
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4232,7 +4232,7 @@ void __init kmem_cache_init(void)
 	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
 				  slub_cpu_dead);
 
-	pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%d, Nodes=%d\n",
+	pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%u, Nodes=%d\n",
 		cache_line_size(),
 		slub_min_order, slub_max_order, slub_min_objects,
 		nr_cpu_ids, nr_node_ids);