diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-03 19:50:31 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-03 19:50:31 -0400 |
commit | 03ffbcdd7898c0b5299efeb9f18de927487ec1cf (patch) | |
tree | 0569222e4dc9db22049d7d8d15920cc085a194f6 /block/blk-mq-cpumap.c | |
parent | 1b044f1cfc65a7d90b209dfabd57e16d98b58c5b (diff) | |
parent | f9632de40ee0161e864bea8c1b017d957fd7312c (diff) |
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner:
"The irq department delivers:
- Expand the generic infrastructure handling the irq migration on CPU
hotplug and convert X86 over to it. (Thomas Gleixner)
Aside of consolidating code this is a preparatory change for:
- Finalizing the affinity management for multi-queue devices. The
main change here is to shut down interrupts which are affine to an
outgoing CPU and re-enabling them when the CPU comes online again.
That avoids moving interrupts pointlessly around and breaking and
reestablishing affinities for no value. (Christoph Hellwig)
Note: This contains also the BLOCK-MQ and NVME changes which depend
on the rework of the irq core infrastructure. Jens acked them and
agreed that they should go with the irq changes.
- Consolidation of irq domain code (Marc Zyngier)
- State tracking consolidation in the core code (Jeffy Chen)
- Add debug infrastructure for hierarchical irq domains (Thomas
Gleixner)
- Infrastructure enhancement for managing generic interrupt chips via
devmem (Bartosz Golaszewski)
- Constification work all over the place (Tobias Klauser)
- Two new interrupt controller drivers for MVEBU (Thomas Petazzoni)
- The usual set of fixes, updates and enhancements all over the
place"
* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (112 commits)
irqchip/or1k-pic: Fix interrupt acknowledgement
irqchip/irq-mvebu-gicp: Allocate enough memory for spi_bitmap
irqchip/gic-v3: Fix out-of-bound access in gic_set_affinity
nvme: Allocate queues for all possible CPUs
blk-mq: Create hctx for each present CPU
blk-mq: Include all present CPUs in the default queue mapping
genirq: Avoid unnecessary low level irq function calls
genirq: Set irq masked state when initializing irq_desc
genirq/timings: Add infrastructure for estimating the next interrupt arrival time
genirq/timings: Add infrastructure to track the interrupt timings
genirq/debugfs: Remove pointless NULL pointer check
irqchip/gic-v3-its: Don't assume GICv3 hardware supports 16bit INTID
irqchip/gic-v3-its: Add ACPI NUMA node mapping
irqchip/gic-v3-its-platform-msi: Make of_device_ids const
irqchip/gic-v3-its: Make of_device_ids const
irqchip/irq-mvebu-icu: Add new driver for Marvell ICU
irqchip/irq-mvebu-gicp: Add new driver for Marvell GICP
dt-bindings/interrupt-controller: Add DT binding for the Marvell ICU
genirq/irqdomain: Remove auto-recursive hierarchy support
irqchip/MSI: Use irq_domain_update_bus_token instead of an open coded access
...
Diffstat (limited to 'block/blk-mq-cpumap.c')
-rw-r--r-- | block/blk-mq-cpumap.c | 10 |
1 file changed, 4 insertions, 6 deletions
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 2cca4fc43f45..4891f042a22f 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -14,13 +14,12 @@ | |||
14 | #include "blk.h" | 14 | #include "blk.h" |
15 | #include "blk-mq.h" | 15 | #include "blk-mq.h" |
16 | 16 | ||
17 | static int cpu_to_queue_index(unsigned int nr_queues, const int cpu, | 17 | static int cpu_to_queue_index(unsigned int nr_queues, const int cpu) |
18 | const struct cpumask *online_mask) | ||
19 | { | 18 | { |
20 | /* | 19 | /* |
21 | * Non online CPU will be mapped to queue index 0. | 20 | * Non online CPU will be mapped to queue index 0. |
22 | */ | 21 | */ |
23 | if (!cpumask_test_cpu(cpu, online_mask)) | 22 | if (!cpu_online(cpu)) |
24 | return 0; | 23 | return 0; |
25 | return cpu % nr_queues; | 24 | return cpu % nr_queues; |
26 | } | 25 | } |
@@ -40,7 +39,6 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set) | |||
40 | { | 39 | { |
41 | unsigned int *map = set->mq_map; | 40 | unsigned int *map = set->mq_map; |
42 | unsigned int nr_queues = set->nr_hw_queues; | 41 | unsigned int nr_queues = set->nr_hw_queues; |
43 | const struct cpumask *online_mask = cpu_online_mask; | ||
44 | unsigned int cpu, first_sibling; | 42 | unsigned int cpu, first_sibling; |
45 | 43 | ||
46 | for_each_possible_cpu(cpu) { | 44 | for_each_possible_cpu(cpu) { |
@@ -51,11 +49,11 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set) | |||
51 | * performace optimizations. | 49 | * performace optimizations. |
52 | */ | 50 | */ |
53 | if (cpu < nr_queues) { | 51 | if (cpu < nr_queues) { |
54 | map[cpu] = cpu_to_queue_index(nr_queues, cpu, online_mask); | 52 | map[cpu] = cpu_to_queue_index(nr_queues, cpu); |
55 | } else { | 53 | } else { |
56 | first_sibling = get_first_sibling(cpu); | 54 | first_sibling = get_first_sibling(cpu); |
57 | if (first_sibling == cpu) | 55 | if (first_sibling == cpu) |
58 | map[cpu] = cpu_to_queue_index(nr_queues, cpu, online_mask); | 56 | map[cpu] = cpu_to_queue_index(nr_queues, cpu); |
59 | else | 57 | else |
60 | map[cpu] = map[first_sibling]; | 58 | map[cpu] = map[first_sibling]; |
61 | } | 59 | } |