diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-03 19:50:31 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-03 19:50:31 -0400 |
commit | 03ffbcdd7898c0b5299efeb9f18de927487ec1cf (patch) | |
tree | 0569222e4dc9db22049d7d8d15920cc085a194f6 /kernel/irq/migration.c | |
parent | 1b044f1cfc65a7d90b209dfabd57e16d98b58c5b (diff) | |
parent | f9632de40ee0161e864bea8c1b017d957fd7312c (diff) |
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner:
"The irq department delivers:
- Expand the generic infrastructure handling the irq migration on CPU
hotplug and convert X86 over to it. (Thomas Gleixner)
Aside of consolidating code this is a preparatory change for:
- Finalizing the affinity management for multi-queue devices. The
main change here is to shut down interrupts which are affine to an
outgoing CPU and re-enabling them when the CPU comes online again.
That avoids moving interrupts pointlessly around and breaking and
reestablishing affinities for no value. (Christoph Hellwig)
Note: This contains also the BLOCK-MQ and NVME changes which depend
on the rework of the irq core infrastructure. Jens acked them and
agreed that they should go with the irq changes.
- Consolidation of irq domain code (Marc Zyngier)
- State tracking consolidation in the core code (Jeffy Chen)
- Add debug infrastructure for hierarchical irq domains (Thomas
Gleixner)
- Infrastructure enhancement for managing generic interrupt chips via
devres (Bartosz Golaszewski)
- Constification work all over the place (Tobias Klauser)
- Two new interrupt controller drivers for MVEBU (Thomas Petazzoni)
- The usual set of fixes, updates and enhancements all over the
place"
* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (112 commits)
irqchip/or1k-pic: Fix interrupt acknowledgement
irqchip/irq-mvebu-gicp: Allocate enough memory for spi_bitmap
irqchip/gic-v3: Fix out-of-bound access in gic_set_affinity
nvme: Allocate queues for all possible CPUs
blk-mq: Create hctx for each present CPU
blk-mq: Include all present CPUs in the default queue mapping
genirq: Avoid unnecessary low level irq function calls
genirq: Set irq masked state when initializing irq_desc
genirq/timings: Add infrastructure for estimating the next interrupt arrival time
genirq/timings: Add infrastructure to track the interrupt timings
genirq/debugfs: Remove pointless NULL pointer check
irqchip/gic-v3-its: Don't assume GICv3 hardware supports 16bit INTID
irqchip/gic-v3-its: Add ACPI NUMA node mapping
irqchip/gic-v3-its-platform-msi: Make of_device_ids const
irqchip/gic-v3-its: Make of_device_ids const
irqchip/irq-mvebu-icu: Add new driver for Marvell ICU
irqchip/irq-mvebu-gicp: Add new driver for Marvell GICP
dt-bindings/interrupt-controller: Add DT binding for the Marvell ICU
genirq/irqdomain: Remove auto-recursive hierarchy support
irqchip/MSI: Use irq_domain_update_bus_token instead of an open coded access
...
Diffstat (limited to 'kernel/irq/migration.c')
-rw-r--r-- | kernel/irq/migration.c | 30 |
1 files changed, 30 insertions, 0 deletions
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index 37ddb7bda651..6ca054a3f91d 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c | |||
@@ -4,6 +4,36 @@ | |||
4 | 4 | ||
5 | #include "internals.h" | 5 | #include "internals.h" |
6 | 6 | ||
7 | /** | ||
8 | * irq_fixup_move_pending - Cleanup irq move pending from a dying CPU | ||
9 | * @desc: Interrupt descpriptor to clean up | ||
10 | * @force_clear: If set clear the move pending bit unconditionally. | ||
11 | * If not set, clear it only when the dying CPU is the | ||
12 | * last one in the pending mask. | ||
13 | * | ||
14 | * Returns true if the pending bit was set and the pending mask contains an | ||
15 | * online CPU other than the dying CPU. | ||
16 | */ | ||
17 | bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear) | ||
18 | { | ||
19 | struct irq_data *data = irq_desc_get_irq_data(desc); | ||
20 | |||
21 | if (!irqd_is_setaffinity_pending(data)) | ||
22 | return false; | ||
23 | |||
24 | /* | ||
25 | * The outgoing CPU might be the last online target in a pending | ||
26 | * interrupt move. If that's the case clear the pending move bit. | ||
27 | */ | ||
28 | if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) { | ||
29 | irqd_clr_move_pending(data); | ||
30 | return false; | ||
31 | } | ||
32 | if (force_clear) | ||
33 | irqd_clr_move_pending(data); | ||
34 | return true; | ||
35 | } | ||
36 | |||
7 | void irq_move_masked_irq(struct irq_data *idata) | 37 | void irq_move_masked_irq(struct irq_data *idata) |
8 | { | 38 | { |
9 | struct irq_desc *desc = irq_data_to_desc(idata); | 39 | struct irq_desc *desc = irq_data_to_desc(idata); |