author		Ingo Molnar <mingo@elte.hu>	2009-01-16 11:46:22 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-16 11:46:22 -0500
commit		5a2dd72abdae75ea2960145e0549635ce4e0be96 (patch)
tree		44dba0119c75679a17215200f92ab23bdde9efc2 /kernel/irq/manage.c
parent		efdc64f0c792ea744bcc9203f35b908e66d42f41 (diff)
parent		7cb36b6ccdca03bd87e8faca7fd920643dd1aec7 (diff)
Merge branch 'linus' into irq/genirq
Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--	kernel/irq/manage.c	43
1 file changed, 30 insertions, 13 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 46953a06f4a8..cd0cd8dcb345 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -16,8 +16,15 @@
 #include "internals.h"
 
 #ifdef CONFIG_SMP
+cpumask_var_t irq_default_affinity;
 
-cpumask_t irq_default_affinity = CPU_MASK_ALL;
+static int init_irq_default_affinity(void)
+{
+	alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
+	cpumask_setall(irq_default_affinity);
+	return 0;
+}
+core_initcall(init_irq_default_affinity);
 
 /**
  * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
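Note: the hunk above replaces the fixed-size cpumask_t irq_default_affinity = CPU_MASK_ALL with a cpumask_var_t that is allocated and filled from a core_initcall. With CONFIG_CPUMASK_OFFSTACK a cpumask_var_t is a pointer that must be allocated before use; without it, alloc_cpumask_var() is effectively a no-op over an embedded array. A minimal sketch of the same allocate/initialize/free pattern for a hypothetical subsystem-owned mask (my_mask, my_mask_init and my_mask_exit are illustrative names, not part of this patch):

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpumask.h>

static cpumask_var_t my_mask;	/* pointer if CONFIG_CPUMASK_OFFSTACK, embedded array otherwise */

static int __init my_mask_init(void)
{
	/* allocation can only fail in the off-stack configuration */
	if (!alloc_cpumask_var(&my_mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_setall(my_mask);	/* start out allowing every CPU */
	return 0;
}
core_initcall(my_mask_init);

static void my_mask_exit(void)
{
	free_cpumask_var(my_mask);	/* no-op unless the mask is off-stack */
}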
@@ -79,7 +86,7 @@ int irq_can_set_affinity(unsigned int irq)
  * @cpumask: cpumask
  *
  */
-int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
+int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
@@ -91,14 +98,14 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		desc->affinity = cpumask;
+		cpumask_copy(&desc->affinity, cpumask);
 		desc->chip->set_affinity(irq, cpumask);
 	} else {
 		desc->status |= IRQ_MOVE_PENDING;
-		desc->pending_mask = cpumask;
+		cpumask_copy(&desc->pending_mask, cpumask);
 	}
 #else
-	desc->affinity = cpumask;
+	cpumask_copy(&desc->affinity, cpumask);
 	desc->chip->set_affinity(irq, cpumask);
 #endif
 	desc->status |= IRQ_AFFINITY_SET;
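Note: the two hunks above change the public signature, so irq_set_affinity() now takes a const struct cpumask * instead of a cpumask_t by value, and the descriptor fields are updated with cpumask_copy() rather than struct assignment. A hedged sketch of the caller-side difference, assuming the cpumask_of_cpu()/cpumask_of() helpers of the old and new cpumask APIs and an illustrative wrapper pin_irq_to_cpu() that is not part of this patch:

#include <linux/interrupt.h>
#include <linux/cpumask.h>

/* Pin an interrupt line to a single CPU. */
static int pin_irq_to_cpu(unsigned int irq, int cpu)
{
	/*
	 * Before this series the mask was passed by value:
	 *	return irq_set_affinity(irq, cpumask_of_cpu(cpu));
	 * With the new API a pointer to a const mask is passed instead:
	 */
	return irq_set_affinity(irq, cpumask_of(cpu));
}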
@@ -112,26 +119,24 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
  */
 int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 {
-	cpumask_t mask;
-
 	if (!irq_can_set_affinity(irq))
 		return 0;
 
-	cpus_and(mask, cpu_online_map, irq_default_affinity);
-
 	/*
 	 * Preserve an userspace affinity setup, but make sure that
 	 * one of the targets is online.
 	 */
 	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-		if (cpus_intersects(desc->affinity, cpu_online_map))
-			mask = desc->affinity;
+		if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+		    < nr_cpu_ids)
+			goto set_affinity;
 		else
 			desc->status &= ~IRQ_AFFINITY_SET;
 	}
 
-	desc->affinity = mask;
-	desc->chip->set_affinity(irq, mask);
+	cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+set_affinity:
+	desc->chip->set_affinity(irq, &desc->affinity);
 
 	return 0;
 }
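Note: the do_irq_select_affinity() rework drops the on-stack cpumask_t temporary; the result is built directly in desc->affinity, and the check that a preserved userspace affinity still contains an online CPU becomes cpumask_any_and(...) < nr_cpu_ids, which is the same predicate as cpumask_intersects(). A small sketch of that equivalence (affinity_still_online() is a hypothetical helper, not in the patch):

#include <linux/types.h>
#include <linux/cpumask.h>

/* True when @mask still targets at least one online CPU. */
static bool affinity_still_online(const struct cpumask *mask)
{
	/*
	 * cpumask_any_and() returns the first CPU present in both masks,
	 * or a value >= nr_cpu_ids when the intersection is empty, so the
	 * comparison below matches cpumask_intersects(mask, cpu_online_mask).
	 */
	return cpumask_any_and(mask, cpu_online_mask) < nr_cpu_ids;
}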
@@ -676,6 +681,18 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	struct irq_desc *desc;
 	int retval;
 
+	/*
+	 * handle_IRQ_event() always ignores IRQF_DISABLED except for
+	 * the _first_ irqaction (sigh).  That can cause oopsing, but
+	 * the behavior is classified as "will not fix" so we need to
+	 * start nudging drivers away from using that idiom.
+	 */
+	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
+			== (IRQF_SHARED|IRQF_DISABLED))
+		pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
+				"guaranteed on shared IRQs\n",
+				irq, devname);
+
 #ifdef CONFIG_LOCKDEP
 	/*
 	 * Lockdep wants atomic interrupt handlers:
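Note: the last hunk warns drivers that combine IRQF_SHARED with IRQF_DISABLED, since handle_IRQ_event() honours IRQF_DISABLED only for the first irqaction on a line and the flag therefore gives no guarantee once the IRQ is shared. A hedged sketch of a request_irq() call that would now trigger the new pr_warning (my_irq_handler, my_request and "mydev" are illustrative placeholders, not taken from this patch):

#include <linux/interrupt.h>

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_request(unsigned int irq, void *dev)
{
	/*
	 * Shared line plus IRQF_DISABLED: the flag is only honoured for the
	 * first action on the line, so this combination now logs
	 * "IRQ <n>/mydev: IRQF_DISABLED is not guaranteed on shared IRQs".
	 */
	return request_irq(irq, my_irq_handler,
			   IRQF_SHARED | IRQF_DISABLED,
			   "mydev", dev);
}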