| field     | value                                    | detail                    |
|-----------|------------------------------------------|---------------------------|
| author    | Len Brown <len.brown@intel.com>          | 2009-04-05 02:14:15 -0400 |
| committer | Len Brown <len.brown@intel.com>          | 2009-04-05 02:14:15 -0400 |
| commit    | 478c6a43fcbc6c11609f8cee7c7b57223907754f |                           |
| tree      | a7f7952099da60d33032aed6de9c0c56c9f8779e | kernel/irq                |
| parent    | 8a3f257c704e02aee9869decd069a806b45be3f1 |                           |
| parent    | 6bb597507f9839b13498781e481f5458aea33620 |                           |
Merge branch 'linus' into release
Conflicts:
arch/x86/kernel/cpu/cpufreq/longhaul.c
Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'kernel/irq')
| mode       | file                      | lines changed |
|------------|---------------------------|---------------|
| -rw-r--r-- | kernel/irq/Makefile       | 1             |
| -rw-r--r-- | kernel/irq/chip.c         | 12            |
| -rw-r--r-- | kernel/irq/handle.c       | 93            |
| -rw-r--r-- | kernel/irq/internals.h    | 10            |
| -rw-r--r-- | kernel/irq/manage.c       | 235           |
| -rw-r--r-- | kernel/irq/migration.c    | 12            |
| -rw-r--r-- | kernel/irq/numa_migrate.c | 30            |
| -rw-r--r-- | kernel/irq/pm.c           | 79            |
| -rw-r--r-- | kernel/irq/proc.c         | 4             |
| -rw-r--r-- | kernel/irq/spurious.c     | 14            |
10 files changed, 338 insertions, 152 deletions
```diff
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 4dd5b1edac98..3394f8f52964 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
 obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o
+obj-$(CONFIG_PM_SLEEP) += pm.o
```
```diff
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 7de11bd64dfe..c687ba4363f2 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -46,7 +46,10 @@ void dynamic_irq_init(unsigned int irq)
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
 #ifdef CONFIG_SMP
-       cpumask_setall(&desc->affinity);
+       cpumask_setall(desc->affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       cpumask_clear(desc->pending_mask);
+#endif
 #endif
        spin_unlock_irqrestore(&desc->lock, flags);
 }
@@ -78,6 +81,7 @@ void dynamic_irq_cleanup(unsigned int irq)
        desc->handle_irq = handle_bad_irq;
        desc->chip = &no_irq_chip;
        desc->name = NULL;
+       clear_kstat_irqs(desc);
        spin_unlock_irqrestore(&desc->lock, flags);
 }
 
@@ -290,7 +294,8 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
                desc->chip->mask_ack(irq);
        else {
                desc->chip->mask(irq);
-               desc->chip->ack(irq);
+               if (desc->chip->ack)
+                       desc->chip->ack(irq);
        }
 }
 
@@ -476,7 +481,8 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
        kstat_incr_irqs_this_cpu(irq, desc);
 
        /* Start handling the irq */
-       desc->chip->ack(irq);
+       if (desc->chip->ack)
+               desc->chip->ack(irq);
        desc = irq_remap_to_desc(irq, desc);
 
        /* Mark the IRQ currently in progress.*/
```
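Both chip.c hunks make the `ack` callback optional: an `irq_chip` implementation may legitimately leave it NULL, so `mask_ack_irq()` and `handle_edge_irq()` now test the pointer before calling through it. A minimal userspace sketch of this guarded-callback pattern (the struct and function names are illustrative, not the kernel API):

```c
#include <stdio.h>

/* Illustrative ops table: ack may legitimately be NULL. */
struct chip_ops {
        void (*mask)(int irq);
        void (*ack)(int irq);   /* optional callback */
};

static void demo_mask(int irq) { printf("mask irq %d\n", irq); }

/* Mirrors the guard the patch adds before the ack() call. */
static void mask_ack(const struct chip_ops *ops, int irq)
{
        ops->mask(irq);
        if (ops->ack)           /* don't dereference a missing callback */
                ops->ack(irq);
}

int main(void)
{
        struct chip_ops chip = { .mask = demo_mask, .ack = NULL };
        mask_ack(&chip, 5);     /* safe even though .ack is absent */
        return 0;
}
```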
```diff
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 3aba8d12f328..9ebf77968871 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -17,6 +17,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/rculist.h>
 #include <linux/hash.h>
+#include <linux/bootmem.h>
 
 #include "internals.h"
 
@@ -69,6 +70,7 @@ int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
 #ifdef CONFIG_SPARSE_IRQ
+
 static struct irq_desc irq_desc_init = {
        .irq = -1,
        .status = IRQ_DISABLED,
@@ -76,26 +78,25 @@ static struct irq_desc irq_desc_init = {
        .handle_irq = handle_bad_irq,
        .depth = 1,
        .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-       .affinity = CPU_MASK_ALL
-#endif
 };
 
 void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
 {
-       unsigned long bytes;
-       char *ptr;
        int node;
-
-       /* Compute how many bytes we need per irq and allocate them */
-       bytes = nr * sizeof(unsigned int);
+       void *ptr;
 
        node = cpu_to_node(cpu);
-       ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
-       printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n", cpu, node);
+       ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);
 
-       if (ptr)
-               desc->kstat_irqs = (unsigned int *)ptr;
+       /*
+        * don't overwite if can not get new one
+        * init_copy_kstat_irqs() could still use old one
+        */
+       if (ptr) {
+               printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n",
+                        cpu, node);
+               desc->kstat_irqs = ptr;
+       }
 }
 
 static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
@@ -113,6 +114,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
                printk(KERN_ERR "can not alloc kstat_irqs\n");
                BUG_ON(1);
        }
+       if (!init_alloc_desc_masks(desc, cpu, false)) {
+               printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+               BUG_ON(1);
+       }
        arch_init_chip_data(desc, cpu);
 }
 
@@ -121,7 +126,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
  */
 DEFINE_SPINLOCK(sparse_irq_lock);
 
-struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+struct irq_desc **irq_desc_ptrs __read_mostly;
 
 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS_LEGACY-1] = {
@@ -131,14 +136,10 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
                .handle_irq = handle_bad_irq,
                .depth      = 1,
                .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-               .affinity   = CPU_MASK_ALL
-#endif
        }
 };
 
-/* FIXME: use bootmem alloc ...*/
-static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+static unsigned int *kstat_irqs_legacy;
 
 int __init early_irq_init(void)
 {
@@ -148,18 +149,30 @@ int __init early_irq_init(void)
 
        init_irq_default_affinity();
 
+       /* initialize nr_irqs based on nr_cpu_ids */
+       arch_probe_nr_irqs();
+       printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
+
        desc = irq_desc_legacy;
        legacy_count = ARRAY_SIZE(irq_desc_legacy);
 
+       /* allocate irq_desc_ptrs array based on nr_irqs */
+       irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+
+       /* allocate based on nr_cpu_ids */
+       /* FIXME: invert kstat_irgs, and it'd be a per_cpu_alloc'd thing */
+       kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
+                                         sizeof(int));
+
        for (i = 0; i < legacy_count; i++) {
                desc[i].irq = i;
-               desc[i].kstat_irqs = kstat_irqs_legacy[i];
+               desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-
+               init_alloc_desc_masks(&desc[i], 0, true);
                irq_desc_ptrs[i] = desc + i;
        }
 
-       for (i = legacy_count; i < NR_IRQS; i++)
+       for (i = legacy_count; i < nr_irqs; i++)
                irq_desc_ptrs[i] = NULL;
 
        return arch_early_irq_init();
@@ -167,7 +180,10 @@ int __init early_irq_init(void)
 
 struct irq_desc *irq_to_desc(unsigned int irq)
 {
-       return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+       if (irq_desc_ptrs && irq < nr_irqs)
+               return irq_desc_ptrs[irq];
+
+       return NULL;
 }
 
 struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
@@ -176,10 +192,9 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
        unsigned long flags;
        int node;
 
-       if (irq >= NR_IRQS) {
-               printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
-                               irq, NR_IRQS);
-               WARN_ON(1);
+       if (irq >= nr_irqs) {
+               WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
+                       irq, nr_irqs);
                return NULL;
        }
 
@@ -221,12 +236,10 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
                .handle_irq = handle_bad_irq,
                .depth = 1,
                .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-#ifdef CONFIG_SMP
-               .affinity = CPU_MASK_ALL
-#endif
        }
 };
 
+static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
 int __init early_irq_init(void)
 {
        struct irq_desc *desc;
@@ -235,12 +248,16 @@ int __init early_irq_init(void)
 
        init_irq_default_affinity();
 
+       printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);
 
-       for (i = 0; i < count; i++)
+       for (i = 0; i < count; i++) {
                desc[i].irq = i;
+               init_alloc_desc_masks(&desc[i], 0, true);
+               desc[i].kstat_irqs = kstat_irqs_all[i];
+       }
        return arch_early_irq_init();
 }
 
@@ -255,6 +272,11 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
 }
 #endif /* !CONFIG_SPARSE_IRQ */
 
+void clear_kstat_irqs(struct irq_desc *desc)
+{
+       memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
+}
+
 /*
  * What should we do if we get a hw irq event on an illegal vector?
  * Each architecture has to answer this themself.
@@ -328,6 +350,8 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
        irqreturn_t ret, retval = IRQ_NONE;
        unsigned int status = 0;
 
+       WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
+
        if (!(action->flags & IRQF_DISABLED))
                local_irq_enable_in_hardirq();
 
@@ -347,6 +371,11 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 }
 
 #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
+
+#ifdef CONFIG_ENABLE_WARN_DEPRECATED
+# warning __do_IRQ is deprecated. Please convert to proper flow handlers
+#endif
+
 /**
  * __do_IRQ - original all in one highlevel IRQ handler
  * @irq: the interrupt number
@@ -467,12 +496,10 @@ void early_init_irq_lock_class(void)
        }
 }
 
-#ifdef CONFIG_SPARSE_IRQ
 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
        struct irq_desc *desc = irq_to_desc(irq);
        return desc ? desc->kstat_irqs[cpu] : 0;
 }
-#endif
 EXPORT_SYMBOL(kstat_irqs_cpu);
 
```
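After these hunks each `irq_desc` owns a `kstat_irqs` array sized for `nr_cpu_ids` (bootmem-allocated for the legacy descriptors, `kzalloc_node()` otherwise), `clear_kstat_irqs()` zeroes it on cleanup, and `kstat_irqs_cpu()` is built for both the sparse and non-sparse configurations. A rough userspace model of that per-CPU counter layout, with hypothetical demo names:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS_DEMO 4   /* stand-in for nr_cpu_ids */

struct demo_desc {
        unsigned int *kstat_irqs;   /* one counter per possible CPU */
};

/* Like init_kstat_irqs(): allocate, but keep the old array on failure. */
static void demo_init_kstat(struct demo_desc *desc, int nr)
{
        void *ptr = calloc(nr, sizeof(*desc->kstat_irqs));
        if (ptr)
                desc->kstat_irqs = ptr;
}

/* Like the new clear_kstat_irqs(): zero all per-CPU counts. */
static void demo_clear_kstat(struct demo_desc *desc, int nr)
{
        memset(desc->kstat_irqs, 0, nr * sizeof(*desc->kstat_irqs));
}

int main(void)
{
        struct demo_desc desc = { 0 };

        demo_init_kstat(&desc, NR_CPUS_DEMO);
        desc.kstat_irqs[2] = 7;                 /* CPU 2 handled 7 events */
        printf("cpu2: %u\n", desc.kstat_irqs[2]);
        demo_clear_kstat(&desc, NR_CPUS_DEMO);
        printf("cpu2 after clear: %u\n", desc.kstat_irqs[2]);
        free(desc.kstat_irqs);
        return 0;
}
```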
```diff
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index e6d0a43cc125..01ce20eab38f 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -12,11 +12,21 @@ extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
 
 extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                unsigned long flags);
+extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
+extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
 
 extern struct lock_class_key irq_desc_lock_class;
 extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
+extern void clear_kstat_irqs(struct irq_desc *desc);
 extern spinlock_t sparse_irq_lock;
+
+#ifdef CONFIG_SPARSE_IRQ
+/* irq_desc_ptrs allocated at boot time */
+extern struct irq_desc **irq_desc_ptrs;
+#else
+/* irq_desc_ptrs is a fixed size array */
 extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+#endif
 
 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
```
```diff
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 291f03664552..1516ab77355c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -90,14 +90,14 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-               cpumask_copy(&desc->affinity, cpumask);
+               cpumask_copy(desc->affinity, cpumask);
                desc->chip->set_affinity(irq, cpumask);
        } else {
                desc->status |= IRQ_MOVE_PENDING;
-               cpumask_copy(&desc->pending_mask, cpumask);
+               cpumask_copy(desc->pending_mask, cpumask);
        }
 #else
-       cpumask_copy(&desc->affinity, cpumask);
+       cpumask_copy(desc->affinity, cpumask);
        desc->chip->set_affinity(irq, cpumask);
 #endif
        desc->status |= IRQ_AFFINITY_SET;
@@ -109,7 +109,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 /*
  * Generic version of the affinity autoselector.
  */
-int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
+static int setup_affinity(unsigned int irq, struct irq_desc *desc)
 {
        if (!irq_can_set_affinity(irq))
                return 0;
@@ -119,21 +119,21 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
         * one of the targets is online.
         */
        if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-               if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+               if (cpumask_any_and(desc->affinity, cpu_online_mask)
                    < nr_cpu_ids)
                        goto set_affinity;
                else
                        desc->status &= ~IRQ_AFFINITY_SET;
        }
 
-       cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+       cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
 set_affinity:
-       desc->chip->set_affinity(irq, &desc->affinity);
+       desc->chip->set_affinity(irq, desc->affinity);
 
        return 0;
 }
 #else
-static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
+static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
 {
        return irq_select_affinity(irq);
 }
@@ -149,19 +149,33 @@ int irq_select_affinity_usr(unsigned int irq)
        int ret;
 
        spin_lock_irqsave(&desc->lock, flags);
-       ret = do_irq_select_affinity(irq, desc);
+       ret = setup_affinity(irq, desc);
        spin_unlock_irqrestore(&desc->lock, flags);
 
        return ret;
 }
 
 #else
-static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
+static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
 {
        return 0;
 }
 #endif
 
+void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
+{
+       if (suspend) {
+               if (!desc->action || (desc->action->flags & IRQF_TIMER))
+                       return;
+               desc->status |= IRQ_SUSPENDED;
+       }
+
+       if (!desc->depth++) {
+               desc->status |= IRQ_DISABLED;
+               desc->chip->disable(irq);
+       }
+}
+
 /**
  *     disable_irq_nosync - disable an irq without waiting
  *     @irq: Interrupt to disable
@@ -182,10 +196,7 @@ void disable_irq_nosync(unsigned int irq)
                return;
 
        spin_lock_irqsave(&desc->lock, flags);
-       if (!desc->depth++) {
-               desc->status |= IRQ_DISABLED;
-               desc->chip->disable(irq);
-       }
+       __disable_irq(desc, irq, false);
        spin_unlock_irqrestore(&desc->lock, flags);
 }
 EXPORT_SYMBOL(disable_irq_nosync);
@@ -215,15 +226,21 @@ void disable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(disable_irq);
 
-static void __enable_irq(struct irq_desc *desc, unsigned int irq)
+void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 {
+       if (resume)
+               desc->status &= ~IRQ_SUSPENDED;
+
        switch (desc->depth) {
        case 0:
+ err_out:
                WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
                break;
        case 1: {
                unsigned int status = desc->status & ~IRQ_DISABLED;
 
+               if (desc->status & IRQ_SUSPENDED)
+                       goto err_out;
                /* Prevent probing on this irq: */
                desc->status = status | IRQ_NOPROBE;
                check_irq_resend(desc, irq);
@@ -253,7 +270,7 @@ void enable_irq(unsigned int irq)
                return;
 
        spin_lock_irqsave(&desc->lock, flags);
-       __enable_irq(desc, irq);
+       __enable_irq(desc, irq, false);
        spin_unlock_irqrestore(&desc->lock, flags);
 }
 EXPORT_SYMBOL(enable_irq);
@@ -389,9 +406,9 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
  * allocate special interrupts that are part of the architecture.
  */
 static int
-__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
+__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 {
-       struct irqaction *old, **p;
+       struct irqaction *old, **old_ptr;
        const char *old_name = NULL;
        unsigned long flags;
        int shared = 0;
@@ -423,8 +440,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
         * The following block of code has to be executed atomically
         */
        spin_lock_irqsave(&desc->lock, flags);
-       p = &desc->action;
-       old = *p;
+       old_ptr = &desc->action;
+       old = *old_ptr;
        if (old) {
                /*
                 * Can't share interrupts unless both agree to and are
@@ -447,8 +464,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 
                /* add new interrupt at end of irq queue */
                do {
-                       p = &old->next;
-                       old = *p;
+                       old_ptr = &old->next;
+                       old = *old_ptr;
                } while (old);
                shared = 1;
        }
@@ -488,7 +505,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
                        desc->status |= IRQ_NO_BALANCING;
 
                /* Set default affinity mask once everything is setup */
-               do_irq_select_affinity(irq, desc);
+               setup_affinity(irq, desc);
 
        } else if ((new->flags & IRQF_TRIGGER_MASK)
                        && (new->flags & IRQF_TRIGGER_MASK)
@@ -499,7 +516,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
                                (int)(new->flags & IRQF_TRIGGER_MASK));
        }
 
-       *p = new;
+       *old_ptr = new;
 
        /* Reset broken irq detection when installing new handler */
        desc->irq_count = 0;
@@ -511,7 +528,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
         */
        if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
                desc->status &= ~IRQ_SPURIOUS_DISABLED;
-               __enable_irq(desc, irq);
+               __enable_irq(desc, irq, false);
        }
 
        spin_unlock_irqrestore(&desc->lock, flags);
@@ -549,90 +566,117 @@ int setup_irq(unsigned int irq, struct irqaction *act)
 
        return __setup_irq(irq, desc, act);
 }
+EXPORT_SYMBOL_GPL(setup_irq);
 
-/**
- *     free_irq - free an interrupt
- *     @irq: Interrupt line to free
- *     @dev_id: Device identity to free
- *
- *     Remove an interrupt handler. The handler is removed and if the
- *     interrupt line is no longer in use by any driver it is disabled.
- *     On a shared IRQ the caller must ensure the interrupt is disabled
- *     on the card it drives before calling this function. The function
- *     does not return until any executing interrupts for this IRQ
- *     have completed.
- *
- *     This function must not be called from interrupt context.
+/*
+ * Internal function to unregister an irqaction - used to free
+ * regular and special interrupts that are part of the architecture.
  */
-void free_irq(unsigned int irq, void *dev_id)
+static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 {
        struct irq_desc *desc = irq_to_desc(irq);
-       struct irqaction **p;
+       struct irqaction *action, **action_ptr;
        unsigned long flags;
 
-       WARN_ON(in_interrupt());
+       WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
 
        if (!desc)
-               return;
+               return NULL;
 
        spin_lock_irqsave(&desc->lock, flags);
-       p = &desc->action;
+
+       /*
+        * There can be multiple actions per IRQ descriptor, find the right
+        * one based on the dev_id:
+        */
+       action_ptr = &desc->action;
        for (;;) {
-               struct irqaction *action = *p;
+               action = *action_ptr;
 
-               if (action) {
-                       struct irqaction **pp = p;
+               if (!action) {
+                       WARN(1, "Trying to free already-free IRQ %d\n", irq);
+                       spin_unlock_irqrestore(&desc->lock, flags);
 
-                       p = &action->next;
-                       if (action->dev_id != dev_id)
-                               continue;
+                       return NULL;
+               }
 
-                       /* Found it - now remove it from the list of entries */
-                       *pp = action->next;
+               if (action->dev_id == dev_id)
+                       break;
+               action_ptr = &action->next;
+       }
 
-                       /* Currently used only by UML, might disappear one day.*/
+       /* Found it - now remove it from the list of entries: */
+       *action_ptr = action->next;
+
+       /* Currently used only by UML, might disappear one day: */
 #ifdef CONFIG_IRQ_RELEASE_METHOD
-                       if (desc->chip->release)
-                               desc->chip->release(irq, dev_id);
+       if (desc->chip->release)
+               desc->chip->release(irq, dev_id);
 #endif
 
-                       if (!desc->action) {
-                               desc->status |= IRQ_DISABLED;
-                               if (desc->chip->shutdown)
-                                       desc->chip->shutdown(irq);
-                               else
-                                       desc->chip->disable(irq);
-                       }
-                       spin_unlock_irqrestore(&desc->lock, flags);
-                       unregister_handler_proc(irq, action);
+       /* If this was the last handler, shut down the IRQ line: */
+       if (!desc->action) {
+               desc->status |= IRQ_DISABLED;
+               if (desc->chip->shutdown)
+                       desc->chip->shutdown(irq);
+               else
+                       desc->chip->disable(irq);
+       }
+       spin_unlock_irqrestore(&desc->lock, flags);
+
+       unregister_handler_proc(irq, action);
+
+       /* Make sure it's not being used on another CPU: */
+       synchronize_irq(irq);
 
-                       /* Make sure it's not being used on another CPU */
-                       synchronize_irq(irq);
-#ifdef CONFIG_DEBUG_SHIRQ
-                       /*
-                        * It's a shared IRQ -- the driver ought to be
-                        * prepared for it to happen even now it's
-                        * being freed, so let's make sure....  We do
-                        * this after actually deregistering it, to
-                        * make sure that a 'real' IRQ doesn't run in
-                        * parallel with our fake
-                        */
-                       if (action->flags & IRQF_SHARED) {
-                               local_irq_save(flags);
-                               action->handler(irq, dev_id);
-                               local_irq_restore(flags);
-                       }
-#endif
-                       kfree(action);
-                       return;
-               }
-               printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
 #ifdef CONFIG_DEBUG_SHIRQ
-               dump_stack();
-#endif
-               spin_unlock_irqrestore(&desc->lock, flags);
-               return;
+       /*
+        * It's a shared IRQ -- the driver ought to be prepared for an IRQ
+        * event to happen even now it's being freed, so let's make sure that
+        * is so by doing an extra call to the handler ....
+        *
+        * ( We do this after actually deregistering it, to make sure that a
+        *   'real' IRQ doesn't run in parallel with our fake. )
+        */
+       if (action->flags & IRQF_SHARED) {
+               local_irq_save(flags);
+               action->handler(irq, dev_id);
+               local_irq_restore(flags);
        }
+#endif
+       return action;
+}
+
+/**
+ *     remove_irq - free an interrupt
+ *     @irq: Interrupt line to free
+ *     @act: irqaction for the interrupt
+ *
+ *     Used to remove interrupts statically setup by the early boot process.
+ */
+void remove_irq(unsigned int irq, struct irqaction *act)
+{
+       __free_irq(irq, act->dev_id);
+}
+EXPORT_SYMBOL_GPL(remove_irq);
+
+/**
+ *     free_irq - free an interrupt allocated with request_irq
+ *     @irq: Interrupt line to free
+ *     @dev_id: Device identity to free
+ *
+ *     Remove an interrupt handler. The handler is removed and if the
+ *     interrupt line is no longer in use by any driver it is disabled.
+ *     On a shared IRQ the caller must ensure the interrupt is disabled
+ *     on the card it drives before calling this function. The function
+ *     does not return until any executing interrupts for this IRQ
+ *     have completed.
+ *
+ *     This function must not be called from interrupt context.
+ */
+void free_irq(unsigned int irq, void *dev_id)
+{
+       kfree(__free_irq(irq, dev_id));
 }
 EXPORT_SYMBOL(free_irq);
 
@@ -679,11 +723,12 @@ int request_irq(unsigned int irq, irq_handler_t handler,
         * the behavior is classified as "will not fix" so we need to
         * start nudging drivers away from using that idiom.
         */
-       if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
-                       == (IRQF_SHARED|IRQF_DISABLED))
-               pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
-                               "guaranteed on shared IRQs\n",
-                               irq, devname);
+       if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
+                                       (IRQF_SHARED|IRQF_DISABLED)) {
+               pr_warning(
+                 "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
+                       irq, devname);
+       }
 
 #ifdef CONFIG_LOCKDEP
        /*
@@ -709,15 +754,13 @@ int request_irq(unsigned int irq, irq_handler_t handler,
        if (!handler)
                return -EINVAL;
 
-       action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+       action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;
 
        action->handler = handler;
        action->flags = irqflags;
-       cpus_clear(action->mask);
        action->name = devname;
-       action->next = NULL;
        action->dev_id = dev_id;
 
        retval = __setup_irq(irq, desc, action);
```
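The new `__disable_irq()`/`__enable_irq()` helpers keep the existing depth counting but add a suspend dimension: a suspended line gets `IRQ_SUSPENDED` set and refuses a plain `enable_irq()` until a resume clears the flag. A compact userspace model of just that depth logic, with the flags and chip callbacks simplified away (names here are illustrative):

```c
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_irq {
        unsigned int depth;   /* nesting count of disables */
        bool disabled;
        bool suspended;
};

static void demo_disable(struct demo_irq *d, bool suspend)
{
        if (suspend)
                d->suspended = true;
        if (d->depth++ == 0)      /* only the first disable hits the "chip" */
                d->disabled = true;
}

static void demo_enable(struct demo_irq *d, bool resume)
{
        if (resume)
                d->suspended = false;
        assert(d->depth > 0);     /* the kernel WARNs on unbalanced enable */
        if (d->depth == 1 && d->suspended)
                return;           /* plain enable may not undo a suspend */
        if (--d->depth == 0)
                d->disabled = false;
}

int main(void)
{
        struct demo_irq d = { 0 };

        demo_disable(&d, true);                /* suspend_device_irqs() path */
        demo_enable(&d, false);                /* ignored: still suspended */
        printf("disabled=%d\n", d.disabled);   /* prints 1 */
        demo_enable(&d, true);                 /* resume_device_irqs() path */
        printf("disabled=%d\n", d.disabled);   /* prints 0 */
        return 0;
}
```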
```diff
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index bd72329e630c..e05ad9be43b7 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -18,7 +18,7 @@ void move_masked_irq(int irq)
 
        desc->status &= ~IRQ_MOVE_PENDING;
 
-       if (unlikely(cpumask_empty(&desc->pending_mask)))
+       if (unlikely(cpumask_empty(desc->pending_mask)))
                return;
 
        if (!desc->chip->set_affinity)
@@ -38,13 +38,13 @@ void move_masked_irq(int irq)
         * For correct operation this depends on the caller
         * masking the irqs.
         */
-       if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask)
+       if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
                   < nr_cpu_ids)) {
-               cpumask_and(&desc->affinity,
-                           &desc->pending_mask, cpu_online_mask);
-               desc->chip->set_affinity(irq, &desc->affinity);
+               cpumask_and(desc->affinity,
+                           desc->pending_mask, cpu_online_mask);
+               desc->chip->set_affinity(irq, desc->affinity);
        }
-       cpumask_clear(&desc->pending_mask);
+       cpumask_clear(desc->pending_mask);
 }
 
 void move_native_irq(int irq)
```
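Every `&desc->affinity` and `&desc->pending_mask` in this merge becomes `desc->affinity` / `desc->pending_mask` because the descriptor masks change from cpumask values embedded in `struct irq_desc` to separately allocated masks (set up by `init_alloc_desc_masks()` and sized for `nr_cpu_ids` rather than `NR_CPUS`). A toy illustration of the two layouts, using demo names rather than the kernel types:

```c
#include <stdlib.h>

typedef struct { unsigned long bits[4]; } demo_mask_t;

/* Old layout: mask embedded in the descriptor, callers take its address. */
struct desc_old { demo_mask_t affinity; };

/* New layout: mask lives in a separate allocation, field is a pointer. */
struct desc_new { demo_mask_t *affinity; };

static void use_mask(demo_mask_t *m) { (void)m; }

int main(void)
{
        struct desc_old o;
        use_mask(&o.affinity);           /* address-of operator needed */

        struct desc_new n;
        n.affinity = calloc(1, sizeof(*n.affinity));  /* allocated at init */
        use_mask(n.affinity);            /* field is already a pointer */
        free(n.affinity);
        return 0;
}
```

Sizing the separate allocation to the number of possible CPUs, instead of embedding an `NR_CPUS`-wide bitmap in every descriptor, is what lets systems with few CPUs but large `NR_CPUS` configs avoid wasting memory per IRQ.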
```diff
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index acd88356ac76..243d6121e50e 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -17,16 +17,11 @@ static void init_copy_kstat_irqs(struct irq_desc *old_desc,
                                 struct irq_desc *desc,
                                 int cpu, int nr)
 {
-       unsigned long bytes;
-
        init_kstat_irqs(desc, cpu, nr);
 
-       if (desc->kstat_irqs != old_desc->kstat_irqs) {
-               /* Compute how many bytes we need per irq and allocate them */
-               bytes = nr * sizeof(unsigned int);
-
-               memcpy(desc->kstat_irqs, old_desc->kstat_irqs, bytes);
-       }
+       if (desc->kstat_irqs != old_desc->kstat_irqs)
+               memcpy(desc->kstat_irqs, old_desc->kstat_irqs,
+                        nr * sizeof(*desc->kstat_irqs));
 }
 
 static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -38,15 +33,22 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
        old_desc->kstat_irqs = NULL;
 }
 
-static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
+static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
                 struct irq_desc *desc, int cpu)
 {
        memcpy(desc, old_desc, sizeof(struct irq_desc));
+       if (!init_alloc_desc_masks(desc, cpu, false)) {
+               printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
+                               "for migration.\n", irq);
+               return false;
+       }
        spin_lock_init(&desc->lock);
        desc->cpu = cpu;
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+       init_copy_desc_masks(old_desc, desc);
        arch_init_copy_chip_data(old_desc, desc, cpu);
+       return true;
 }
 
 static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -76,12 +78,18 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
        node = cpu_to_node(cpu);
        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
        if (!desc) {
-               printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq);
+               printk(KERN_ERR "irq %d: can not get new irq_desc "
+                               "for migration.\n", irq);
+               /* still use old one */
+               desc = old_desc;
+               goto out_unlock;
+       }
+       if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) {
                /* still use old one */
+               kfree(desc);
                desc = old_desc;
                goto out_unlock;
        }
-       init_copy_one_irq_desc(irq, old_desc, desc, cpu);
 
        irq_desc_ptrs[irq] = desc;
        spin_unlock_irqrestore(&sparse_irq_lock, flags);
```
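`init_copy_one_irq_desc()` now returns `bool` so `__real_move_irq_desc()` can roll back cleanly: if the cpumask allocation fails mid-copy, the half-initialized descriptor is freed and the old one stays in service. The copy-with-rollback shape, reduced to a userspace sketch with placeholder names:

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_desc { int irq; int *mask; };

/* Copy old into copy; report failure instead of half-succeeding. */
static bool copy_desc(struct demo_desc *old, struct demo_desc *copy)
{
        memcpy(copy, old, sizeof(*copy));
        copy->mask = malloc(16);         /* the allocation that may fail */
        return copy->mask != NULL;
}

static struct demo_desc *move_desc(struct demo_desc *old)
{
        struct demo_desc *desc = malloc(sizeof(*desc));
        if (!desc)
                return old;              /* still use old one */
        if (!copy_desc(old, desc)) {
                free(desc);              /* roll back the partial copy */
                return old;              /* still use old one */
        }
        return desc;
}

int main(void)
{
        struct demo_desc old = { .irq = 9, .mask = NULL };
        struct demo_desc *cur = move_desc(&old);

        printf("using %s desc for irq %d\n",
               cur == &old ? "old" : "new", cur->irq);
        if (cur != &old) {
                free(cur->mask);
                free(cur);
        }
        return 0;
}
```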
```diff
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
new file mode 100644
index 000000000000..638d8bedec14
--- /dev/null
+++ b/kernel/irq/pm.c
@@ -0,0 +1,79 @@
+/*
+ * linux/kernel/irq/pm.c
+ *
+ * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file contains power management functions related to interrupts.
+ */
+
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+
+#include "internals.h"
+
+/**
+ * suspend_device_irqs - disable all currently enabled interrupt lines
+ *
+ * During system-wide suspend or hibernation device interrupts need to be
+ * disabled at the chip level and this function is provided for this purpose.
+ * It disables all interrupt lines that are enabled at the moment and sets the
+ * IRQ_SUSPENDED flag for them.
+ */
+void suspend_device_irqs(void)
+{
+       struct irq_desc *desc;
+       int irq;
+
+       for_each_irq_desc(irq, desc) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&desc->lock, flags);
+               __disable_irq(desc, irq, true);
+               spin_unlock_irqrestore(&desc->lock, flags);
+       }
+
+       for_each_irq_desc(irq, desc)
+               if (desc->status & IRQ_SUSPENDED)
+                       synchronize_irq(irq);
+}
+EXPORT_SYMBOL_GPL(suspend_device_irqs);
+
+/**
+ * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs()
+ *
+ * Enable all interrupt lines previously disabled by suspend_device_irqs() that
+ * have the IRQ_SUSPENDED flag set.
+ */
+void resume_device_irqs(void)
+{
+       struct irq_desc *desc;
+       int irq;
+
+       for_each_irq_desc(irq, desc) {
+               unsigned long flags;
+
+               if (!(desc->status & IRQ_SUSPENDED))
+                       continue;
+
+               spin_lock_irqsave(&desc->lock, flags);
+               __enable_irq(desc, irq, true);
+               spin_unlock_irqrestore(&desc->lock, flags);
+       }
+}
+EXPORT_SYMBOL_GPL(resume_device_irqs);
+
+/**
+ * check_wakeup_irqs - check if any wake-up interrupts are pending
+ */
+int check_wakeup_irqs(void)
+{
+       struct irq_desc *desc;
+       int irq;
+
+       for_each_irq_desc(irq, desc)
+               if ((desc->status & IRQ_WAKEUP) && (desc->status & IRQ_PENDING))
+                       return -EBUSY;
+
+       return 0;
+}
```
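The new file gives the suspend core three entry points. A hypothetical caller sketch showing how they would pair up in a suspend sequence; the real glue lives in the PM core, not here, and `demo_suspend_enter` is an invented name:

```c
/*
 * Hypothetical ordering of the pm.c entry points in a suspend path;
 * all error handling beyond the wake-up check is omitted.
 */
static int demo_suspend_enter(void)
{
        int error;

        suspend_device_irqs();          /* mask devices, set IRQ_SUSPENDED */

        error = check_wakeup_irqs();    /* -EBUSY if a wake-up IRQ is pending */
        if (!error) {
                /* ... platform would enter the low-power state here ... */
        }

        resume_device_irqs();           /* re-enable what we suspended */
        return error;
}
```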
```diff
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index aae3f742bcec..692363dd591f 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -20,11 +20,11 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
        struct irq_desc *desc = irq_to_desc((long)m->private);
-       const struct cpumask *mask = &desc->affinity;
+       const struct cpumask *mask = desc->affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        if (desc->status & IRQ_MOVE_PENDING)
-               mask = &desc->pending_mask;
+               mask = desc->pending_mask;
 #endif
        seq_cpumask(m, mask);
        seq_putc(m, '\n');
```
```diff
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index dd364c11e56e..4d568294de3e 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -104,7 +104,7 @@ static int misrouted_irq(int irq)
        return ok;
 }
 
-static void poll_spurious_irqs(unsigned long dummy)
+static void poll_all_shared_irqs(void)
 {
        struct irq_desc *desc;
        int i;
@@ -123,11 +123,23 @@ static void poll_spurious_irqs(unsigned long dummy)
 
                try_one_irq(i, desc);
        }
+}
+
+static void poll_spurious_irqs(unsigned long dummy)
+{
+       poll_all_shared_irqs();
 
        mod_timer(&poll_spurious_irq_timer,
                  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
 }
 
+#ifdef CONFIG_DEBUG_SHIRQ
+void debug_poll_all_shared_irqs(void)
+{
+       poll_all_shared_irqs();
+}
+#endif
+
 /*
  * If 99,900 of the previous 100,000 interrupts have not been handled
  * then assume that the IRQ is stuck in some manner. Drop a diagnostic
```
