diff options
| author | Takashi Iwai <tiwai@suse.de> | 2011-04-05 03:12:21 -0400 |
|---|---|---|
| committer | Takashi Iwai <tiwai@suse.de> | 2011-04-05 03:12:21 -0400 |
| commit | 4e29402fe4b2006c994eed5020c42b2cc87d9b42 (patch) | |
| tree | c0229c107045ab21487729f6a6cab6b70ed30bfa /kernel | |
| parent | f8852b12200df393b0a4db1a7052454bbc335443 (diff) | |
| parent | 00b317a41c5428b13eb7e5b4bbc691b1aa7afa80 (diff) | |
Merge branch 'for-2.6.39' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound-2.6 into fix/asoc
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/irq/Kconfig | 8 | ||||
| -rw-r--r-- | kernel/irq/autoprobe.c | 4 | ||||
| -rw-r--r-- | kernel/irq/chip.c | 283 | ||||
| -rw-r--r-- | kernel/irq/compat.h | 72 | ||||
| -rw-r--r-- | kernel/irq/debug.h | 12 | ||||
| -rw-r--r-- | kernel/irq/dummychip.c | 9 | ||||
| -rw-r--r-- | kernel/irq/handle.c | 19 | ||||
| -rw-r--r-- | kernel/irq/internals.h | 16 | ||||
| -rw-r--r-- | kernel/irq/irqdesc.c | 3 | ||||
| -rw-r--r-- | kernel/irq/manage.c | 102 | ||||
| -rw-r--r-- | kernel/irq/migration.c | 15 | ||||
| -rw-r--r-- | kernel/irq/proc.c | 8 | ||||
| -rw-r--r-- | kernel/irq/resend.c | 1 | ||||
| -rw-r--r-- | kernel/irq/settings.h | 55 | ||||
| -rw-r--r-- | kernel/irq/spurious.c | 11 | ||||
| -rw-r--r-- | kernel/signal.c | 4 |
16 files changed, 237 insertions, 385 deletions
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index 00f2c037267a..a69c333f78e4 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig | |||
| @@ -10,10 +10,6 @@ menu "IRQ subsystem" | |||
| 10 | config GENERIC_HARDIRQS | 10 | config GENERIC_HARDIRQS |
| 11 | def_bool y | 11 | def_bool y |
| 12 | 12 | ||
| 13 | # Select this to disable the deprecated stuff | ||
| 14 | config GENERIC_HARDIRQS_NO_DEPRECATED | ||
| 15 | bool | ||
| 16 | |||
| 17 | config GENERIC_HARDIRQS_NO_COMPAT | 13 | config GENERIC_HARDIRQS_NO_COMPAT |
| 18 | bool | 14 | bool |
| 19 | 15 | ||
| @@ -51,6 +47,10 @@ config HARDIRQS_SW_RESEND | |||
| 51 | config IRQ_PREFLOW_FASTEOI | 47 | config IRQ_PREFLOW_FASTEOI |
| 52 | bool | 48 | bool |
| 53 | 49 | ||
| 50 | # Edge style eoi based handler (cell) | ||
| 51 | config IRQ_EDGE_EOI_HANDLER | ||
| 52 | bool | ||
| 53 | |||
| 54 | # Support forced irq threading | 54 | # Support forced irq threading |
| 55 | config IRQ_FORCED_THREADING | 55 | config IRQ_FORCED_THREADING |
| 56 | bool | 56 | bool |
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c index 394784c57060..342d8f44e401 100644 --- a/kernel/irq/autoprobe.c +++ b/kernel/irq/autoprobe.c | |||
| @@ -70,10 +70,8 @@ unsigned long probe_irq_on(void) | |||
| 70 | raw_spin_lock_irq(&desc->lock); | 70 | raw_spin_lock_irq(&desc->lock); |
| 71 | if (!desc->action && irq_settings_can_probe(desc)) { | 71 | if (!desc->action && irq_settings_can_probe(desc)) { |
| 72 | desc->istate |= IRQS_AUTODETECT | IRQS_WAITING; | 72 | desc->istate |= IRQS_AUTODETECT | IRQS_WAITING; |
| 73 | if (irq_startup(desc)) { | 73 | if (irq_startup(desc)) |
| 74 | irq_compat_set_pending(desc); | ||
| 75 | desc->istate |= IRQS_PENDING; | 74 | desc->istate |= IRQS_PENDING; |
| 76 | } | ||
| 77 | } | 75 | } |
| 78 | raw_spin_unlock_irq(&desc->lock); | 76 | raw_spin_unlock_irq(&desc->lock); |
| 79 | } | 77 | } |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index c9c0601f0615..616ec1c6b06f 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
| @@ -34,9 +34,14 @@ int irq_set_chip(unsigned int irq, struct irq_chip *chip) | |||
| 34 | if (!chip) | 34 | if (!chip) |
| 35 | chip = &no_irq_chip; | 35 | chip = &no_irq_chip; |
| 36 | 36 | ||
| 37 | irq_chip_set_defaults(chip); | ||
| 38 | desc->irq_data.chip = chip; | 37 | desc->irq_data.chip = chip; |
| 39 | irq_put_desc_unlock(desc, flags); | 38 | irq_put_desc_unlock(desc, flags); |
| 39 | /* | ||
| 40 | * For !CONFIG_SPARSE_IRQ make the irq show up in | ||
| 41 | * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is | ||
| 42 | * already marked, and this call is harmless. | ||
| 43 | */ | ||
| 44 | irq_reserve_irq(irq); | ||
| 40 | return 0; | 45 | return 0; |
| 41 | } | 46 | } |
| 42 | EXPORT_SYMBOL(irq_set_chip); | 47 | EXPORT_SYMBOL(irq_set_chip); |
| @@ -134,26 +139,22 @@ EXPORT_SYMBOL_GPL(irq_get_irq_data); | |||
| 134 | 139 | ||
| 135 | static void irq_state_clr_disabled(struct irq_desc *desc) | 140 | static void irq_state_clr_disabled(struct irq_desc *desc) |
| 136 | { | 141 | { |
| 137 | desc->istate &= ~IRQS_DISABLED; | 142 | irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED); |
| 138 | irq_compat_clr_disabled(desc); | ||
| 139 | } | 143 | } |
| 140 | 144 | ||
| 141 | static void irq_state_set_disabled(struct irq_desc *desc) | 145 | static void irq_state_set_disabled(struct irq_desc *desc) |
| 142 | { | 146 | { |
| 143 | desc->istate |= IRQS_DISABLED; | 147 | irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); |
| 144 | irq_compat_set_disabled(desc); | ||
| 145 | } | 148 | } |
| 146 | 149 | ||
| 147 | static void irq_state_clr_masked(struct irq_desc *desc) | 150 | static void irq_state_clr_masked(struct irq_desc *desc) |
| 148 | { | 151 | { |
| 149 | desc->istate &= ~IRQS_MASKED; | 152 | irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED); |
| 150 | irq_compat_clr_masked(desc); | ||
| 151 | } | 153 | } |
| 152 | 154 | ||
| 153 | static void irq_state_set_masked(struct irq_desc *desc) | 155 | static void irq_state_set_masked(struct irq_desc *desc) |
| 154 | { | 156 | { |
| 155 | desc->istate |= IRQS_MASKED; | 157 | irqd_set(&desc->irq_data, IRQD_IRQ_MASKED); |
| 156 | irq_compat_set_masked(desc); | ||
| 157 | } | 158 | } |
| 158 | 159 | ||
| 159 | int irq_startup(struct irq_desc *desc) | 160 | int irq_startup(struct irq_desc *desc) |
| @@ -203,126 +204,6 @@ void irq_disable(struct irq_desc *desc) | |||
| 203 | } | 204 | } |
| 204 | } | 205 | } |
| 205 | 206 | ||
| 206 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
| 207 | /* Temporary migration helpers */ | ||
| 208 | static void compat_irq_mask(struct irq_data *data) | ||
| 209 | { | ||
| 210 | data->chip->mask(data->irq); | ||
| 211 | } | ||
| 212 | |||
| 213 | static void compat_irq_unmask(struct irq_data *data) | ||
| 214 | { | ||
| 215 | data->chip->unmask(data->irq); | ||
| 216 | } | ||
| 217 | |||
| 218 | static void compat_irq_ack(struct irq_data *data) | ||
| 219 | { | ||
| 220 | data->chip->ack(data->irq); | ||
| 221 | } | ||
| 222 | |||
| 223 | static void compat_irq_mask_ack(struct irq_data *data) | ||
| 224 | { | ||
| 225 | data->chip->mask_ack(data->irq); | ||
| 226 | } | ||
| 227 | |||
| 228 | static void compat_irq_eoi(struct irq_data *data) | ||
| 229 | { | ||
| 230 | data->chip->eoi(data->irq); | ||
| 231 | } | ||
| 232 | |||
| 233 | static void compat_irq_enable(struct irq_data *data) | ||
| 234 | { | ||
| 235 | data->chip->enable(data->irq); | ||
| 236 | } | ||
| 237 | |||
| 238 | static void compat_irq_disable(struct irq_data *data) | ||
| 239 | { | ||
| 240 | data->chip->disable(data->irq); | ||
| 241 | } | ||
| 242 | |||
| 243 | static void compat_irq_shutdown(struct irq_data *data) | ||
| 244 | { | ||
| 245 | data->chip->shutdown(data->irq); | ||
| 246 | } | ||
| 247 | |||
| 248 | static unsigned int compat_irq_startup(struct irq_data *data) | ||
| 249 | { | ||
| 250 | return data->chip->startup(data->irq); | ||
| 251 | } | ||
| 252 | |||
| 253 | static int compat_irq_set_affinity(struct irq_data *data, | ||
| 254 | const struct cpumask *dest, bool force) | ||
| 255 | { | ||
| 256 | return data->chip->set_affinity(data->irq, dest); | ||
| 257 | } | ||
| 258 | |||
| 259 | static int compat_irq_set_type(struct irq_data *data, unsigned int type) | ||
| 260 | { | ||
| 261 | return data->chip->set_type(data->irq, type); | ||
| 262 | } | ||
| 263 | |||
| 264 | static int compat_irq_set_wake(struct irq_data *data, unsigned int on) | ||
| 265 | { | ||
| 266 | return data->chip->set_wake(data->irq, on); | ||
| 267 | } | ||
| 268 | |||
| 269 | static int compat_irq_retrigger(struct irq_data *data) | ||
| 270 | { | ||
| 271 | return data->chip->retrigger(data->irq); | ||
| 272 | } | ||
| 273 | |||
| 274 | static void compat_bus_lock(struct irq_data *data) | ||
| 275 | { | ||
| 276 | data->chip->bus_lock(data->irq); | ||
| 277 | } | ||
| 278 | |||
| 279 | static void compat_bus_sync_unlock(struct irq_data *data) | ||
| 280 | { | ||
| 281 | data->chip->bus_sync_unlock(data->irq); | ||
| 282 | } | ||
| 283 | #endif | ||
| 284 | |||
| 285 | /* | ||
| 286 | * Fixup enable/disable function pointers | ||
| 287 | */ | ||
| 288 | void irq_chip_set_defaults(struct irq_chip *chip) | ||
| 289 | { | ||
| 290 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
| 291 | if (chip->enable) | ||
| 292 | chip->irq_enable = compat_irq_enable; | ||
| 293 | if (chip->disable) | ||
| 294 | chip->irq_disable = compat_irq_disable; | ||
| 295 | if (chip->shutdown) | ||
| 296 | chip->irq_shutdown = compat_irq_shutdown; | ||
| 297 | if (chip->startup) | ||
| 298 | chip->irq_startup = compat_irq_startup; | ||
| 299 | if (!chip->end) | ||
| 300 | chip->end = dummy_irq_chip.end; | ||
| 301 | if (chip->bus_lock) | ||
| 302 | chip->irq_bus_lock = compat_bus_lock; | ||
| 303 | if (chip->bus_sync_unlock) | ||
| 304 | chip->irq_bus_sync_unlock = compat_bus_sync_unlock; | ||
| 305 | if (chip->mask) | ||
| 306 | chip->irq_mask = compat_irq_mask; | ||
| 307 | if (chip->unmask) | ||
| 308 | chip->irq_unmask = compat_irq_unmask; | ||
| 309 | if (chip->ack) | ||
| 310 | chip->irq_ack = compat_irq_ack; | ||
| 311 | if (chip->mask_ack) | ||
| 312 | chip->irq_mask_ack = compat_irq_mask_ack; | ||
| 313 | if (chip->eoi) | ||
| 314 | chip->irq_eoi = compat_irq_eoi; | ||
| 315 | if (chip->set_affinity) | ||
| 316 | chip->irq_set_affinity = compat_irq_set_affinity; | ||
| 317 | if (chip->set_type) | ||
| 318 | chip->irq_set_type = compat_irq_set_type; | ||
| 319 | if (chip->set_wake) | ||
| 320 | chip->irq_set_wake = compat_irq_set_wake; | ||
| 321 | if (chip->retrigger) | ||
| 322 | chip->irq_retrigger = compat_irq_retrigger; | ||
| 323 | #endif | ||
| 324 | } | ||
| 325 | |||
| 326 | static inline void mask_ack_irq(struct irq_desc *desc) | 207 | static inline void mask_ack_irq(struct irq_desc *desc) |
| 327 | { | 208 | { |
| 328 | if (desc->irq_data.chip->irq_mask_ack) | 209 | if (desc->irq_data.chip->irq_mask_ack) |
| @@ -372,11 +253,10 @@ void handle_nested_irq(unsigned int irq) | |||
| 372 | kstat_incr_irqs_this_cpu(irq, desc); | 253 | kstat_incr_irqs_this_cpu(irq, desc); |
| 373 | 254 | ||
| 374 | action = desc->action; | 255 | action = desc->action; |
| 375 | if (unlikely(!action || (desc->istate & IRQS_DISABLED))) | 256 | if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) |
| 376 | goto out_unlock; | 257 | goto out_unlock; |
| 377 | 258 | ||
| 378 | irq_compat_set_progress(desc); | 259 | irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
| 379 | desc->istate |= IRQS_INPROGRESS; | ||
| 380 | raw_spin_unlock_irq(&desc->lock); | 260 | raw_spin_unlock_irq(&desc->lock); |
| 381 | 261 | ||
| 382 | action_ret = action->thread_fn(action->irq, action->dev_id); | 262 | action_ret = action->thread_fn(action->irq, action->dev_id); |
| @@ -384,8 +264,7 @@ void handle_nested_irq(unsigned int irq) | |||
| 384 | note_interrupt(irq, desc, action_ret); | 264 | note_interrupt(irq, desc, action_ret); |
| 385 | 265 | ||
| 386 | raw_spin_lock_irq(&desc->lock); | 266 | raw_spin_lock_irq(&desc->lock); |
| 387 | desc->istate &= ~IRQS_INPROGRESS; | 267 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
| 388 | irq_compat_clr_progress(desc); | ||
| 389 | 268 | ||
| 390 | out_unlock: | 269 | out_unlock: |
| 391 | raw_spin_unlock_irq(&desc->lock); | 270 | raw_spin_unlock_irq(&desc->lock); |
| @@ -416,14 +295,14 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc) | |||
| 416 | { | 295 | { |
| 417 | raw_spin_lock(&desc->lock); | 296 | raw_spin_lock(&desc->lock); |
| 418 | 297 | ||
| 419 | if (unlikely(desc->istate & IRQS_INPROGRESS)) | 298 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) |
| 420 | if (!irq_check_poll(desc)) | 299 | if (!irq_check_poll(desc)) |
| 421 | goto out_unlock; | 300 | goto out_unlock; |
| 422 | 301 | ||
| 423 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | 302 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
| 424 | kstat_incr_irqs_this_cpu(irq, desc); | 303 | kstat_incr_irqs_this_cpu(irq, desc); |
| 425 | 304 | ||
| 426 | if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) | 305 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) |
| 427 | goto out_unlock; | 306 | goto out_unlock; |
| 428 | 307 | ||
| 429 | handle_irq_event(desc); | 308 | handle_irq_event(desc); |
| @@ -448,7 +327,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) | |||
| 448 | raw_spin_lock(&desc->lock); | 327 | raw_spin_lock(&desc->lock); |
| 449 | mask_ack_irq(desc); | 328 | mask_ack_irq(desc); |
| 450 | 329 | ||
| 451 | if (unlikely(desc->istate & IRQS_INPROGRESS)) | 330 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) |
| 452 | if (!irq_check_poll(desc)) | 331 | if (!irq_check_poll(desc)) |
| 453 | goto out_unlock; | 332 | goto out_unlock; |
| 454 | 333 | ||
| @@ -459,12 +338,12 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) | |||
| 459 | * If its disabled or no action available | 338 | * If its disabled or no action available |
| 460 | * keep it masked and get out of here | 339 | * keep it masked and get out of here |
| 461 | */ | 340 | */ |
| 462 | if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) | 341 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) |
| 463 | goto out_unlock; | 342 | goto out_unlock; |
| 464 | 343 | ||
| 465 | handle_irq_event(desc); | 344 | handle_irq_event(desc); |
| 466 | 345 | ||
| 467 | if (!(desc->istate & (IRQS_DISABLED | IRQS_ONESHOT))) | 346 | if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT)) |
| 468 | unmask_irq(desc); | 347 | unmask_irq(desc); |
| 469 | out_unlock: | 348 | out_unlock: |
| 470 | raw_spin_unlock(&desc->lock); | 349 | raw_spin_unlock(&desc->lock); |
| @@ -496,7 +375,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | |||
| 496 | { | 375 | { |
| 497 | raw_spin_lock(&desc->lock); | 376 | raw_spin_lock(&desc->lock); |
| 498 | 377 | ||
| 499 | if (unlikely(desc->istate & IRQS_INPROGRESS)) | 378 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) |
| 500 | if (!irq_check_poll(desc)) | 379 | if (!irq_check_poll(desc)) |
| 501 | goto out; | 380 | goto out; |
| 502 | 381 | ||
| @@ -507,8 +386,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | |||
| 507 | * If its disabled or no action available | 386 | * If its disabled or no action available |
| 508 | * then mask it and get out of here: | 387 | * then mask it and get out of here: |
| 509 | */ | 388 | */ |
| 510 | if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) { | 389 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { |
| 511 | irq_compat_set_pending(desc); | ||
| 512 | desc->istate |= IRQS_PENDING; | 390 | desc->istate |= IRQS_PENDING; |
| 513 | mask_irq(desc); | 391 | mask_irq(desc); |
| 514 | goto out; | 392 | goto out; |
| @@ -558,10 +436,9 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) | |||
| 558 | * we shouldn't process the IRQ. Mark it pending, handle | 436 | * we shouldn't process the IRQ. Mark it pending, handle |
| 559 | * the necessary masking and go out | 437 | * the necessary masking and go out |
| 560 | */ | 438 | */ |
| 561 | if (unlikely((desc->istate & (IRQS_DISABLED | IRQS_INPROGRESS) || | 439 | if (unlikely(irqd_irq_disabled(&desc->irq_data) || |
| 562 | !desc->action))) { | 440 | irqd_irq_inprogress(&desc->irq_data) || !desc->action)) { |
| 563 | if (!irq_check_poll(desc)) { | 441 | if (!irq_check_poll(desc)) { |
| 564 | irq_compat_set_pending(desc); | ||
| 565 | desc->istate |= IRQS_PENDING; | 442 | desc->istate |= IRQS_PENDING; |
| 566 | mask_ack_irq(desc); | 443 | mask_ack_irq(desc); |
| 567 | goto out_unlock; | 444 | goto out_unlock; |
| @@ -584,20 +461,65 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) | |||
| 584 | * Renable it, if it was not disabled in meantime. | 461 | * Renable it, if it was not disabled in meantime. |
| 585 | */ | 462 | */ |
| 586 | if (unlikely(desc->istate & IRQS_PENDING)) { | 463 | if (unlikely(desc->istate & IRQS_PENDING)) { |
| 587 | if (!(desc->istate & IRQS_DISABLED) && | 464 | if (!irqd_irq_disabled(&desc->irq_data) && |
| 588 | (desc->istate & IRQS_MASKED)) | 465 | irqd_irq_masked(&desc->irq_data)) |
| 589 | unmask_irq(desc); | 466 | unmask_irq(desc); |
| 590 | } | 467 | } |
| 591 | 468 | ||
| 592 | handle_irq_event(desc); | 469 | handle_irq_event(desc); |
| 593 | 470 | ||
| 594 | } while ((desc->istate & IRQS_PENDING) && | 471 | } while ((desc->istate & IRQS_PENDING) && |
| 595 | !(desc->istate & IRQS_DISABLED)); | 472 | !irqd_irq_disabled(&desc->irq_data)); |
| 596 | 473 | ||
| 597 | out_unlock: | 474 | out_unlock: |
| 598 | raw_spin_unlock(&desc->lock); | 475 | raw_spin_unlock(&desc->lock); |
| 599 | } | 476 | } |
| 600 | 477 | ||
| 478 | #ifdef CONFIG_IRQ_EDGE_EOI_HANDLER | ||
| 479 | /** | ||
| 480 | * handle_edge_eoi_irq - edge eoi type IRQ handler | ||
| 481 | * @irq: the interrupt number | ||
| 482 | * @desc: the interrupt description structure for this irq | ||
| 483 | * | ||
| 484 | * Similar as the above handle_edge_irq, but using eoi and w/o the | ||
| 485 | * mask/unmask logic. | ||
| 486 | */ | ||
| 487 | void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc) | ||
| 488 | { | ||
| 489 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
| 490 | |||
| 491 | raw_spin_lock(&desc->lock); | ||
| 492 | |||
| 493 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | ||
| 494 | /* | ||
| 495 | * If we're currently running this IRQ, or its disabled, | ||
| 496 | * we shouldn't process the IRQ. Mark it pending, handle | ||
| 497 | * the necessary masking and go out | ||
| 498 | */ | ||
| 499 | if (unlikely(irqd_irq_disabled(&desc->irq_data) || | ||
| 500 | irqd_irq_inprogress(&desc->irq_data) || !desc->action)) { | ||
| 501 | if (!irq_check_poll(desc)) { | ||
| 502 | desc->istate |= IRQS_PENDING; | ||
| 503 | goto out_eoi; | ||
| 504 | } | ||
| 505 | } | ||
| 506 | kstat_incr_irqs_this_cpu(irq, desc); | ||
| 507 | |||
| 508 | do { | ||
| 509 | if (unlikely(!desc->action)) | ||
| 510 | goto out_eoi; | ||
| 511 | |||
| 512 | handle_irq_event(desc); | ||
| 513 | |||
| 514 | } while ((desc->istate & IRQS_PENDING) && | ||
| 515 | !irqd_irq_disabled(&desc->irq_data)); | ||
| 516 | |||
| 517 | out_eoi: | ||
| 518 | chip->irq_eoi(&desc->irq_data); | ||
| 519 | raw_spin_unlock(&desc->lock); | ||
| 520 | } | ||
| 521 | #endif | ||
| 522 | |||
| 601 | /** | 523 | /** |
| 602 | * handle_percpu_irq - Per CPU local irq handler | 524 | * handle_percpu_irq - Per CPU local irq handler |
| 603 | * @irq: the interrupt number | 525 | * @irq: the interrupt number |
| @@ -642,8 +564,7 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | |||
| 642 | if (handle == handle_bad_irq) { | 564 | if (handle == handle_bad_irq) { |
| 643 | if (desc->irq_data.chip != &no_irq_chip) | 565 | if (desc->irq_data.chip != &no_irq_chip) |
| 644 | mask_ack_irq(desc); | 566 | mask_ack_irq(desc); |
| 645 | irq_compat_set_disabled(desc); | 567 | irq_state_set_disabled(desc); |
| 646 | desc->istate |= IRQS_DISABLED; | ||
| 647 | desc->depth = 1; | 568 | desc->depth = 1; |
| 648 | } | 569 | } |
| 649 | desc->handle_irq = handle; | 570 | desc->handle_irq = handle; |
| @@ -684,8 +605,70 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) | |||
| 684 | irqd_set(&desc->irq_data, IRQD_PER_CPU); | 605 | irqd_set(&desc->irq_data, IRQD_PER_CPU); |
| 685 | if (irq_settings_can_move_pcntxt(desc)) | 606 | if (irq_settings_can_move_pcntxt(desc)) |
| 686 | irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT); | 607 | irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT); |
| 608 | if (irq_settings_is_level(desc)) | ||
| 609 | irqd_set(&desc->irq_data, IRQD_LEVEL); | ||
| 687 | 610 | ||
| 688 | irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc)); | 611 | irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc)); |
| 689 | 612 | ||
| 690 | irq_put_desc_unlock(desc, flags); | 613 | irq_put_desc_unlock(desc, flags); |
| 691 | } | 614 | } |
| 615 | |||
| 616 | /** | ||
| 617 | * irq_cpu_online - Invoke all irq_cpu_online functions. | ||
| 618 | * | ||
| 619 | * Iterate through all irqs and invoke the chip.irq_cpu_online() | ||
| 620 | * for each. | ||
| 621 | */ | ||
| 622 | void irq_cpu_online(void) | ||
| 623 | { | ||
| 624 | struct irq_desc *desc; | ||
| 625 | struct irq_chip *chip; | ||
| 626 | unsigned long flags; | ||
| 627 | unsigned int irq; | ||
| 628 | |||
| 629 | for_each_active_irq(irq) { | ||
| 630 | desc = irq_to_desc(irq); | ||
| 631 | if (!desc) | ||
| 632 | continue; | ||
| 633 | |||
| 634 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 635 | |||
| 636 | chip = irq_data_get_irq_chip(&desc->irq_data); | ||
| 637 | if (chip && chip->irq_cpu_online && | ||
| 638 | (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || | ||
| 639 | !irqd_irq_disabled(&desc->irq_data))) | ||
| 640 | chip->irq_cpu_online(&desc->irq_data); | ||
| 641 | |||
| 642 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 643 | } | ||
| 644 | } | ||
| 645 | |||
| 646 | /** | ||
| 647 | * irq_cpu_offline - Invoke all irq_cpu_offline functions. | ||
| 648 | * | ||
| 649 | * Iterate through all irqs and invoke the chip.irq_cpu_offline() | ||
| 650 | * for each. | ||
| 651 | */ | ||
| 652 | void irq_cpu_offline(void) | ||
| 653 | { | ||
| 654 | struct irq_desc *desc; | ||
| 655 | struct irq_chip *chip; | ||
| 656 | unsigned long flags; | ||
| 657 | unsigned int irq; | ||
| 658 | |||
| 659 | for_each_active_irq(irq) { | ||
| 660 | desc = irq_to_desc(irq); | ||
| 661 | if (!desc) | ||
| 662 | continue; | ||
| 663 | |||
| 664 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 665 | |||
| 666 | chip = irq_data_get_irq_chip(&desc->irq_data); | ||
| 667 | if (chip && chip->irq_cpu_offline && | ||
| 668 | (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || | ||
| 669 | !irqd_irq_disabled(&desc->irq_data))) | ||
| 670 | chip->irq_cpu_offline(&desc->irq_data); | ||
| 671 | |||
| 672 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 673 | } | ||
| 674 | } | ||
diff --git a/kernel/irq/compat.h b/kernel/irq/compat.h deleted file mode 100644 index 6bbaf66aca85..000000000000 --- a/kernel/irq/compat.h +++ /dev/null | |||
| @@ -1,72 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Compat layer for transition period | ||
| 3 | */ | ||
| 4 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
| 5 | static inline void irq_compat_set_progress(struct irq_desc *desc) | ||
| 6 | { | ||
| 7 | desc->status |= IRQ_INPROGRESS; | ||
| 8 | } | ||
| 9 | |||
| 10 | static inline void irq_compat_clr_progress(struct irq_desc *desc) | ||
| 11 | { | ||
| 12 | desc->status &= ~IRQ_INPROGRESS; | ||
| 13 | } | ||
| 14 | static inline void irq_compat_set_disabled(struct irq_desc *desc) | ||
| 15 | { | ||
| 16 | desc->status |= IRQ_DISABLED; | ||
| 17 | } | ||
| 18 | static inline void irq_compat_clr_disabled(struct irq_desc *desc) | ||
| 19 | { | ||
| 20 | desc->status &= ~IRQ_DISABLED; | ||
| 21 | } | ||
| 22 | static inline void irq_compat_set_pending(struct irq_desc *desc) | ||
| 23 | { | ||
| 24 | desc->status |= IRQ_PENDING; | ||
| 25 | } | ||
| 26 | |||
| 27 | static inline void irq_compat_clr_pending(struct irq_desc *desc) | ||
| 28 | { | ||
| 29 | desc->status &= ~IRQ_PENDING; | ||
| 30 | } | ||
| 31 | static inline void irq_compat_set_masked(struct irq_desc *desc) | ||
| 32 | { | ||
| 33 | desc->status |= IRQ_MASKED; | ||
| 34 | } | ||
| 35 | |||
| 36 | static inline void irq_compat_clr_masked(struct irq_desc *desc) | ||
| 37 | { | ||
| 38 | desc->status &= ~IRQ_MASKED; | ||
| 39 | } | ||
| 40 | static inline void irq_compat_set_move_pending(struct irq_desc *desc) | ||
| 41 | { | ||
| 42 | desc->status |= IRQ_MOVE_PENDING; | ||
| 43 | } | ||
| 44 | |||
| 45 | static inline void irq_compat_clr_move_pending(struct irq_desc *desc) | ||
| 46 | { | ||
| 47 | desc->status &= ~IRQ_MOVE_PENDING; | ||
| 48 | } | ||
| 49 | static inline void irq_compat_set_affinity(struct irq_desc *desc) | ||
| 50 | { | ||
| 51 | desc->status |= IRQ_AFFINITY_SET; | ||
| 52 | } | ||
| 53 | |||
| 54 | static inline void irq_compat_clr_affinity(struct irq_desc *desc) | ||
| 55 | { | ||
| 56 | desc->status &= ~IRQ_AFFINITY_SET; | ||
| 57 | } | ||
| 58 | #else | ||
| 59 | static inline void irq_compat_set_progress(struct irq_desc *desc) { } | ||
| 60 | static inline void irq_compat_clr_progress(struct irq_desc *desc) { } | ||
| 61 | static inline void irq_compat_set_disabled(struct irq_desc *desc) { } | ||
| 62 | static inline void irq_compat_clr_disabled(struct irq_desc *desc) { } | ||
| 63 | static inline void irq_compat_set_pending(struct irq_desc *desc) { } | ||
| 64 | static inline void irq_compat_clr_pending(struct irq_desc *desc) { } | ||
| 65 | static inline void irq_compat_set_masked(struct irq_desc *desc) { } | ||
| 66 | static inline void irq_compat_clr_masked(struct irq_desc *desc) { } | ||
| 67 | static inline void irq_compat_set_move_pending(struct irq_desc *desc) { } | ||
| 68 | static inline void irq_compat_clr_move_pending(struct irq_desc *desc) { } | ||
| 69 | static inline void irq_compat_set_affinity(struct irq_desc *desc) { } | ||
| 70 | static inline void irq_compat_clr_affinity(struct irq_desc *desc) { } | ||
| 71 | #endif | ||
| 72 | |||
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h index d1a33b7fa61d..306cba37e9a5 100644 --- a/kernel/irq/debug.h +++ b/kernel/irq/debug.h | |||
| @@ -4,8 +4,10 @@ | |||
| 4 | 4 | ||
| 5 | #include <linux/kallsyms.h> | 5 | #include <linux/kallsyms.h> |
| 6 | 6 | ||
| 7 | #define P(f) if (desc->status & f) printk("%14s set\n", #f) | 7 | #define P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f) |
| 8 | #define PS(f) if (desc->istate & f) printk("%14s set\n", #f) | 8 | #define PS(f) if (desc->istate & f) printk("%14s set\n", #f) |
| 9 | /* FIXME */ | ||
| 10 | #define PD(f) do { } while (0) | ||
| 9 | 11 | ||
| 10 | static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) | 12 | static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) |
| 11 | { | 13 | { |
| @@ -28,13 +30,15 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) | |||
| 28 | P(IRQ_NOAUTOEN); | 30 | P(IRQ_NOAUTOEN); |
| 29 | 31 | ||
| 30 | PS(IRQS_AUTODETECT); | 32 | PS(IRQS_AUTODETECT); |
| 31 | PS(IRQS_INPROGRESS); | ||
| 32 | PS(IRQS_REPLAY); | 33 | PS(IRQS_REPLAY); |
| 33 | PS(IRQS_WAITING); | 34 | PS(IRQS_WAITING); |
| 34 | PS(IRQS_DISABLED); | ||
| 35 | PS(IRQS_PENDING); | 35 | PS(IRQS_PENDING); |
| 36 | PS(IRQS_MASKED); | 36 | |
| 37 | PD(IRQS_INPROGRESS); | ||
| 38 | PD(IRQS_DISABLED); | ||
| 39 | PD(IRQS_MASKED); | ||
| 37 | } | 40 | } |
| 38 | 41 | ||
| 39 | #undef P | 42 | #undef P |
| 40 | #undef PS | 43 | #undef PS |
| 44 | #undef PD | ||
diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c index 20dc5474947e..b5fcd96c7102 100644 --- a/kernel/irq/dummychip.c +++ b/kernel/irq/dummychip.c | |||
| @@ -31,13 +31,6 @@ static unsigned int noop_ret(struct irq_data *data) | |||
| 31 | return 0; | 31 | return 0; |
| 32 | } | 32 | } |
| 33 | 33 | ||
| 34 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
| 35 | static void compat_noop(unsigned int irq) { } | ||
| 36 | #define END_INIT .end = compat_noop | ||
| 37 | #else | ||
| 38 | #define END_INIT | ||
| 39 | #endif | ||
| 40 | |||
| 41 | /* | 34 | /* |
| 42 | * Generic no controller implementation | 35 | * Generic no controller implementation |
| 43 | */ | 36 | */ |
| @@ -48,7 +41,6 @@ struct irq_chip no_irq_chip = { | |||
| 48 | .irq_enable = noop, | 41 | .irq_enable = noop, |
| 49 | .irq_disable = noop, | 42 | .irq_disable = noop, |
| 50 | .irq_ack = ack_bad, | 43 | .irq_ack = ack_bad, |
| 51 | END_INIT | ||
| 52 | }; | 44 | }; |
| 53 | 45 | ||
| 54 | /* | 46 | /* |
| @@ -64,5 +56,4 @@ struct irq_chip dummy_irq_chip = { | |||
| 64 | .irq_ack = noop, | 56 | .irq_ack = noop, |
| 65 | .irq_mask = noop, | 57 | .irq_mask = noop, |
| 66 | .irq_unmask = noop, | 58 | .irq_unmask = noop, |
| 67 | END_INIT | ||
| 68 | }; | 59 | }; |
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 517561fc7317..90cb55f6d7eb 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c | |||
| @@ -175,28 +175,13 @@ irqreturn_t handle_irq_event(struct irq_desc *desc) | |||
| 175 | struct irqaction *action = desc->action; | 175 | struct irqaction *action = desc->action; |
| 176 | irqreturn_t ret; | 176 | irqreturn_t ret; |
| 177 | 177 | ||
| 178 | irq_compat_clr_pending(desc); | ||
| 179 | desc->istate &= ~IRQS_PENDING; | 178 | desc->istate &= ~IRQS_PENDING; |
| 180 | irq_compat_set_progress(desc); | 179 | irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
| 181 | desc->istate |= IRQS_INPROGRESS; | ||
| 182 | raw_spin_unlock(&desc->lock); | 180 | raw_spin_unlock(&desc->lock); |
| 183 | 181 | ||
| 184 | ret = handle_irq_event_percpu(desc, action); | 182 | ret = handle_irq_event_percpu(desc, action); |
| 185 | 183 | ||
| 186 | raw_spin_lock(&desc->lock); | 184 | raw_spin_lock(&desc->lock); |
| 187 | desc->istate &= ~IRQS_INPROGRESS; | 185 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
| 188 | irq_compat_clr_progress(desc); | ||
| 189 | return ret; | 186 | return ret; |
| 190 | } | 187 | } |
| 191 | |||
| 192 | /** | ||
| 193 | * handle_IRQ_event - irq action chain handler | ||
| 194 | * @irq: the interrupt number | ||
| 195 | * @action: the interrupt action chain for this irq | ||
| 196 | * | ||
| 197 | * Handles the action chain of an irq event | ||
| 198 | */ | ||
| 199 | irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action) | ||
| 200 | { | ||
| 201 | return handle_irq_event_percpu(irq_to_desc(irq), action); | ||
| 202 | } | ||
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 6c6ec9a49027..6546431447d7 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h | |||
| @@ -15,10 +15,6 @@ | |||
| 15 | 15 | ||
| 16 | #define istate core_internal_state__do_not_mess_with_it | 16 | #define istate core_internal_state__do_not_mess_with_it |
| 17 | 17 | ||
| 18 | #ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
| 19 | # define status status_use_accessors | ||
| 20 | #endif | ||
| 21 | |||
| 22 | extern int noirqdebug; | 18 | extern int noirqdebug; |
| 23 | 19 | ||
| 24 | /* | 20 | /* |
| @@ -44,38 +40,28 @@ enum { | |||
| 44 | * IRQS_SPURIOUS_DISABLED - was disabled due to spurious interrupt | 40 | * IRQS_SPURIOUS_DISABLED - was disabled due to spurious interrupt |
| 45 | * detection | 41 | * detection |
| 46 | * IRQS_POLL_INPROGRESS - polling in progress | 42 | * IRQS_POLL_INPROGRESS - polling in progress |
| 47 | * IRQS_INPROGRESS - Interrupt in progress | ||
| 48 | * IRQS_ONESHOT - irq is not unmasked in primary handler | 43 | * IRQS_ONESHOT - irq is not unmasked in primary handler |
| 49 | * IRQS_REPLAY - irq is replayed | 44 | * IRQS_REPLAY - irq is replayed |
| 50 | * IRQS_WAITING - irq is waiting | 45 | * IRQS_WAITING - irq is waiting |
| 51 | * IRQS_DISABLED - irq is disabled | ||
| 52 | * IRQS_PENDING - irq is pending and replayed later | 46 | * IRQS_PENDING - irq is pending and replayed later |
| 53 | * IRQS_MASKED - irq is masked | ||
| 54 | * IRQS_SUSPENDED - irq is suspended | 47 | * IRQS_SUSPENDED - irq is suspended |
| 55 | */ | 48 | */ |
| 56 | enum { | 49 | enum { |
| 57 | IRQS_AUTODETECT = 0x00000001, | 50 | IRQS_AUTODETECT = 0x00000001, |
| 58 | IRQS_SPURIOUS_DISABLED = 0x00000002, | 51 | IRQS_SPURIOUS_DISABLED = 0x00000002, |
| 59 | IRQS_POLL_INPROGRESS = 0x00000008, | 52 | IRQS_POLL_INPROGRESS = 0x00000008, |
| 60 | IRQS_INPROGRESS = 0x00000010, | ||
| 61 | IRQS_ONESHOT = 0x00000020, | 53 | IRQS_ONESHOT = 0x00000020, |
| 62 | IRQS_REPLAY = 0x00000040, | 54 | IRQS_REPLAY = 0x00000040, |
| 63 | IRQS_WAITING = 0x00000080, | 55 | IRQS_WAITING = 0x00000080, |
| 64 | IRQS_DISABLED = 0x00000100, | ||
| 65 | IRQS_PENDING = 0x00000200, | 56 | IRQS_PENDING = 0x00000200, |
| 66 | IRQS_MASKED = 0x00000400, | ||
| 67 | IRQS_SUSPENDED = 0x00000800, | 57 | IRQS_SUSPENDED = 0x00000800, |
| 68 | }; | 58 | }; |
| 69 | 59 | ||
| 70 | #include "compat.h" | ||
| 71 | #include "debug.h" | 60 | #include "debug.h" |
| 72 | #include "settings.h" | 61 | #include "settings.h" |
| 73 | 62 | ||
| 74 | #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) | 63 | #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) |
| 75 | 64 | ||
| 76 | /* Set default functions for irq_chip structures: */ | ||
| 77 | extern void irq_chip_set_defaults(struct irq_chip *chip); | ||
| 78 | |||
| 79 | extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | 65 | extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, |
| 80 | unsigned long flags); | 66 | unsigned long flags); |
| 81 | extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); | 67 | extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); |
| @@ -162,13 +148,11 @@ irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags) | |||
| 162 | static inline void irqd_set_move_pending(struct irq_data *d) | 148 | static inline void irqd_set_move_pending(struct irq_data *d) |
| 163 | { | 149 | { |
| 164 | d->state_use_accessors |= IRQD_SETAFFINITY_PENDING; | 150 | d->state_use_accessors |= IRQD_SETAFFINITY_PENDING; |
| 165 | irq_compat_set_move_pending(irq_data_to_desc(d)); | ||
| 166 | } | 151 | } |
| 167 | 152 | ||
| 168 | static inline void irqd_clr_move_pending(struct irq_data *d) | 153 | static inline void irqd_clr_move_pending(struct irq_data *d) |
| 169 | { | 154 | { |
| 170 | d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING; | 155 | d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING; |
| 171 | irq_compat_clr_move_pending(irq_data_to_desc(d)); | ||
| 172 | } | 156 | } |
| 173 | 157 | ||
| 174 | static inline void irqd_clear(struct irq_data *d, unsigned int mask) | 158 | static inline void irqd_clear(struct irq_data *d, unsigned int mask) |
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 6fb014f172f7..2c039c9b9383 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c | |||
| @@ -80,7 +80,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) | |||
| 80 | desc->irq_data.handler_data = NULL; | 80 | desc->irq_data.handler_data = NULL; |
| 81 | desc->irq_data.msi_desc = NULL; | 81 | desc->irq_data.msi_desc = NULL; |
| 82 | irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); | 82 | irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); |
| 83 | desc->istate = IRQS_DISABLED; | 83 | irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); |
| 84 | desc->handle_irq = handle_bad_irq; | 84 | desc->handle_irq = handle_bad_irq; |
| 85 | desc->depth = 1; | 85 | desc->depth = 1; |
| 86 | desc->irq_count = 0; | 86 | desc->irq_count = 0; |
| @@ -238,7 +238,6 @@ int __init early_irq_init(void) | |||
| 238 | 238 | ||
| 239 | struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { | 239 | struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { |
| 240 | [0 ... NR_IRQS-1] = { | 240 | [0 ... NR_IRQS-1] = { |
| 241 | .istate = IRQS_DISABLED, | ||
| 242 | .handle_irq = handle_bad_irq, | 241 | .handle_irq = handle_bad_irq, |
| 243 | .depth = 1, | 242 | .depth = 1, |
| 244 | .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), | 243 | .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 0a2aa73e536c..12a80fdae11c 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
| @@ -41,7 +41,7 @@ early_param("threadirqs", setup_forced_irqthreads); | |||
| 41 | void synchronize_irq(unsigned int irq) | 41 | void synchronize_irq(unsigned int irq) |
| 42 | { | 42 | { |
| 43 | struct irq_desc *desc = irq_to_desc(irq); | 43 | struct irq_desc *desc = irq_to_desc(irq); |
| 44 | unsigned int state; | 44 | bool inprogress; |
| 45 | 45 | ||
| 46 | if (!desc) | 46 | if (!desc) |
| 47 | return; | 47 | return; |
| @@ -53,16 +53,16 @@ void synchronize_irq(unsigned int irq) | |||
| 53 | * Wait until we're out of the critical section. This might | 53 | * Wait until we're out of the critical section. This might |
| 54 | * give the wrong answer due to the lack of memory barriers. | 54 | * give the wrong answer due to the lack of memory barriers. |
| 55 | */ | 55 | */ |
| 56 | while (desc->istate & IRQS_INPROGRESS) | 56 | while (irqd_irq_inprogress(&desc->irq_data)) |
| 57 | cpu_relax(); | 57 | cpu_relax(); |
| 58 | 58 | ||
| 59 | /* Ok, that indicated we're done: double-check carefully. */ | 59 | /* Ok, that indicated we're done: double-check carefully. */ |
| 60 | raw_spin_lock_irqsave(&desc->lock, flags); | 60 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 61 | state = desc->istate; | 61 | inprogress = irqd_irq_inprogress(&desc->irq_data); |
| 62 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 62 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 63 | 63 | ||
| 64 | /* Oops, that failed? */ | 64 | /* Oops, that failed? */ |
| 65 | } while (state & IRQS_INPROGRESS); | 65 | } while (inprogress); |
| 66 | 66 | ||
| 67 | /* | 67 | /* |
| 68 | * We made sure that no hardirq handler is running. Now verify | 68 | * We made sure that no hardirq handler is running. Now verify |
| @@ -112,13 +112,13 @@ void irq_set_thread_affinity(struct irq_desc *desc) | |||
| 112 | } | 112 | } |
| 113 | 113 | ||
| 114 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 114 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 115 | static inline bool irq_can_move_pcntxt(struct irq_desc *desc) | 115 | static inline bool irq_can_move_pcntxt(struct irq_data *data) |
| 116 | { | 116 | { |
| 117 | return irq_settings_can_move_pcntxt(desc); | 117 | return irqd_can_move_in_process_context(data); |
| 118 | } | 118 | } |
| 119 | static inline bool irq_move_pending(struct irq_desc *desc) | 119 | static inline bool irq_move_pending(struct irq_data *data) |
| 120 | { | 120 | { |
| 121 | return irqd_is_setaffinity_pending(&desc->irq_data); | 121 | return irqd_is_setaffinity_pending(data); |
| 122 | } | 122 | } |
| 123 | static inline void | 123 | static inline void |
| 124 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) | 124 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) |
| @@ -131,43 +131,34 @@ irq_get_pending(struct cpumask *mask, struct irq_desc *desc) | |||
| 131 | cpumask_copy(mask, desc->pending_mask); | 131 | cpumask_copy(mask, desc->pending_mask); |
| 132 | } | 132 | } |
| 133 | #else | 133 | #else |
| 134 | static inline bool irq_can_move_pcntxt(struct irq_desc *desc) { return true; } | 134 | static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; } |
| 135 | static inline bool irq_move_pending(struct irq_desc *desc) { return false; } | 135 | static inline bool irq_move_pending(struct irq_data *data) { return false; } |
| 136 | static inline void | 136 | static inline void |
| 137 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } | 137 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } |
| 138 | static inline void | 138 | static inline void |
| 139 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } | 139 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } |
| 140 | #endif | 140 | #endif |
| 141 | 141 | ||
| 142 | /** | 142 | int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) |
| 143 | * irq_set_affinity - Set the irq affinity of a given irq | ||
| 144 | * @irq: Interrupt to set affinity | ||
| 145 | * @cpumask: cpumask | ||
| 146 | * | ||
| 147 | */ | ||
| 148 | int irq_set_affinity(unsigned int irq, const struct cpumask *mask) | ||
| 149 | { | 143 | { |
| 150 | struct irq_desc *desc = irq_to_desc(irq); | 144 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
| 151 | struct irq_chip *chip = desc->irq_data.chip; | 145 | struct irq_desc *desc = irq_data_to_desc(data); |
| 152 | unsigned long flags; | ||
| 153 | int ret = 0; | 146 | int ret = 0; |
| 154 | 147 | ||
| 155 | if (!chip->irq_set_affinity) | 148 | if (!chip || !chip->irq_set_affinity) |
| 156 | return -EINVAL; | 149 | return -EINVAL; |
| 157 | 150 | ||
| 158 | raw_spin_lock_irqsave(&desc->lock, flags); | 151 | if (irq_can_move_pcntxt(data)) { |
| 159 | 152 | ret = chip->irq_set_affinity(data, mask, false); | |
| 160 | if (irq_can_move_pcntxt(desc)) { | ||
| 161 | ret = chip->irq_set_affinity(&desc->irq_data, mask, false); | ||
| 162 | switch (ret) { | 153 | switch (ret) { |
| 163 | case IRQ_SET_MASK_OK: | 154 | case IRQ_SET_MASK_OK: |
| 164 | cpumask_copy(desc->irq_data.affinity, mask); | 155 | cpumask_copy(data->affinity, mask); |
| 165 | case IRQ_SET_MASK_OK_NOCOPY: | 156 | case IRQ_SET_MASK_OK_NOCOPY: |
| 166 | irq_set_thread_affinity(desc); | 157 | irq_set_thread_affinity(desc); |
| 167 | ret = 0; | 158 | ret = 0; |
| 168 | } | 159 | } |
| 169 | } else { | 160 | } else { |
| 170 | irqd_set_move_pending(&desc->irq_data); | 161 | irqd_set_move_pending(data); |
| 171 | irq_copy_pending(desc, mask); | 162 | irq_copy_pending(desc, mask); |
| 172 | } | 163 | } |
| 173 | 164 | ||
| @@ -175,8 +166,28 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask) | |||
| 175 | kref_get(&desc->affinity_notify->kref); | 166 | kref_get(&desc->affinity_notify->kref); |
| 176 | schedule_work(&desc->affinity_notify->work); | 167 | schedule_work(&desc->affinity_notify->work); |
| 177 | } | 168 | } |
| 178 | irq_compat_set_affinity(desc); | 169 | irqd_set(data, IRQD_AFFINITY_SET); |
| 179 | irqd_set(&desc->irq_data, IRQD_AFFINITY_SET); | 170 | |
| 171 | return ret; | ||
| 172 | } | ||
| 173 | |||
| 174 | /** | ||
| 175 | * irq_set_affinity - Set the irq affinity of a given irq | ||
| 176 | * @irq: Interrupt to set affinity | ||
| 177 | * @mask: cpumask | ||
| 178 | * | ||
| 179 | */ | ||
| 180 | int irq_set_affinity(unsigned int irq, const struct cpumask *mask) | ||
| 181 | { | ||
| 182 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 183 | unsigned long flags; | ||
| 184 | int ret; | ||
| 185 | |||
| 186 | if (!desc) | ||
| 187 | return -EINVAL; | ||
| 188 | |||
| 189 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 190 | ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask); | ||
| 180 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 191 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 181 | return ret; | 192 | return ret; |
| 182 | } | 193 | } |
| @@ -206,7 +217,7 @@ static void irq_affinity_notify(struct work_struct *work) | |||
| 206 | goto out; | 217 | goto out; |
| 207 | 218 | ||
| 208 | raw_spin_lock_irqsave(&desc->lock, flags); | 219 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 209 | if (irq_move_pending(desc)) | 220 | if (irq_move_pending(&desc->irq_data)) |
| 210 | irq_get_pending(cpumask, desc); | 221 | irq_get_pending(cpumask, desc); |
| 211 | else | 222 | else |
| 212 | cpumask_copy(cpumask, desc->irq_data.affinity); | 223 | cpumask_copy(cpumask, desc->irq_data.affinity); |
| @@ -285,10 +296,8 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | |||
| 285 | if (cpumask_intersects(desc->irq_data.affinity, | 296 | if (cpumask_intersects(desc->irq_data.affinity, |
| 286 | cpu_online_mask)) | 297 | cpu_online_mask)) |
| 287 | set = desc->irq_data.affinity; | 298 | set = desc->irq_data.affinity; |
| 288 | else { | 299 | else |
| 289 | irq_compat_clr_affinity(desc); | ||
| 290 | irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); | 300 | irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); |
| 291 | } | ||
| 292 | } | 301 | } |
| 293 | 302 | ||
| 294 | cpumask_and(mask, cpu_online_mask, set); | 303 | cpumask_and(mask, cpu_online_mask, set); |
| @@ -551,9 +560,9 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
| 551 | flags &= IRQ_TYPE_SENSE_MASK; | 560 | flags &= IRQ_TYPE_SENSE_MASK; |
| 552 | 561 | ||
| 553 | if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { | 562 | if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { |
| 554 | if (!(desc->istate & IRQS_MASKED)) | 563 | if (!irqd_irq_masked(&desc->irq_data)) |
| 555 | mask_irq(desc); | 564 | mask_irq(desc); |
| 556 | if (!(desc->istate & IRQS_DISABLED)) | 565 | if (!irqd_irq_disabled(&desc->irq_data)) |
| 557 | unmask = 1; | 566 | unmask = 1; |
| 558 | } | 567 | } |
| 559 | 568 | ||
| @@ -575,8 +584,6 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
| 575 | irqd_set(&desc->irq_data, IRQD_LEVEL); | 584 | irqd_set(&desc->irq_data, IRQD_LEVEL); |
| 576 | } | 585 | } |
| 577 | 586 | ||
| 578 | if (chip != desc->irq_data.chip) | ||
| 579 | irq_chip_set_defaults(desc->irq_data.chip); | ||
| 580 | ret = 0; | 587 | ret = 0; |
| 581 | break; | 588 | break; |
| 582 | default: | 589 | default: |
| @@ -651,7 +658,7 @@ again: | |||
| 651 | * irq_wake_thread(). See the comment there which explains the | 658 | * irq_wake_thread(). See the comment there which explains the |
| 652 | * serialization. | 659 | * serialization. |
| 653 | */ | 660 | */ |
| 654 | if (unlikely(desc->istate & IRQS_INPROGRESS)) { | 661 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { |
| 655 | raw_spin_unlock_irq(&desc->lock); | 662 | raw_spin_unlock_irq(&desc->lock); |
| 656 | chip_bus_sync_unlock(desc); | 663 | chip_bus_sync_unlock(desc); |
| 657 | cpu_relax(); | 664 | cpu_relax(); |
| @@ -668,12 +675,10 @@ again: | |||
| 668 | 675 | ||
| 669 | desc->threads_oneshot &= ~action->thread_mask; | 676 | desc->threads_oneshot &= ~action->thread_mask; |
| 670 | 677 | ||
| 671 | if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) && | 678 | if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && |
| 672 | (desc->istate & IRQS_MASKED)) { | 679 | irqd_irq_masked(&desc->irq_data)) |
| 673 | irq_compat_clr_masked(desc); | 680 | unmask_irq(desc); |
| 674 | desc->istate &= ~IRQS_MASKED; | 681 | |
| 675 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | ||
| 676 | } | ||
| 677 | out_unlock: | 682 | out_unlock: |
| 678 | raw_spin_unlock_irq(&desc->lock); | 683 | raw_spin_unlock_irq(&desc->lock); |
| 679 | chip_bus_sync_unlock(desc); | 684 | chip_bus_sync_unlock(desc); |
| @@ -767,7 +772,7 @@ static int irq_thread(void *data) | |||
| 767 | atomic_inc(&desc->threads_active); | 772 | atomic_inc(&desc->threads_active); |
| 768 | 773 | ||
| 769 | raw_spin_lock_irq(&desc->lock); | 774 | raw_spin_lock_irq(&desc->lock); |
| 770 | if (unlikely(desc->istate & IRQS_DISABLED)) { | 775 | if (unlikely(irqd_irq_disabled(&desc->irq_data))) { |
| 771 | /* | 776 | /* |
| 772 | * CHECKME: We might need a dedicated | 777 | * CHECKME: We might need a dedicated |
| 773 | * IRQ_THREAD_PENDING flag here, which | 778 | * IRQ_THREAD_PENDING flag here, which |
| @@ -775,7 +780,6 @@ static int irq_thread(void *data) | |||
| 775 | * but AFAICT IRQS_PENDING should be fine as it | 780 | * but AFAICT IRQS_PENDING should be fine as it |
| 776 | * retriggers the interrupt itself --- tglx | 781 | * retriggers the interrupt itself --- tglx |
| 777 | */ | 782 | */ |
| 778 | irq_compat_set_pending(desc); | ||
| 779 | desc->istate |= IRQS_PENDING; | 783 | desc->istate |= IRQS_PENDING; |
| 780 | raw_spin_unlock_irq(&desc->lock); | 784 | raw_spin_unlock_irq(&desc->lock); |
| 781 | } else { | 785 | } else { |
| @@ -971,8 +975,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 971 | new->thread_mask = 1 << ffz(thread_mask); | 975 | new->thread_mask = 1 << ffz(thread_mask); |
| 972 | 976 | ||
| 973 | if (!shared) { | 977 | if (!shared) { |
| 974 | irq_chip_set_defaults(desc->irq_data.chip); | ||
| 975 | |||
| 976 | init_waitqueue_head(&desc->wait_for_threads); | 978 | init_waitqueue_head(&desc->wait_for_threads); |
| 977 | 979 | ||
| 978 | /* Setup the type (level, edge polarity) if configured: */ | 980 | /* Setup the type (level, edge polarity) if configured: */ |
| @@ -985,8 +987,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 985 | } | 987 | } |
| 986 | 988 | ||
| 987 | desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ | 989 | desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ |
| 988 | IRQS_INPROGRESS | IRQS_ONESHOT | \ | 990 | IRQS_ONESHOT | IRQS_WAITING); |
| 989 | IRQS_WAITING); | 991 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
| 990 | 992 | ||
| 991 | if (new->flags & IRQF_PERCPU) { | 993 | if (new->flags & IRQF_PERCPU) { |
| 992 | irqd_set(&desc->irq_data, IRQD_PER_CPU); | 994 | irqd_set(&desc->irq_data, IRQD_PER_CPU); |
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index ec4806d4778b..bc6194698dfd 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c | |||
| @@ -53,20 +53,14 @@ void irq_move_masked_irq(struct irq_data *idata) | |||
| 53 | cpumask_clear(desc->pending_mask); | 53 | cpumask_clear(desc->pending_mask); |
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | void move_masked_irq(int irq) | ||
| 57 | { | ||
| 58 | irq_move_masked_irq(irq_get_irq_data(irq)); | ||
| 59 | } | ||
| 60 | |||
| 61 | void irq_move_irq(struct irq_data *idata) | 56 | void irq_move_irq(struct irq_data *idata) |
| 62 | { | 57 | { |
| 63 | struct irq_desc *desc = irq_data_to_desc(idata); | ||
| 64 | bool masked; | 58 | bool masked; |
| 65 | 59 | ||
| 66 | if (likely(!irqd_is_setaffinity_pending(idata))) | 60 | if (likely(!irqd_is_setaffinity_pending(idata))) |
| 67 | return; | 61 | return; |
| 68 | 62 | ||
| 69 | if (unlikely(desc->istate & IRQS_DISABLED)) | 63 | if (unlikely(irqd_irq_disabled(idata))) |
| 70 | return; | 64 | return; |
| 71 | 65 | ||
| 72 | /* | 66 | /* |
| @@ -74,15 +68,10 @@ void irq_move_irq(struct irq_data *idata) | |||
| 74 | * threaded interrupt with ONESHOT set, we can end up with an | 68 | * threaded interrupt with ONESHOT set, we can end up with an |
| 75 | * interrupt storm. | 69 | * interrupt storm. |
| 76 | */ | 70 | */ |
| 77 | masked = desc->istate & IRQS_MASKED; | 71 | masked = irqd_irq_masked(idata); |
| 78 | if (!masked) | 72 | if (!masked) |
| 79 | idata->chip->irq_mask(idata); | 73 | idata->chip->irq_mask(idata); |
| 80 | irq_move_masked_irq(idata); | 74 | irq_move_masked_irq(idata); |
| 81 | if (!masked) | 75 | if (!masked) |
| 82 | idata->chip->irq_unmask(idata); | 76 | idata->chip->irq_unmask(idata); |
| 83 | } | 77 | } |
| 84 | |||
| 85 | void move_native_irq(int irq) | ||
| 86 | { | ||
| 87 | irq_move_irq(irq_get_irq_data(irq)); | ||
| 88 | } | ||
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 626d092eed9a..dd201bd35103 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
| @@ -364,6 +364,10 @@ int __weak arch_show_interrupts(struct seq_file *p, int prec) | |||
| 364 | return 0; | 364 | return 0; |
| 365 | } | 365 | } |
| 366 | 366 | ||
| 367 | #ifndef ACTUAL_NR_IRQS | ||
| 368 | # define ACTUAL_NR_IRQS nr_irqs | ||
| 369 | #endif | ||
| 370 | |||
| 367 | int show_interrupts(struct seq_file *p, void *v) | 371 | int show_interrupts(struct seq_file *p, void *v) |
| 368 | { | 372 | { |
| 369 | static int prec; | 373 | static int prec; |
| @@ -373,10 +377,10 @@ int show_interrupts(struct seq_file *p, void *v) | |||
| 373 | struct irqaction *action; | 377 | struct irqaction *action; |
| 374 | struct irq_desc *desc; | 378 | struct irq_desc *desc; |
| 375 | 379 | ||
| 376 | if (i > nr_irqs) | 380 | if (i > ACTUAL_NR_IRQS) |
| 377 | return 0; | 381 | return 0; |
| 378 | 382 | ||
| 379 | if (i == nr_irqs) | 383 | if (i == ACTUAL_NR_IRQS) |
| 380 | return arch_show_interrupts(p, prec); | 384 | return arch_show_interrupts(p, prec); |
| 381 | 385 | ||
| 382 | /* print header and calculate the width of the first column */ | 386 | /* print header and calculate the width of the first column */ |
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index ad683a99b1ec..14dd5761e8c9 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c | |||
| @@ -65,7 +65,6 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq) | |||
| 65 | if (desc->istate & IRQS_REPLAY) | 65 | if (desc->istate & IRQS_REPLAY) |
| 66 | return; | 66 | return; |
| 67 | if (desc->istate & IRQS_PENDING) { | 67 | if (desc->istate & IRQS_PENDING) { |
| 68 | irq_compat_clr_pending(desc); | ||
| 69 | desc->istate &= ~IRQS_PENDING; | 68 | desc->istate &= ~IRQS_PENDING; |
| 70 | desc->istate |= IRQS_REPLAY; | 69 | desc->istate |= IRQS_REPLAY; |
| 71 | 70 | ||
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h index 0227ad358272..0d91730b6330 100644 --- a/kernel/irq/settings.h +++ b/kernel/irq/settings.h | |||
| @@ -15,17 +15,8 @@ enum { | |||
| 15 | _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, | 15 | _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, |
| 16 | }; | 16 | }; |
| 17 | 17 | ||
| 18 | #define IRQ_INPROGRESS GOT_YOU_MORON | ||
| 19 | #define IRQ_REPLAY GOT_YOU_MORON | ||
| 20 | #define IRQ_WAITING GOT_YOU_MORON | ||
| 21 | #define IRQ_DISABLED GOT_YOU_MORON | ||
| 22 | #define IRQ_PENDING GOT_YOU_MORON | ||
| 23 | #define IRQ_MASKED GOT_YOU_MORON | ||
| 24 | #define IRQ_WAKEUP GOT_YOU_MORON | ||
| 25 | #define IRQ_MOVE_PENDING GOT_YOU_MORON | ||
| 26 | #define IRQ_PER_CPU GOT_YOU_MORON | 18 | #define IRQ_PER_CPU GOT_YOU_MORON |
| 27 | #define IRQ_NO_BALANCING GOT_YOU_MORON | 19 | #define IRQ_NO_BALANCING GOT_YOU_MORON |
| 28 | #define IRQ_AFFINITY_SET GOT_YOU_MORON | ||
| 29 | #define IRQ_LEVEL GOT_YOU_MORON | 20 | #define IRQ_LEVEL GOT_YOU_MORON |
| 30 | #define IRQ_NOPROBE GOT_YOU_MORON | 21 | #define IRQ_NOPROBE GOT_YOU_MORON |
| 31 | #define IRQ_NOREQUEST GOT_YOU_MORON | 22 | #define IRQ_NOREQUEST GOT_YOU_MORON |
| @@ -37,102 +28,98 @@ enum { | |||
| 37 | static inline void | 28 | static inline void |
| 38 | irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set) | 29 | irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set) |
| 39 | { | 30 | { |
| 40 | desc->status &= ~(clr & _IRQF_MODIFY_MASK); | 31 | desc->status_use_accessors &= ~(clr & _IRQF_MODIFY_MASK); |
| 41 | desc->status |= (set & _IRQF_MODIFY_MASK); | 32 | desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK); |
| 42 | } | 33 | } |
| 43 | 34 | ||
| 44 | static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) | 35 | static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) |
| 45 | { | 36 | { |
| 46 | return desc->status & _IRQ_PER_CPU; | 37 | return desc->status_use_accessors & _IRQ_PER_CPU; |
| 47 | } | 38 | } |
| 48 | 39 | ||
| 49 | static inline void irq_settings_set_per_cpu(struct irq_desc *desc) | 40 | static inline void irq_settings_set_per_cpu(struct irq_desc *desc) |
| 50 | { | 41 | { |
| 51 | desc->status |= _IRQ_PER_CPU; | 42 | desc->status_use_accessors |= _IRQ_PER_CPU; |
| 52 | } | 43 | } |
| 53 | 44 | ||
| 54 | static inline void irq_settings_set_no_balancing(struct irq_desc *desc) | 45 | static inline void irq_settings_set_no_balancing(struct irq_desc *desc) |
| 55 | { | 46 | { |
| 56 | desc->status |= _IRQ_NO_BALANCING; | 47 | desc->status_use_accessors |= _IRQ_NO_BALANCING; |
| 57 | } | 48 | } |
| 58 | 49 | ||
| 59 | static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc) | 50 | static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc) |
| 60 | { | 51 | { |
| 61 | return desc->status & _IRQ_NO_BALANCING; | 52 | return desc->status_use_accessors & _IRQ_NO_BALANCING; |
| 62 | } | 53 | } |
| 63 | 54 | ||
| 64 | static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc) | 55 | static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc) |
| 65 | { | 56 | { |
| 66 | return desc->status & IRQ_TYPE_SENSE_MASK; | 57 | return desc->status_use_accessors & IRQ_TYPE_SENSE_MASK; |
| 67 | } | 58 | } |
| 68 | 59 | ||
| 69 | static inline void | 60 | static inline void |
| 70 | irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask) | 61 | irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask) |
| 71 | { | 62 | { |
| 72 | desc->status &= ~IRQ_TYPE_SENSE_MASK; | 63 | desc->status_use_accessors &= ~IRQ_TYPE_SENSE_MASK; |
| 73 | desc->status |= mask & IRQ_TYPE_SENSE_MASK; | 64 | desc->status_use_accessors |= mask & IRQ_TYPE_SENSE_MASK; |
| 74 | } | 65 | } |
| 75 | 66 | ||
| 76 | static inline bool irq_settings_is_level(struct irq_desc *desc) | 67 | static inline bool irq_settings_is_level(struct irq_desc *desc) |
| 77 | { | 68 | { |
| 78 | return desc->status & _IRQ_LEVEL; | 69 | return desc->status_use_accessors & _IRQ_LEVEL; |
| 79 | } | 70 | } |
| 80 | 71 | ||
| 81 | static inline void irq_settings_clr_level(struct irq_desc *desc) | 72 | static inline void irq_settings_clr_level(struct irq_desc *desc) |
| 82 | { | 73 | { |
| 83 | desc->status &= ~_IRQ_LEVEL; | 74 | desc->status_use_accessors &= ~_IRQ_LEVEL; |
| 84 | } | 75 | } |
| 85 | 76 | ||
| 86 | static inline void irq_settings_set_level(struct irq_desc *desc) | 77 | static inline void irq_settings_set_level(struct irq_desc *desc) |
| 87 | { | 78 | { |
| 88 | desc->status |= _IRQ_LEVEL; | 79 | desc->status_use_accessors |= _IRQ_LEVEL; |
| 89 | } | 80 | } |
| 90 | 81 | ||
| 91 | static inline bool irq_settings_can_request(struct irq_desc *desc) | 82 | static inline bool irq_settings_can_request(struct irq_desc *desc) |
| 92 | { | 83 | { |
| 93 | return !(desc->status & _IRQ_NOREQUEST); | 84 | return !(desc->status_use_accessors & _IRQ_NOREQUEST); |
| 94 | } | 85 | } |
| 95 | 86 | ||
| 96 | static inline void irq_settings_clr_norequest(struct irq_desc *desc) | 87 | static inline void irq_settings_clr_norequest(struct irq_desc *desc) |
| 97 | { | 88 | { |
| 98 | desc->status &= ~_IRQ_NOREQUEST; | 89 | desc->status_use_accessors &= ~_IRQ_NOREQUEST; |
| 99 | } | 90 | } |
| 100 | 91 | ||
| 101 | static inline void irq_settings_set_norequest(struct irq_desc *desc) | 92 | static inline void irq_settings_set_norequest(struct irq_desc *desc) |
| 102 | { | 93 | { |
| 103 | desc->status |= _IRQ_NOREQUEST; | 94 | desc->status_use_accessors |= _IRQ_NOREQUEST; |
| 104 | } | 95 | } |
| 105 | 96 | ||
| 106 | static inline bool irq_settings_can_probe(struct irq_desc *desc) | 97 | static inline bool irq_settings_can_probe(struct irq_desc *desc) |
| 107 | { | 98 | { |
| 108 | return !(desc->status & _IRQ_NOPROBE); | 99 | return !(desc->status_use_accessors & _IRQ_NOPROBE); |
| 109 | } | 100 | } |
| 110 | 101 | ||
| 111 | static inline void irq_settings_clr_noprobe(struct irq_desc *desc) | 102 | static inline void irq_settings_clr_noprobe(struct irq_desc *desc) |
| 112 | { | 103 | { |
| 113 | desc->status &= ~_IRQ_NOPROBE; | 104 | desc->status_use_accessors &= ~_IRQ_NOPROBE; |
| 114 | } | 105 | } |
| 115 | 106 | ||
| 116 | static inline void irq_settings_set_noprobe(struct irq_desc *desc) | 107 | static inline void irq_settings_set_noprobe(struct irq_desc *desc) |
| 117 | { | 108 | { |
| 118 | desc->status |= _IRQ_NOPROBE; | 109 | desc->status_use_accessors |= _IRQ_NOPROBE; |
| 119 | } | 110 | } |
| 120 | 111 | ||
| 121 | static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc) | 112 | static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc) |
| 122 | { | 113 | { |
| 123 | return desc->status & _IRQ_MOVE_PCNTXT; | 114 | return desc->status_use_accessors & _IRQ_MOVE_PCNTXT; |
| 124 | } | 115 | } |
| 125 | 116 | ||
| 126 | static inline bool irq_settings_can_autoenable(struct irq_desc *desc) | 117 | static inline bool irq_settings_can_autoenable(struct irq_desc *desc) |
| 127 | { | 118 | { |
| 128 | return !(desc->status & _IRQ_NOAUTOEN); | 119 | return !(desc->status_use_accessors & _IRQ_NOAUTOEN); |
| 129 | } | 120 | } |
| 130 | 121 | ||
| 131 | static inline bool irq_settings_is_nested_thread(struct irq_desc *desc) | 122 | static inline bool irq_settings_is_nested_thread(struct irq_desc *desc) |
| 132 | { | 123 | { |
| 133 | return desc->status & _IRQ_NESTED_THREAD; | 124 | return desc->status_use_accessors & _IRQ_NESTED_THREAD; |
| 134 | } | 125 | } |
| 135 | |||
| 136 | /* Nothing should touch desc->status from now on */ | ||
| 137 | #undef status | ||
| 138 | #define status USE_THE_PROPER_WRAPPERS_YOU_MORON | ||
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index dd586ebf9c8c..dfbd550401b2 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c | |||
| @@ -45,12 +45,12 @@ bool irq_wait_for_poll(struct irq_desc *desc) | |||
| 45 | #ifdef CONFIG_SMP | 45 | #ifdef CONFIG_SMP |
| 46 | do { | 46 | do { |
| 47 | raw_spin_unlock(&desc->lock); | 47 | raw_spin_unlock(&desc->lock); |
| 48 | while (desc->istate & IRQS_INPROGRESS) | 48 | while (irqd_irq_inprogress(&desc->irq_data)) |
| 49 | cpu_relax(); | 49 | cpu_relax(); |
| 50 | raw_spin_lock(&desc->lock); | 50 | raw_spin_lock(&desc->lock); |
| 51 | } while (desc->istate & IRQS_INPROGRESS); | 51 | } while (irqd_irq_inprogress(&desc->irq_data)); |
| 52 | /* Might have been disabled in meantime */ | 52 | /* Might have been disabled in meantime */ |
| 53 | return !(desc->istate & IRQS_DISABLED) && desc->action; | 53 | return !irqd_irq_disabled(&desc->irq_data) && desc->action; |
| 54 | #else | 54 | #else |
| 55 | return false; | 55 | return false; |
| 56 | #endif | 56 | #endif |
| @@ -75,7 +75,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force) | |||
| 75 | * Do not poll disabled interrupts unless the spurious | 75 | * Do not poll disabled interrupts unless the spurious |
| 76 | * disabled poller asks explicitely. | 76 | * disabled poller asks explicitely. |
| 77 | */ | 77 | */ |
| 78 | if ((desc->istate & IRQS_DISABLED) && !force) | 78 | if (irqd_irq_disabled(&desc->irq_data) && !force) |
| 79 | goto out; | 79 | goto out; |
| 80 | 80 | ||
| 81 | /* | 81 | /* |
| @@ -88,12 +88,11 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force) | |||
| 88 | goto out; | 88 | goto out; |
| 89 | 89 | ||
| 90 | /* Already running on another processor */ | 90 | /* Already running on another processor */ |
| 91 | if (desc->istate & IRQS_INPROGRESS) { | 91 | if (irqd_irq_inprogress(&desc->irq_data)) { |
| 92 | /* | 92 | /* |
| 93 | * Already running: If it is shared get the other | 93 | * Already running: If it is shared get the other |
| 94 | * CPU to go looking for our mystery interrupt too | 94 | * CPU to go looking for our mystery interrupt too |
| 95 | */ | 95 | */ |
| 96 | irq_compat_set_pending(desc); | ||
| 97 | desc->istate |= IRQS_PENDING; | 96 | desc->istate |= IRQS_PENDING; |
| 98 | goto out; | 97 | goto out; |
| 99 | } | 98 | } |
diff --git a/kernel/signal.c b/kernel/signal.c index 324eff5468ad..1186cf7fac77 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -2437,7 +2437,7 @@ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, | |||
| 2437 | /* Not even root can pretend to send signals from the kernel. | 2437 | /* Not even root can pretend to send signals from the kernel. |
| 2438 | * Nor can they impersonate a kill()/tgkill(), which adds source info. | 2438 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
| 2439 | */ | 2439 | */ |
| 2440 | if (info.si_code != SI_QUEUE) { | 2440 | if (info.si_code >= 0 || info.si_code == SI_TKILL) { |
| 2441 | /* We used to allow any < 0 si_code */ | 2441 | /* We used to allow any < 0 si_code */ |
| 2442 | WARN_ON_ONCE(info.si_code < 0); | 2442 | WARN_ON_ONCE(info.si_code < 0); |
| 2443 | return -EPERM; | 2443 | return -EPERM; |
| @@ -2457,7 +2457,7 @@ long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) | |||
| 2457 | /* Not even root can pretend to send signals from the kernel. | 2457 | /* Not even root can pretend to send signals from the kernel. |
| 2458 | * Nor can they impersonate a kill()/tgkill(), which adds source info. | 2458 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
| 2459 | */ | 2459 | */ |
| 2460 | if (info->si_code != SI_QUEUE) { | 2460 | if (info->si_code >= 0 || info->si_code == SI_TKILL) { |
| 2461 | /* We used to allow any < 0 si_code */ | 2461 | /* We used to allow any < 0 si_code */ |
| 2462 | WARN_ON_ONCE(info->si_code < 0); | 2462 | WARN_ON_ONCE(info->si_code < 0); |
| 2463 | return -EPERM; | 2463 | return -EPERM; |
