 arch/alpha/include/asm/hardirq.h | 13 -------------
 arch/avr32/include/asm/hardirq.h | 11 -----------
 arch/ia64/include/asm/hardirq.h  | 10 ----------
 include/linux/hardirq.h          | 48 ++++++++++++++++++++++----------------
 4 files changed, 24 insertions(+), 58 deletions(-)
diff --git a/arch/alpha/include/asm/hardirq.h b/arch/alpha/include/asm/hardirq.h
index d953e234daa8..88971460fa6c 100644
--- a/arch/alpha/include/asm/hardirq.h
+++ b/arch/alpha/include/asm/hardirq.h
@@ -14,17 +14,4 @@ typedef struct {
 
 void ack_bad_irq(unsigned int irq);
 
-#define HARDIRQ_BITS	12
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially nestable IRQ sources in the system
- * to nest on a single CPU. On Alpha, interrupts are masked at the CPU
- * by IPL as well as at the system level. We only have 8 IPLs (UNIX PALcode)
- * so we really only have 8 nestable IRQs, but allow some overhead
- */
-#if (1 << HARDIRQ_BITS) < 16
-#error HARDIRQ_BITS is too low!
-#endif
-
 #endif /* _ALPHA_HARDIRQ_H */
diff --git a/arch/avr32/include/asm/hardirq.h b/arch/avr32/include/asm/hardirq.h
index 267354356f60..015bc75ea798 100644
--- a/arch/avr32/include/asm/hardirq.h
+++ b/arch/avr32/include/asm/hardirq.h
@@ -20,15 +20,4 @@ void ack_bad_irq(unsigned int irq);
 
 #endif /* __ASSEMBLY__ */
 
-#define HARDIRQ_BITS	12
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
 #endif /* __ASM_AVR32_HARDIRQ_H */
diff --git a/arch/ia64/include/asm/hardirq.h b/arch/ia64/include/asm/hardirq.h
index 140e495b8e0e..d514cd9edb49 100644
--- a/arch/ia64/include/asm/hardirq.h
+++ b/arch/ia64/include/asm/hardirq.h
@@ -20,16 +20,6 @@
 
 #define local_softirq_pending()		(local_cpu_data->softirq_pending)
 
-#define HARDIRQ_BITS	14
-
-/*
- * The hardirq mask has to be large enough to have space for potentially all IRQ sources
- * in the system nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
 extern void __iomem *ipi_base_addr;
 
 void ack_bad_irq(unsigned int irq);
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f3cf86e1465b..9841221f53f2 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -15,61 +15,61 @@
  * - bits 0-7 are the preemption count (max preemption depth: 256)
  * - bits 8-15 are the softirq count (max # of softirqs: 256)
  *
- * The hardirq count can be overridden per architecture, the default is:
+ * The hardirq count can in theory reach the same as NR_IRQS.
+ * In reality, the number of nested IRQS is limited to the stack
+ * size as well. For archs with over 1000 IRQS it is not practical
+ * to expect that they will all nest. We give a max of 10 bits for
+ * hardirq nesting. An arch may choose to give less than 10 bits.
+ * m68k expects it to be 8.
  *
- * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
- * - ( bit 28 is the PREEMPT_ACTIVE flag. )
+ * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
+ * - bit 26 is the NMI_MASK
+ * - bit 28 is the PREEMPT_ACTIVE flag
  *
  * PREEMPT_MASK: 0x000000ff
  * SOFTIRQ_MASK: 0x0000ff00
- * HARDIRQ_MASK: 0x0fff0000
+ * HARDIRQ_MASK: 0x03ff0000
+ *     NMI_MASK: 0x04000000
  */
 #define PREEMPT_BITS	8
 #define SOFTIRQ_BITS	8
+#define NMI_BITS	1
 
-#ifndef HARDIRQ_BITS
-#define HARDIRQ_BITS	12
+#define MAX_HARDIRQ_BITS 10
 
-#ifndef MAX_HARDIRQS_PER_CPU
-#define MAX_HARDIRQS_PER_CPU NR_IRQS
+#ifndef HARDIRQ_BITS
+# define HARDIRQ_BITS	MAX_HARDIRQ_BITS
 #endif
 
-/*
- * The hardirq mask has to be large enough to have space for potentially
- * all IRQ sources in the system nesting on a single CPU.
- */
-#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
-# error HARDIRQ_BITS is too low!
-#endif
+#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
+#error HARDIRQ_BITS too high!
 #endif
 
 #define PREEMPT_SHIFT	0
 #define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
 #define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)
 
 #define __IRQ_MASK(x)	((1UL << (x))-1)
 
 #define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
 #define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
 #define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
 
 #define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
 #define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
 #define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
+#define NMI_OFFSET	(1UL << NMI_SHIFT)
 
-#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
+#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
 #error PREEMPT_ACTIVE is too low!
 #endif
 
-#define NMI_OFFSET	(PREEMPT_ACTIVE << 1)
-
-#if NMI_OFFSET >= 0x80000000
-#error PREEMPT_ACTIVE too high!
-#endif
-
 #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
 #define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
-#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
+#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+				 | NMI_MASK))
 
 /*
  * Are we doing bottom half or hardware interrupt processing?
@@ -82,7 +82,7 @@
 /*
  * Are we in NMI context?
  */
-#define in_nmi()	(preempt_count() & NMI_OFFSET)
+#define in_nmi()	(preempt_count() & NMI_MASK)
 
 #if defined(CONFIG_PREEMPT)
 # define PREEMPT_INATOMIC_BASE kernel_locked()
