Diffstat (limited to 'kernel')

 kernel/irq/internals.h |  6 ++++++
 kernel/irq/irqdesc.c   | 11 ++++++++++-
 kernel/irq/manage.c    |  2 +-
 kernel/irq/resend.c    |  2 +-
 kernel/perf_event.c    | 19 +++++++++++++++----
 5 files changed, 33 insertions(+), 7 deletions(-)
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 4571ae7e085a..99c3bc8a6fb4 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -3,6 +3,12 @@
  */
 #include <linux/irqdesc.h>
 
+#ifdef CONFIG_SPARSE_IRQ
+# define IRQ_BITMAP_BITS	(NR_IRQS + 8196)
+#else
+# define IRQ_BITMAP_BITS	NR_IRQS
+#endif
+
 extern int noirqdebug;
 
 #define irq_data_to_desc(data)	container_of(data, struct irq_desc, irq_data)
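Background for the hunk above: with CONFIG_SPARSE_IRQ the runtime nr_irqs may be raised past the build-time NR_IRQS, so a bitmap declared with only NR_IRQS bits can be indexed out of bounds; IRQ_BITMAP_BITS adds headroom for that case. The following is a minimal userspace sketch of how such a bitmap is sized, using local stand-ins for the kernel's BITS_TO_LONGS/DECLARE_BITMAP macros; the constants are illustrative, not a real configuration.

/* Minimal userspace sketch of the bitmap sizing. BITS_TO_LONGS and
 * DECLARE_BITMAP are local reimplementations of the kernel macros,
 * and NR_IRQS here is a hypothetical build-time limit. */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

#define NR_IRQS		1024			/* hypothetical build-time limit */
#define IRQ_BITMAP_BITS	(NR_IRQS + 8196)	/* headroom, as in the patch */

static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

int main(void)
{
	/* Setting a bit >= NR_IRQS is now in bounds, because the array
	 * was sized for IRQ_BITMAP_BITS rather than NR_IRQS. */
	unsigned int irq = NR_IRQS + 100;

	allocated_irqs[irq / BITS_PER_LONG] |= 1UL << (irq % BITS_PER_LONG);
	printf("bitmap: %zu longs (%zu bits)\n",
	       sizeof(allocated_irqs) / sizeof(unsigned long),
	       sizeof(allocated_irqs) * CHAR_BIT);
	return 0;
}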
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 282f20230e67..2039bea31bdf 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -94,7 +94,7 @@ int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
 static DEFINE_MUTEX(sparse_irq_lock);
-static DECLARE_BITMAP(allocated_irqs, NR_IRQS);
+static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
 
 #ifdef CONFIG_SPARSE_IRQ
 
@@ -217,6 +217,15 @@ int __init early_irq_init(void)
 	initcnt = arch_probe_nr_irqs();
 	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);
 
+	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
+		nr_irqs = IRQ_BITMAP_BITS;
+
+	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
+		initcnt = IRQ_BITMAP_BITS;
+
+	if (initcnt > nr_irqs)
+		nr_irqs = initcnt;
+
 	for (i = 0; i < initcnt; i++) {
 		desc = alloc_desc(i, node);
 		set_bit(i, allocated_irqs);
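The clamps inserted into early_irq_init() keep nr_irqs and initcnt within the bits actually declared for allocated_irqs before any set_bit() call touches the bitmap. As a rough illustration of why the allocation bound must be the bitmap's declared size, here is a hedged toy allocator over a fixed bitmap; the names (alloc_id, BITMAP_BITS) are hypothetical, not kernel API.

/* Toy model (userspace, hypothetical names): hand out the first free
 * ID from a bitmap, bounded by the bitmap's declared size. Mirrors in
 * spirit how allocated_irqs tracks which descriptors exist. */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_BITS	128	/* stand-in for IRQ_BITMAP_BITS */

static unsigned long bitmap[(BITMAP_BITS + BITS_PER_LONG - 1) / BITS_PER_LONG];

/* Return the first clear bit below 'limit' and set it, or -1 if full.
 * If 'limit' exceeded BITMAP_BITS (an unclamped nr_irqs), the reads
 * and writes below would run off the end of 'bitmap'. */
static int alloc_id(unsigned int limit)
{
	for (unsigned int i = 0; i < limit; i++) {
		unsigned long mask = 1UL << (i % BITS_PER_LONG);

		if (!(bitmap[i / BITS_PER_LONG] & mask)) {
			bitmap[i / BITS_PER_LONG] |= mask;
			return (int)i;
		}
	}
	return -1;
}

int main(void)
{
	printf("first id:  %d\n", alloc_id(BITMAP_BITS));	/* 0 */
	printf("second id: %d\n", alloc_id(BITMAP_BITS));	/* 1 */
	return 0;
}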
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0caa59f747dd..9033c1c70828 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1100,7 +1100,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	if (retval)
 		kfree(action);
 
-#ifdef CONFIG_DEBUG_SHIRQ
+#ifdef CONFIG_DEBUG_SHIRQ_FIXME
 	if (!retval && (irqflags & IRQF_SHARED)) {
 		/*
 		 * It's a shared IRQ -- the driver ought to be prepared for it
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 891115a929aa..dc49358b73fa 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -23,7 +23,7 @@
 #ifdef CONFIG_HARDIRQS_SW_RESEND
 
 /* Bitmap to handle software resend of interrupts: */
-static DECLARE_BITMAP(irqs_resend, NR_IRQS);
+static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
 
 /*
  * Run software resends of IRQ's
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 999835b6112b..656222fcf767 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -782,6 +782,10 @@ retry:
 	raw_spin_unlock_irq(&ctx->lock);
 }
 
+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_event *event, int enable);
+
 static int
 event_sched_in(struct perf_event *event,
 		 struct perf_cpu_context *cpuctx,
@@ -794,6 +798,17 @@ event_sched_in(struct perf_event *event,
 
 	event->state = PERF_EVENT_STATE_ACTIVE;
 	event->oncpu = smp_processor_id();
+
+	/*
+	 * Unthrottle events, since we scheduled we might have missed several
+	 * ticks already, also for a heavily scheduling task there is little
+	 * guarantee it'll get a tick in a timely manner.
+	 */
+	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
+		perf_log_throttle(event, 1);
+		event->hw.interrupts = 0;
+	}
+
 	/*
 	 * The new state must be visible before we turn it on in the hardware:
 	 */
@@ -1596,10 +1611,6 @@ void __perf_event_task_sched_in(struct task_struct *task)
 	}
 }
 
-#define MAX_INTERRUPTS (~0ULL)
-
-static void perf_log_throttle(struct perf_event *event, int enable);
-
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
 	u64 frequency = event->attr.sample_freq;
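Context for the perf_event.c change: when an event overflows too often, the overflow path marks it throttled by setting hw.interrupts to MAX_INTERRUPTS, and before this patch only the timer tick would unthrottle it. Doing it from event_sched_in() as well covers tasks that schedule so frequently they may miss ticks, as the added comment explains. Below is a hedged toy model of that state machine in plain C; MAX_PER_TICK and both functions are hypothetical stand-ins, not the kernel's actual code paths.

/* Toy model of the throttle state the patch manipulates:
 * 'interrupts' counts overflows per tick (like event->hw.interrupts)
 * and is set to MAX_INTERRUPTS while the event is throttled. */
#include <stdio.h>

#define MAX_INTERRUPTS	(~0ULL)
#define MAX_PER_TICK	3	/* hypothetical overflow budget per tick */

static unsigned long long interrupts;	/* models event->hw.interrupts */

/* Overflow (PMI) path: throttle once the per-tick budget is exceeded. */
static int overflow(void)
{
	if (interrupts == MAX_INTERRUPTS)
		return 1;			/* already throttled */
	if (++interrupts >= MAX_PER_TICK) {
		interrupts = MAX_INTERRUPTS;	/* mark throttled */
		return 1;
	}
	return 0;
}

/* Sched-in path: the patch unthrottles here too, because a heavily
 * scheduling task has no guarantee of getting a timer tick soon. */
static void sched_in(void)
{
	if (interrupts == MAX_INTERRUPTS)
		interrupts = 0;			/* unthrottle */
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("overflow %d: throttled=%d\n", i, overflow());
	sched_in();
	printf("after sched_in: interrupts=%llu\n", interrupts);
	return 0;
}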
