Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/irq/autoprobe.c |   9
 -rw-r--r--  kernel/irq/handle.c    |   2
 -rw-r--r--  kernel/irq/spurious.c  | 113
 -rw-r--r--  kernel/itimer.c        |   8
 -rw-r--r--  kernel/kexec.c         |  10
 -rw-r--r--  kernel/sched.c         |   9
 6 files changed, 135 insertions, 16 deletions
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 98d62d8efeaf..3467097ca61a 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -9,6 +9,7 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/delay.h>
 
 /*
  * Autodetection depends on the fact that any interrupt that
@@ -26,7 +27,7 @@ static DECLARE_MUTEX(probe_sem);
  */
 unsigned long probe_irq_on(void)
 {
-	unsigned long val, delay;
+	unsigned long val;
 	irq_desc_t *desc;
 	unsigned int i;
 
@@ -45,8 +46,7 @@ unsigned long probe_irq_on(void)
 	}
 
 	/* Wait for longstanding interrupts to trigger. */
-	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
-		/* about 20ms delay */ barrier();
+	msleep(20);
 
 	/*
 	 * enable any unassigned irqs
@@ -68,8 +68,7 @@ unsigned long probe_irq_on(void)
 	/*
 	 * Wait for spurious interrupts to trigger
 	 */
-	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
-		/* about 100ms delay */ barrier();
+	msleep(100);
 
 	/*
 	 * Now filter out any obviously spurious interrupts
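
The two hunks above replace open-coded jiffies busy-waits with sleeping delays; msleep() is declared in <linux/delay.h>, which is why that include is added. A minimal sketch of the before/after pattern (the wrapper function is hypothetical, added only to make the fragment self-contained):

#include <linux/delay.h>
#include <linux/jiffies.h>

/* Sketch only: contrast the removed busy-wait with the sleeping wait. */
static void wait_20ms_examples(void)
{
	unsigned long delay;

	/* Before: spin for HZ/50 jiffies, i.e. 1/50 s = 20 ms at any HZ */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		barrier();

	/* After: sleep for at least 20 ms and let other tasks run */
	msleep(20);
}
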
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 436c7d93c00a..c29f83c16497 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -172,7 +172,7 @@ fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
 
 		spin_lock(&desc->lock);
 		if (!noirqdebug)
-			note_interrupt(irq, desc, action_ret);
+			note_interrupt(irq, desc, action_ret, regs);
 		if (likely(!(desc->status & IRQ_PENDING)))
 			break;
 		desc->status &= ~IRQ_PENDING;
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index ba039e827d58..7df9abd5ec86 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -11,6 +11,83 @@
 #include <linux/kallsyms.h>
 #include <linux/interrupt.h>
 
+static int irqfixup;
+
+/*
+ * Recovery handler for misrouted interrupts.
+ */
+
+static int misrouted_irq(int irq, struct pt_regs *regs)
+{
+	int i;
+	irq_desc_t *desc;
+	int ok = 0;
+	int work = 0;	/* Did we do work for a real IRQ */
+
+	for(i = 1; i < NR_IRQS; i++) {
+		struct irqaction *action;
+
+		if (i == irq)	/* Already tried */
+			continue;
+		desc = &irq_desc[i];
+		spin_lock(&desc->lock);
+		action = desc->action;
+		/* Already running on another processor */
+		if (desc->status & IRQ_INPROGRESS) {
+			/*
+			 * Already running: If it is shared get the other
+			 * CPU to go looking for our mystery interrupt too
+			 */
+			if (desc->action && (desc->action->flags & SA_SHIRQ))
+				desc->status |= IRQ_PENDING;
+			spin_unlock(&desc->lock);
+			continue;
+		}
+		/* Honour the normal IRQ locking */
+		desc->status |= IRQ_INPROGRESS;
+		spin_unlock(&desc->lock);
+		while (action) {
+			/* Only shared IRQ handlers are safe to call */
+			if (action->flags & SA_SHIRQ) {
+				if (action->handler(i, action->dev_id, regs) ==
+						IRQ_HANDLED)
+					ok = 1;
+			}
+			action = action->next;
+		}
+		local_irq_disable();
+		/* Now clean up the flags */
+		spin_lock(&desc->lock);
+		action = desc->action;
+
+		/*
+		 * While we were looking for a fixup someone queued a real
+		 * IRQ clashing with our walk
+		 */
+
+		while ((desc->status & IRQ_PENDING) && action) {
+			/*
+			 * Perform real IRQ processing for the IRQ we deferred
+			 */
+			work = 1;
+			spin_unlock(&desc->lock);
+			handle_IRQ_event(i, regs, action);
+			spin_lock(&desc->lock);
+			desc->status &= ~IRQ_PENDING;
+		}
+		desc->status &= ~IRQ_INPROGRESS;
+		/*
+		 * If we did actual work for the real IRQ line we must let the
+		 * IRQ controller clean up too
+		 */
+		if(work)
+			desc->handler->end(i);
+		spin_unlock(&desc->lock);
+	}
+	/* So the caller can adjust the irq error counts */
+	return ok;
+}
+
 /*
  * If 99,900 of the previous 100,000 interrupts have not been handled
  * then assume that the IRQ is stuck in some manner. Drop a diagnostic
@@ -31,7 +108,8 @@ __report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
 		printk(KERN_ERR "irq event %d: bogus return value %x\n",
 				irq, action_ret);
 	} else {
-		printk(KERN_ERR "irq %d: nobody cared!\n", irq);
+		printk(KERN_ERR "irq %d: nobody cared (try booting with "
+				"the \"irqpoll\" option)\n", irq);
 	}
 	dump_stack();
 	printk(KERN_ERR "handlers:\n");
@@ -55,7 +133,8 @@ static void report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t actio
 	}
 }
 
-void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
+void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret,
+			struct pt_regs *regs)
 {
 	if (action_ret != IRQ_HANDLED) {
 		desc->irqs_unhandled++;
@@ -63,6 +142,15 @@ void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
 			report_bad_irq(irq, desc, action_ret);
 	}
 
+	if (unlikely(irqfixup)) {
+		/* Don't punish working computers */
+		if ((irqfixup == 2 && irq == 0) || action_ret == IRQ_NONE) {
+			int ok = misrouted_irq(irq, regs);
+			if (action_ret == IRQ_NONE)
+				desc->irqs_unhandled -= ok;
+		}
+	}
+
 	desc->irq_count++;
 	if (desc->irq_count < 100000)
 		return;
@@ -94,3 +182,24 @@ int __init noirqdebug_setup(char *str)
 
 __setup("noirqdebug", noirqdebug_setup);
 
+static int __init irqfixup_setup(char *str)
+{
+	irqfixup = 1;
+	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
+	printk(KERN_WARNING "This may impact system performance.\n");
+	return 1;
+}
+
+__setup("irqfixup", irqfixup_setup);
+
+static int __init irqpoll_setup(char *str)
+{
+	irqfixup = 2;
+	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
+		"enabled\n");
+	printk(KERN_WARNING "This may significantly impact system "
+		"performance\n");
+	return 1;
+}
+
+__setup("irqpoll", irqpoll_setup);
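
misrouted_irq() above deliberately calls only handlers registered with SA_SHIRQ: a shared handler must already check its own device and return IRQ_NONE when the interrupt is not its own, so it tolerates the speculative polling this patch introduces. A sketch of such a handler, with hypothetical device and register names:

#include <linux/interrupt.h>
#include <asm/io.h>

/* All device names and registers below are hypothetical. */
struct mydev {
	void __iomem *mmio;
};
#define MYDEV_ISR 0x04	/* hypothetical interrupt status register */

static irqreturn_t mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct mydev *dev = dev_id;
	u32 pending = readl(dev->mmio + MYDEV_ISR);

	if (!pending)		/* shared line or a speculative poll: not ours */
		return IRQ_NONE;

	writel(pending, dev->mmio + MYDEV_ISR);	/* acknowledge */
	/* ... service the device ... */
	return IRQ_HANDLED;
}
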
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 1dc988e0d2c7..a72cb0e5aa4b 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -153,11 +153,15 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
 
 	switch (which) {
 	case ITIMER_REAL:
+again:
 		spin_lock_irq(&tsk->sighand->siglock);
 		interval = tsk->signal->it_real_incr;
 		val = it_real_value(tsk->signal);
-		if (val)
-			del_timer_sync(&tsk->signal->real_timer);
+		/* We are sharing ->siglock with it_real_fn() */
+		if (try_to_del_timer_sync(&tsk->signal->real_timer) < 0) {
+			spin_unlock_irq(&tsk->sighand->siglock);
+			goto again;
+		}
 		tsk->signal->it_real_incr =
 			timeval_to_jiffies(&value->it_interval);
 		it_real_arm(tsk, timeval_to_jiffies(&value->it_value));
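
try_to_del_timer_sync() returns a negative value when the timer handler is currently running on another CPU. Because the handler shares ->siglock with this path (as the added comment notes), blocking in del_timer_sync() while holding the lock could deadlock, so the patch drops the lock and retries. The same idiom in isolation, as a hypothetical helper:

#include <linux/timer.h>
#include <linux/spinlock.h>

/* Hypothetical helper sketching the retry idiom used above. */
static void rearm_timer_locked(spinlock_t *lock, struct timer_list *timer,
				unsigned long expires)
{
again:
	spin_lock_irq(lock);
	if (try_to_del_timer_sync(timer) < 0) {
		/* The handler is running and may take this lock: back off. */
		spin_unlock_irq(lock);
		goto again;
	}
	/* The timer is inactive and not running anywhere: safe to re-arm. */
	timer->expires = expires;
	add_timer(timer);
	spin_unlock_irq(lock);
}
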
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 7843548cf2d9..cdd4dcd8fb63 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -241,7 +241,7 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
 
 static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
 				unsigned long nr_segments,
-				struct kexec_segment *segments)
+				struct kexec_segment __user *segments)
 {
 	int result;
 	struct kimage *image;
@@ -650,7 +650,7 @@ static kimage_entry_t *kimage_dst_used(struct kimage *image,
 		}
 	}
 
-	return 0;
+	return NULL;
 }
 
 static struct page *kimage_alloc_page(struct kimage *image,
@@ -696,7 +696,7 @@ static struct page *kimage_alloc_page(struct kimage *image,
 		/* Allocate a page, if we run out of memory give up */
 		page = kimage_alloc_pages(gfp_mask, 0);
 		if (!page)
-			return 0;
+			return NULL;
 		/* If the page cannot be used file it away */
 		if (page_to_pfn(page) >
 				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
@@ -754,7 +754,7 @@ static int kimage_load_normal_segment(struct kimage *image,
 	unsigned long maddr;
 	unsigned long ubytes, mbytes;
 	int result;
-	unsigned char *buf;
+	unsigned char __user *buf;
 
 	result = 0;
 	buf = segment->buf;
@@ -818,7 +818,7 @@ static int kimage_load_crash_segment(struct kimage *image,
 	unsigned long maddr;
 	unsigned long ubytes, mbytes;
 	int result;
-	unsigned char *buf;
+	unsigned char __user *buf;
 
 	result = 0;
 	buf = segment->buf;
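
The kexec.c hunks are annotation fixes rather than behaviour changes: pointers that originate in userspace gain the __user marker so sparse (make C=1) can flag direct dereferences, and pointer-returning functions return NULL instead of 0. A minimal sketch of the boundary discipline __user enforces, with hypothetical names:

#include <linux/errno.h>
#include <asm/uaccess.h>

/* Hypothetical function; the point is the copy at the user boundary. */
static int fetch_flag(unsigned int __user *uptr, unsigned int *out)
{
	unsigned int val;

	/* A direct "*uptr" here would be flagged by sparse. */
	if (copy_from_user(&val, uptr, sizeof(val)))
		return -EFAULT;

	*out = val;
	return 0;
}
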
diff --git a/kernel/sched.c b/kernel/sched.c
index e2b0d3e4dd06..5f2182d42241 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4166,6 +4166,14 @@ void show_state(void)
 	read_unlock(&tasklist_lock);
 }
 
+/**
+ * init_idle - set up an idle thread for a given CPU
+ * @idle: task in question
+ * @cpu: cpu the idle task belongs to
+ *
+ * NOTE: this function does not set the idle thread's NEED_RESCHED
+ * flag, to make booting more robust.
+ */
 void __devinit init_idle(task_t *idle, int cpu)
 {
 	runqueue_t *rq = cpu_rq(cpu);
@@ -4183,7 +4191,6 @@ void __devinit init_idle(task_t *idle, int cpu)
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	idle->oncpu = 1;
 #endif
-	set_tsk_need_resched(idle);
 	spin_unlock_irqrestore(&rq->lock, flags);
 
 	/* Set the preempt count _outside_ the spinlocks! */