author    Artem B. Bityuckiy <dedekind@infradead.org>  2005-07-06 10:43:18 -0400
committer Thomas Gleixner <tglx@mtd.linutronix.de>     2005-07-06 13:40:38 -0400
commit    b3539219c9ea20ebf6a5ea3cc534f423a3607c41 (patch)
tree      d17c31c0eac0a7290ba5011b59a100fd9e9c9532 /kernel
parent    6430a8def12edebc1c9c7c2621d33ca0e8653c33 (diff)
parent    a18bcb7450840f07a772a45229de4811d930f461 (diff)

Merge with rsync://fileserver/linux

Update to 2.6.12-rc3
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/exit.c            2
-rw-r--r--   kernel/fork.c            5
-rw-r--r--   kernel/irq/autoprobe.c   9
-rw-r--r--   kernel/irq/handle.c      2
-rw-r--r--   kernel/irq/spurious.c  113
-rw-r--r--   kernel/itimer.c          8
-rw-r--r--   kernel/kexec.c          10
-rw-r--r--   kernel/kprobes.c       170
-rw-r--r--   kernel/sched.c          17
9 files changed, 262 insertions, 74 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index 3ebcd60a19c6..9d1b10ed0135 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -784,6 +784,8 @@ fastcall NORET_TYPE void do_exit(long code)
 
 	profile_task_exit(tsk);
 
+	WARN_ON(atomic_read(&tsk->fs_excl));
+
 	if (unlikely(in_interrupt()))
 		panic("Aiee, killing interrupt handler!");
 	if (unlikely(!tsk->pid))
diff --git a/kernel/fork.c b/kernel/fork.c
index 2c7806873bfd..cdef6cea8900 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1090,6 +1090,11 @@ static task_t *copy_process(unsigned long clone_flags,
 		spin_unlock(&current->sighand->siglock);
 	}
 
+	/*
+	 * inherit ioprio
+	 */
+	p->ioprio = current->ioprio;
+
 	SET_LINKS(p);
 	if (unlikely(p->ptrace & PT_PTRACED))
 		__ptrace_link(p, current->parent);
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 98d62d8efeaf..3467097ca61a 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -9,6 +9,7 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/delay.h>
 
 /*
  * Autodetection depends on the fact that any interrupt that
@@ -26,7 +27,7 @@ static DECLARE_MUTEX(probe_sem);
  */
 unsigned long probe_irq_on(void)
 {
-	unsigned long val, delay;
+	unsigned long val;
 	irq_desc_t *desc;
 	unsigned int i;
 
@@ -45,8 +46,7 @@ unsigned long probe_irq_on(void)
 	}
 
 	/* Wait for longstanding interrupts to trigger. */
-	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
-		/* about 20ms delay */ barrier();
+	msleep(20);
 
 	/*
 	 * enable any unassigned irqs
@@ -68,8 +68,7 @@ unsigned long probe_irq_on(void)
 	/*
 	 * Wait for spurious interrupts to trigger
 	 */
-	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
-		/* about 100ms delay */ barrier();
+	msleep(100);
 
 	/*
 	 * Now filter out any obviously spurious interrupts
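
A side note on the probe_irq_on() change above: the open-coded jiffies busy-waits are replaced by msleep(), which is why <linux/delay.h> is now included. A minimal sketch of the two idioms, for illustration only (not part of the patch):

	/* Old idiom: spin until ~20ms of jiffies have passed, keeping the CPU busy. */
	unsigned long delay;

	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		barrier();	/* keep the compiler from optimising the loop away */

	/* New idiom: sleep for the same interval and let other tasks run. */
	msleep(20);
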
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 436c7d93c00a..c29f83c16497 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -172,7 +172,7 @@ fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
 
 		spin_lock(&desc->lock);
 		if (!noirqdebug)
-			note_interrupt(irq, desc, action_ret);
+			note_interrupt(irq, desc, action_ret, regs);
 		if (likely(!(desc->status & IRQ_PENDING)))
 			break;
 		desc->status &= ~IRQ_PENDING;
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index ba039e827d58..7df9abd5ec86 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -11,6 +11,83 @@
 #include <linux/kallsyms.h>
 #include <linux/interrupt.h>
 
+static int irqfixup;
+
+/*
+ * Recovery handler for misrouted interrupts.
+ */
+
+static int misrouted_irq(int irq, struct pt_regs *regs)
+{
+	int i;
+	irq_desc_t *desc;
+	int ok = 0;
+	int work = 0;	/* Did we do work for a real IRQ */
+
+	for(i = 1; i < NR_IRQS; i++) {
+		struct irqaction *action;
+
+		if (i == irq)	/* Already tried */
+			continue;
+		desc = &irq_desc[i];
+		spin_lock(&desc->lock);
+		action = desc->action;
+		/* Already running on another processor */
+		if (desc->status & IRQ_INPROGRESS) {
+			/*
+			 * Already running: If it is shared get the other
+			 * CPU to go looking for our mystery interrupt too
+			 */
+			if (desc->action && (desc->action->flags & SA_SHIRQ))
+				desc->status |= IRQ_PENDING;
+			spin_unlock(&desc->lock);
+			continue;
+		}
+		/* Honour the normal IRQ locking */
+		desc->status |= IRQ_INPROGRESS;
+		spin_unlock(&desc->lock);
+		while (action) {
+			/* Only shared IRQ handlers are safe to call */
+			if (action->flags & SA_SHIRQ) {
+				if (action->handler(i, action->dev_id, regs) ==
+						IRQ_HANDLED)
+					ok = 1;
+			}
+			action = action->next;
+		}
+		local_irq_disable();
+		/* Now clean up the flags */
+		spin_lock(&desc->lock);
+		action = desc->action;
+
+		/*
+		 * While we were looking for a fixup someone queued a real
+		 * IRQ clashing with our walk
+		 */
+
+		while ((desc->status & IRQ_PENDING) && action) {
+			/*
+			 * Perform real IRQ processing for the IRQ we deferred
+			 */
+			work = 1;
+			spin_unlock(&desc->lock);
+			handle_IRQ_event(i, regs, action);
+			spin_lock(&desc->lock);
+			desc->status &= ~IRQ_PENDING;
+		}
+		desc->status &= ~IRQ_INPROGRESS;
+		/*
+		 * If we did actual work for the real IRQ line we must let the
+		 * IRQ controller clean up too
+		 */
+		if(work)
+			desc->handler->end(i);
+		spin_unlock(&desc->lock);
+	}
+	/* So the caller can adjust the irq error counts */
+	return ok;
+}
+
 /*
  * If 99,900 of the previous 100,000 interrupts have not been handled
  * then assume that the IRQ is stuck in some manner. Drop a diagnostic
@@ -31,7 +108,8 @@ __report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
 		printk(KERN_ERR "irq event %d: bogus return value %x\n",
 				irq, action_ret);
 	} else {
-		printk(KERN_ERR "irq %d: nobody cared!\n", irq);
+		printk(KERN_ERR "irq %d: nobody cared (try booting with "
+				"the \"irqpoll\" option)\n", irq);
 	}
 	dump_stack();
 	printk(KERN_ERR "handlers:\n");
@@ -55,7 +133,8 @@ static void report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t actio
 	}
 }
 
-void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
+void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret,
+			struct pt_regs *regs)
 {
 	if (action_ret != IRQ_HANDLED) {
 		desc->irqs_unhandled++;
@@ -63,6 +142,15 @@ void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
 			report_bad_irq(irq, desc, action_ret);
 	}
 
+	if (unlikely(irqfixup)) {
+		/* Don't punish working computers */
+		if ((irqfixup == 2 && irq == 0) || action_ret == IRQ_NONE) {
+			int ok = misrouted_irq(irq, regs);
+			if (action_ret == IRQ_NONE)
+				desc->irqs_unhandled -= ok;
+		}
+	}
+
 	desc->irq_count++;
 	if (desc->irq_count < 100000)
 		return;
@@ -94,3 +182,24 @@ int __init noirqdebug_setup(char *str)
 
 __setup("noirqdebug", noirqdebug_setup);
 
+static int __init irqfixup_setup(char *str)
+{
+	irqfixup = 1;
+	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
+	printk(KERN_WARNING "This may impact system performance.\n");
+	return 1;
+}
+
+__setup("irqfixup", irqfixup_setup);
+
+static int __init irqpoll_setup(char *str)
+{
+	irqfixup = 2;
+	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
+		"enabled\n");
+	printk(KERN_WARNING "This may significantly impact system "
+		"performance\n");
+	return 1;
+}
+
+__setup("irqpoll", irqpoll_setup);
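
The polling in misrouted_irq() above calls only handlers registered with SA_SHIRQ, because a correctly written shared handler must check whether its own device raised the interrupt and return IRQ_NONE otherwise, so a speculative call is harmless. A sketch of such a handler, with mydev and mydev_pending() as made-up names for illustration:

	static irqreturn_t mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
	{
		struct mydev *dev = dev_id;	/* passed to request_irq(..., SA_SHIRQ, ..., dev) */

		if (!mydev_pending(dev))	/* not our interrupt: safe no-op */
			return IRQ_NONE;

		/* ... acknowledge and service the device ... */
		return IRQ_HANDLED;
	}
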
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 1dc988e0d2c7..a72cb0e5aa4b 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -153,11 +153,15 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
 
 	switch (which) {
 	case ITIMER_REAL:
+again:
 		spin_lock_irq(&tsk->sighand->siglock);
 		interval = tsk->signal->it_real_incr;
 		val = it_real_value(tsk->signal);
-		if (val)
-			del_timer_sync(&tsk->signal->real_timer);
+		/* We are sharing ->siglock with it_real_fn() */
+		if (try_to_del_timer_sync(&tsk->signal->real_timer) < 0) {
+			spin_unlock_irq(&tsk->sighand->siglock);
+			goto again;
+		}
 		tsk->signal->it_real_incr =
 			timeval_to_jiffies(&value->it_interval);
 		it_real_arm(tsk, timeval_to_jiffies(&value->it_value));
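
The do_setitimer() change relies on try_to_del_timer_sync(), which returns a negative value instead of blocking when the timer callback is currently running; because it_real_fn() also takes ->siglock, blocking in del_timer_sync() while holding that lock could deadlock. A condensed sketch of the retry idiom used above:

	again:
		spin_lock_irq(&tsk->sighand->siglock);
		/* < 0: the handler is running on another CPU right now */
		if (try_to_del_timer_sync(&tsk->signal->real_timer) < 0) {
			spin_unlock_irq(&tsk->sighand->siglock);	/* let it finish */
			goto again;
		}
		/* timer is now inactive: safe to reprogram it under the lock */
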
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 7843548cf2d9..cdd4dcd8fb63 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -241,7 +241,7 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
 
 static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
 				unsigned long nr_segments,
-				struct kexec_segment *segments)
+				struct kexec_segment __user *segments)
 {
 	int result;
 	struct kimage *image;
@@ -650,7 +650,7 @@ static kimage_entry_t *kimage_dst_used(struct kimage *image,
 		}
 	}
 
-	return 0;
+	return NULL;
 }
 
 static struct page *kimage_alloc_page(struct kimage *image,
@@ -696,7 +696,7 @@ static struct page *kimage_alloc_page(struct kimage *image,
 	/* Allocate a page, if we run out of memory give up */
 	page = kimage_alloc_pages(gfp_mask, 0);
 	if (!page)
-		return 0;
+		return NULL;
 	/* If the page cannot be used file it away */
 	if (page_to_pfn(page) >
 		(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
@@ -754,7 +754,7 @@ static int kimage_load_normal_segment(struct kimage *image,
 	unsigned long maddr;
 	unsigned long ubytes, mbytes;
 	int result;
-	unsigned char *buf;
+	unsigned char __user *buf;
 
 	result = 0;
 	buf = segment->buf;
@@ -818,7 +818,7 @@ static int kimage_load_crash_segment(struct kimage *image,
 	unsigned long maddr;
 	unsigned long ubytes, mbytes;
 	int result;
-	unsigned char *buf;
+	unsigned char __user *buf;
 
 	result = 0;
 	buf = segment->buf;
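
The kexec.c changes are annotation fixes: __user marks pointers into user address space so sparse can flag direct dereferences, and NULL replaces 0 for pointer returns. A __user buffer such as segment->buf must only be reached through the copy helpers; a small illustrative fragment (not from the patch):

	unsigned char __user *ubuf = segment->buf;	/* user pointer: never dereferenced directly */
	unsigned char kbuf[128];

	if (copy_from_user(kbuf, ubuf, sizeof(kbuf)))
		return -EFAULT;				/* user memory may be unmapped */
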
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 334f37472c56..b0237122b24e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -36,6 +36,7 @@
 #include <linux/hash.h>
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/moduleloader.h>
 #include <asm/cacheflush.h>
 #include <asm/errno.h>
 #include <asm/kdebug.h>
@@ -50,6 +51,106 @@ unsigned int kprobe_cpu = NR_CPUS;
 static DEFINE_SPINLOCK(kprobe_lock);
 static struct kprobe *curr_kprobe;
 
+/*
+ * kprobe->ainsn.insn points to the copy of the instruction to be
+ * single-stepped. x86_64, POWER4 and above have no-exec support and
+ * stepping on the instruction on a vmalloced/kmalloced/data page
+ * is a recipe for disaster
+ */
+#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
+
+struct kprobe_insn_page {
+	struct hlist_node hlist;
+	kprobe_opcode_t *insns;		/* Page of instruction slots */
+	char slot_used[INSNS_PER_PAGE];
+	int nused;
+};
+
+static struct hlist_head kprobe_insn_pages;
+
+/**
+ * get_insn_slot() - Find a slot on an executable page for an instruction.
+ * We allocate an executable page if there's no room on existing ones.
+ */
+kprobe_opcode_t *get_insn_slot(void)
+{
+	struct kprobe_insn_page *kip;
+	struct hlist_node *pos;
+
+	hlist_for_each(pos, &kprobe_insn_pages) {
+		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
+		if (kip->nused < INSNS_PER_PAGE) {
+			int i;
+			for (i = 0; i < INSNS_PER_PAGE; i++) {
+				if (!kip->slot_used[i]) {
+					kip->slot_used[i] = 1;
+					kip->nused++;
+					return kip->insns + (i * MAX_INSN_SIZE);
+				}
+			}
+			/* Surprise!  No unused slots.  Fix kip->nused. */
+			kip->nused = INSNS_PER_PAGE;
+		}
+	}
+
+	/* All out of space.  Need to allocate a new page. Use slot 0. */
+	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
+	if (!kip) {
+		return NULL;
+	}
+
+	/*
+	 * Use module_alloc so this page is within +/- 2GB of where the
+	 * kernel image and loaded module images reside. This is required
+	 * so x86_64 can correctly handle the %rip-relative fixups.
+	 */
+	kip->insns = module_alloc(PAGE_SIZE);
+	if (!kip->insns) {
+		kfree(kip);
+		return NULL;
+	}
+	INIT_HLIST_NODE(&kip->hlist);
+	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
+	memset(kip->slot_used, 0, INSNS_PER_PAGE);
+	kip->slot_used[0] = 1;
+	kip->nused = 1;
+	return kip->insns;
+}
+
+void free_insn_slot(kprobe_opcode_t *slot)
+{
+	struct kprobe_insn_page *kip;
+	struct hlist_node *pos;
+
+	hlist_for_each(pos, &kprobe_insn_pages) {
+		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
+		if (kip->insns <= slot &&
+		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
+			int i = (slot - kip->insns) / MAX_INSN_SIZE;
+			kip->slot_used[i] = 0;
+			kip->nused--;
+			if (kip->nused == 0) {
+				/*
+				 * Page is no longer in use.  Free it unless
+				 * it's the last one.  We keep the last one
+				 * so as not to have to set it up again the
+				 * next time somebody inserts a probe.
+				 */
+				hlist_del(&kip->hlist);
+				if (hlist_empty(&kprobe_insn_pages)) {
+					INIT_HLIST_NODE(&kip->hlist);
+					hlist_add_head(&kip->hlist,
+						&kprobe_insn_pages);
+				} else {
+					module_free(NULL, kip->insns);
+					kfree(kip);
+				}
+			}
+			return;
+		}
+	}
+}
+
 /* Locks kprobe: irqs must be disabled */
 void lock_kprobes(void)
 {
@@ -139,12 +240,6 @@ static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 	return 0;
 }
 
-struct kprobe trampoline_p = {
-	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
-	.pre_handler = trampoline_probe_handler,
-	.post_handler = trampoline_post_handler
-};
-
 struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp)
 {
 	struct hlist_node *node;
@@ -163,35 +258,18 @@ static struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp)
 	return NULL;
 }
 
-struct kretprobe_instance *get_rp_inst(void *sara)
-{
-	struct hlist_head *head;
-	struct hlist_node *node;
-	struct task_struct *tsk;
-	struct kretprobe_instance *ri;
-
-	tsk = arch_get_kprobe_task(sara);
-	head = &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
-	hlist_for_each_entry(ri, node, head, hlist) {
-		if (ri->stack_addr == sara)
-			return ri;
-	}
-	return NULL;
-}
-
 void add_rp_inst(struct kretprobe_instance *ri)
 {
-	struct task_struct *tsk;
 	/*
 	 * Remove rp inst off the free list -
 	 * Add it back when probed function returns
 	 */
 	hlist_del(&ri->uflist);
-	tsk = arch_get_kprobe_task(ri->stack_addr);
+
 	/* Add rp inst onto table */
 	INIT_HLIST_NODE(&ri->hlist);
 	hlist_add_head(&ri->hlist,
-		&kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]);
+		&kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);
 
 	/* Also add this rp inst to the used list. */
 	INIT_HLIST_NODE(&ri->uflist);
@@ -218,34 +296,25 @@ struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk)
 	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
 }
 
-struct kretprobe_instance *get_rp_inst_tsk(struct task_struct *tk)
-{
-	struct task_struct *tsk;
-	struct hlist_head *head;
-	struct hlist_node *node;
-	struct kretprobe_instance *ri;
-
-	head = &kretprobe_inst_table[hash_ptr(tk, KPROBE_HASH_BITS)];
-
-	hlist_for_each_entry(ri, node, head, hlist) {
-		tsk = arch_get_kprobe_task(ri->stack_addr);
-		if (tsk == tk)
-			return ri;
-	}
-	return NULL;
-}
-
 /*
- * This function is called from do_exit or do_execv when task tk's stack is
- * about to be recycled. Recycle any function-return probe instances
- * associated with this task. These represent probed functions that have
- * been called but may never return.
+ * This function is called from exit_thread or flush_thread when task tk's
+ * stack is being recycled so that we can recycle any function-return probe
+ * instances associated with this task. These left over instances represent
+ * probed functions that have been called but will never return.
  */
 void kprobe_flush_task(struct task_struct *tk)
 {
+	struct kretprobe_instance *ri;
+	struct hlist_head *head;
+	struct hlist_node *node, *tmp;
 	unsigned long flags = 0;
+
 	spin_lock_irqsave(&kprobe_lock, flags);
-	arch_kprobe_flush_task(tk);
+	head = kretprobe_inst_table_head(current);
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task == tk)
+			recycle_rp_inst(ri);
+	}
 	spin_unlock_irqrestore(&kprobe_lock, flags);
 }
 
@@ -505,9 +574,10 @@ static int __init init_kprobes(void)
 		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
 	}
 
-	err = register_die_notifier(&kprobe_exceptions_nb);
-	/* Register the trampoline probe for return probe */
-	register_kprobe(&trampoline_p);
+	err = arch_init_kprobes();
+	if (!err)
+		err = register_die_notifier(&kprobe_exceptions_nb);
+
 	return err;
 }
 
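
The new get_insn_slot()/free_insn_slot() pair gives architecture code an executable, module_alloc()-backed slot for the single-stepped copy of a probed instruction. A hedged sketch of how an arch port might use it when preparing and removing a probe; the function and field names follow the x86-64 port of this era and are assumptions, not part of this diff:

	int arch_prepare_kprobe(struct kprobe *p)
	{
		/* copy the original instruction into an executable slot near kernel text */
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			return -ENOMEM;
		memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		return 0;
	}

	void arch_remove_kprobe(struct kprobe *p)
	{
		/* release the slot; its page is freed once the last slot on it goes */
		free_insn_slot(p->ainsn.insn);
	}
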
diff --git a/kernel/sched.c b/kernel/sched.c
index a07cff90d849..5f2182d42241 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3448,15 +3448,7 @@ int task_nice(const task_t *p)
 {
 	return TASK_NICE(p);
 }
-
-/*
- * The only users of task_nice are binfmt_elf and binfmt_elf32.
- * binfmt_elf is no longer modular, but binfmt_elf32 still is.
- * Therefore, task_nice is needed if there is a compat_mode.
- */
-#ifdef CONFIG_COMPAT
 EXPORT_SYMBOL_GPL(task_nice);
-#endif
 
 /**
  * idle_cpu - is a given cpu idle currently?
@@ -4174,6 +4166,14 @@ void show_state(void)
 	read_unlock(&tasklist_lock);
 }
 
+/**
+ * init_idle - set up an idle thread for a given CPU
+ * @idle: task in question
+ * @cpu: cpu the idle task belongs to
+ *
+ * NOTE: this function does not set the idle thread's NEED_RESCHED
+ * flag, to make booting more robust.
+ */
 void __devinit init_idle(task_t *idle, int cpu)
 {
 	runqueue_t *rq = cpu_rq(cpu);
@@ -4191,7 +4191,6 @@ void __devinit init_idle(task_t *idle, int cpu)
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	idle->oncpu = 1;
 #endif
-	set_tsk_need_resched(idle);
 	spin_unlock_irqrestore(&rq->lock, flags);
 
 	/* Set the preempt count _outside_ the spinlocks! */