Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/audit.c            |   3
-rw-r--r-- | kernel/cpu.c              |  24
-rw-r--r-- | kernel/cpuset.c           |   2
-rw-r--r-- | kernel/fork.c             |   4
-rw-r--r-- | kernel/futex.c            |  15
-rw-r--r-- | kernel/futex_compat.c     |  12
-rw-r--r-- | kernel/irq/chip.c         |  77
-rw-r--r-- | kernel/irq/handle.c       |  19
-rw-r--r-- | kernel/irq/manage.c       |   4
-rw-r--r-- | kernel/irq/proc.c         |   2
-rw-r--r-- | kernel/irq/resend.c       |   2
-rw-r--r-- | kernel/irq/spurious.c     |  10
-rw-r--r-- | kernel/lockdep.c          |  23
-rw-r--r-- | kernel/module.c           |  94
-rw-r--r-- | kernel/mutex-debug.c      |   2
-rw-r--r-- | kernel/nsproxy.c          |   6
-rw-r--r-- | kernel/posix-cpu-timers.c |  27
-rw-r--r-- | kernel/power/disk.c       |   8
-rw-r--r-- | kernel/power/poweroff.c   |   3
-rw-r--r-- | kernel/power/swap.c       |   3
-rw-r--r-- | kernel/power/user.c       |  10
-rw-r--r-- | kernel/printk.c           |  11
-rw-r--r-- | kernel/profile.c          |   7
-rw-r--r-- | kernel/relay.c            |  41
-rw-r--r-- | kernel/sched.c            |  24
-rw-r--r-- | kernel/sys_ni.c           |   1
-rw-r--r-- | kernel/sysctl.c           |   4
-rw-r--r-- | kernel/time/jiffies.c     |   2
-rw-r--r-- | kernel/unwind.c           | 318
-rw-r--r-- | kernel/workqueue.c        |   7
30 files changed, 512 insertions(+), 253 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index f9889ee77825..98106f6078b0 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -340,7 +340,7 @@ static int kauditd_thread(void *dummy)
 {
 	struct sk_buff *skb;
 
-	while (1) {
+	while (!kthread_should_stop()) {
 		skb = skb_dequeue(&audit_skb_queue);
 		wake_up(&audit_backlog_wait);
 		if (skb) {
@@ -369,6 +369,7 @@ static int kauditd_thread(void *dummy)
 			remove_wait_queue(&kauditd_wait, &wait);
 		}
 	}
+	return 0;
 }
 
 int audit_send_list(void *_dest)
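[Editor's note] The fix adopts the standard kthread loop shape: the thread polls kthread_should_stop() so that kthread_stop() can take it down, and it returns a value that kthread_stop() hands back to the caller. A minimal sketch of that shape, assuming hypothetical names for the thread and its work step:

#include <linux/kthread.h>
#include <linux/sched.h>

static int example_thread(void *data)
{
	while (!kthread_should_stop()) {
		/* do one unit of work, or sleep until there is some */
		schedule_timeout_interruptible(HZ);
	}
	return 0;	/* value handed back to kthread_stop() */
}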
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 32c96628463e..27dd3ee47099 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -19,7 +19,7 @@
 static DEFINE_MUTEX(cpu_add_remove_lock);
 static DEFINE_MUTEX(cpu_bitmask_lock);
 
-static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain);
+static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
 
 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
  * Should always be manipulated under cpu_add_remove_lock
@@ -68,7 +68,11 @@ EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
 /* Need to know about CPUs going up/down? */
 int __cpuinit register_cpu_notifier(struct notifier_block *nb)
 {
-	return blocking_notifier_chain_register(&cpu_chain, nb);
+	int ret;
+	mutex_lock(&cpu_add_remove_lock);
+	ret = raw_notifier_chain_register(&cpu_chain, nb);
+	mutex_unlock(&cpu_add_remove_lock);
+	return ret;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -77,7 +81,9 @@ EXPORT_SYMBOL(register_cpu_notifier);
 
 void unregister_cpu_notifier(struct notifier_block *nb)
 {
-	blocking_notifier_chain_unregister(&cpu_chain, nb);
+	mutex_lock(&cpu_add_remove_lock);
+	raw_notifier_chain_unregister(&cpu_chain, nb);
+	mutex_unlock(&cpu_add_remove_lock);
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
@@ -126,7 +132,7 @@ static int _cpu_down(unsigned int cpu)
 	if (!cpu_online(cpu))
 		return -EINVAL;
 
-	err = blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
+	err = raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
 						(void *)(long)cpu);
 	if (err == NOTIFY_BAD) {
 		printk("%s: attempt to take down CPU %u failed\n",
@@ -146,7 +152,7 @@ static int _cpu_down(unsigned int cpu)
 
 	if (IS_ERR(p)) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
-		if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
+		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
 				(void *)(long)cpu) == NOTIFY_BAD)
 			BUG();
 
@@ -169,7 +175,7 @@ static int _cpu_down(unsigned int cpu)
 	put_cpu();
 
 	/* CPU is completely dead: tell everyone.  Too late to complain. */
-	if (blocking_notifier_call_chain(&cpu_chain, CPU_DEAD,
+	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD,
 			(void *)(long)cpu) == NOTIFY_BAD)
 		BUG();
 
@@ -206,7 +212,7 @@ static int __devinit _cpu_up(unsigned int cpu)
 	if (cpu_online(cpu) || !cpu_present(cpu))
 		return -EINVAL;
 
-	ret = blocking_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
+	ret = raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
 	if (ret == NOTIFY_BAD) {
 		printk("%s: attempt to bring up CPU %u failed\n",
 				__FUNCTION__, cpu);
@@ -223,11 +229,11 @@ static int __devinit _cpu_up(unsigned int cpu)
 	BUG_ON(!cpu_online(cpu));
 
 	/* Now call notifier in preparation. */
-	blocking_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
+	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
 
 out_notify:
 	if (ret != 0)
-		blocking_notifier_call_chain(&cpu_chain,
+		raw_notifier_call_chain(&cpu_chain,
 				CPU_UP_CANCELED, hcpu);
 
 	return ret;
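[Editor's note] The switch from a blocking to a raw notifier chain means the chain itself takes no lock of its own; cpu_add_remove_lock now serializes registration, unregistration, and (via the hotplug paths) invocation. A sketch of the same pattern outside cpu.c; the chain, lock, and function names here are assumptions, not part of this patch:

#include <linux/notifier.h>
#include <linux/mutex.h>

static RAW_NOTIFIER_HEAD(example_chain);
static DEFINE_MUTEX(example_lock);	/* serializes all chain access */

int example_register(struct notifier_block *nb)
{
	int ret;

	mutex_lock(&example_lock);
	ret = raw_notifier_chain_register(&example_chain, nb);
	mutex_unlock(&example_lock);
	return ret;
}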
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9d850ae13b1b..6313c38c930e 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2137,7 +2137,7 @@ static int cpuset_handle_cpuhp(struct notifier_block *nb,
  * See also the previous routine cpuset_handle_cpuhp().
  */
 
-void cpuset_track_online_nodes()
+void cpuset_track_online_nodes(void)
 {
 	common_cpu_mem_hotplug_unplug();
 }
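[Editor's note] The one-character change matters in C: "void f()" leaves the parameter list unspecified, while "void f(void)" declares that the function takes no arguments, which is what the header prototype promises and what -Wstrict-prototypes enforces. For example:

void cpuset_track_online_nodes(void);	/* proper prototype: no arguments */
void cpuset_track_online_nodes();	/* old style: arguments unchecked */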
diff --git a/kernel/fork.c b/kernel/fork.c
index 7dc6140baac6..29ebb30850ed 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -984,6 +984,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (!p)
 		goto fork_out;
 
+	rt_mutex_init_task(p);
+
 #ifdef CONFIG_TRACE_IRQFLAGS
 	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
 	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
@@ -1088,8 +1090,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->lockdep_recursion = 0;
 #endif
 
-	rt_mutex_init_task(p);
-
 #ifdef CONFIG_DEBUG_MUTEXES
 	p->blocked_on = NULL; /* not blocked yet */
 #endif
diff --git a/kernel/futex.c b/kernel/futex.c
index 4aaf91951a43..b364e0026191 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1612,10 +1612,10 @@ sys_set_robust_list(struct robust_list_head __user *head,
  * @len_ptr: pointer to a length field, the kernel fills in the header size
  */
 asmlinkage long
-sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr,
+sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
 		    size_t __user *len_ptr)
 {
-	struct robust_list_head *head;
+	struct robust_list_head __user *head;
 	unsigned long ret;
 
 	if (!pid)
@@ -1694,14 +1694,15 @@ retry:
  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
  */
 static inline int fetch_robust_entry(struct robust_list __user **entry,
-				     struct robust_list __user **head, int *pi)
+				     struct robust_list __user * __user *head,
+				     int *pi)
 {
 	unsigned long uentry;
 
-	if (get_user(uentry, (unsigned long *)head))
+	if (get_user(uentry, (unsigned long __user *)head))
 		return -EFAULT;
 
-	*entry = (void *)(uentry & ~1UL);
+	*entry = (void __user *)(uentry & ~1UL);
 	*pi = uentry & 1;
 
 	return 0;
@@ -1739,7 +1740,7 @@ void exit_robust_list(struct task_struct *curr)
 		return;
 
 	if (pending)
-		handle_futex_death((void *)pending + futex_offset, curr, pip);
+		handle_futex_death((void __user *)pending + futex_offset, curr, pip);
 
 	while (entry != &head->list) {
 		/*
@@ -1747,7 +1748,7 @@ void exit_robust_list(struct task_struct *curr)
 		 * don't process it twice:
 		 */
 		if (entry != pending)
-			if (handle_futex_death((void *)entry + futex_offset,
+			if (handle_futex_death((void __user *)entry + futex_offset,
 						curr, pi))
 				return;
 		/*
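[Editor's note] These changes are annotation-only: __user marks a pointer as living in the user address space so that sparse (make C=1) flags direct dereferences and mismatched casts; the generated code is unchanged. The bit-stuffing convention the robust-list code relies on, as a standalone sketch (the function name is assumed; the body mirrors the patched fetch_robust_entry()):

#include <linux/futex.h>
#include <asm/uaccess.h>

/* Bit 0 of the fetched user pointer flags a PI futex; the remaining
 * bits are the user-space address of the next list entry. */
static int fetch_entry(struct robust_list __user **entry,
		       struct robust_list __user * __user *head, int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);	/* strip flag bit */
	*pi = uentry & 1;
	return 0;
}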
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index c5cca3f65cb7..50f24eea6cd0 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -18,7 +18,7 @@
  */
 static inline int
 fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
-		   compat_uptr_t *head, int *pi)
+		   compat_uptr_t __user *head, int *pi)
 {
 	if (get_user(*uentry, head))
 		return -EFAULT;
@@ -62,7 +62,7 @@ void compat_exit_robust_list(struct task_struct *curr)
 				&head->list_op_pending, &pip))
 		return;
 	if (upending)
-		handle_futex_death((void *)pending + futex_offset, curr, pip);
+		handle_futex_death((void __user *)pending + futex_offset, curr, pip);
 
 	while (compat_ptr(uentry) != &head->list) {
 		/*
@@ -70,7 +70,7 @@ void compat_exit_robust_list(struct task_struct *curr)
 		 * dont process it twice:
 		 */
 		if (entry != pending)
-			if (handle_futex_death((void *)entry + futex_offset,
+			if (handle_futex_death((void __user *)entry + futex_offset,
 						curr, pi))
 				return;
 
@@ -78,7 +78,7 @@ void compat_exit_robust_list(struct task_struct *curr)
 	 * Fetch the next entry in the list:
 	 */
 	if (fetch_robust_entry(&uentry, &entry,
-			(compat_uptr_t *)&entry->next, &pi))
+			(compat_uptr_t __user *)&entry->next, &pi))
 		return;
 	/*
 	 * Avoid excessively long or circular lists:
@@ -103,10 +103,10 @@ compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
 }
 
 asmlinkage long
-compat_sys_get_robust_list(int pid, compat_uptr_t *head_ptr,
+compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
 			   compat_size_t __user *len_ptr)
 {
-	struct compat_robust_list_head *head;
+	struct compat_robust_list_head __user *head;
 	unsigned long ret;
 
 	if (!pid)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 4cf65f5c6a74..2d0dc3efe813 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -249,7 +249,6 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
  *	handle_simple_irq - Simple and software-decoded IRQs.
  *	@irq:	the interrupt number
  *	@desc:	the interrupt description structure for this irq
- *	@regs:	pointer to a register structure
  *
  *	Simple interrupts are either sent from a demultiplexing interrupt
  *	handler or come from hardware, where no interrupt hardware control
@@ -259,7 +258,7 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
  *	unmask issues if necessary.
  */
 void fastcall
-handle_simple_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
+handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 {
 	struct irqaction *action;
 	irqreturn_t action_ret;
@@ -279,9 +278,9 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
 	desc->status |= IRQ_INPROGRESS;
 	spin_unlock(&desc->lock);
 
-	action_ret = handle_IRQ_event(irq, regs, action);
+	action_ret = handle_IRQ_event(irq, action);
 	if (!noirqdebug)
-		note_interrupt(irq, desc, action_ret, regs);
+		note_interrupt(irq, desc, action_ret);
 
 	spin_lock(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;
@@ -293,7 +292,6 @@ out_unlock:
  *	handle_level_irq - Level type irq handler
  *	@irq:	the interrupt number
  *	@desc:	the interrupt description structure for this irq
- *	@regs:	pointer to a register structure
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
@@ -301,7 +299,7 @@ out_unlock:
 *	interrupt line is back to inactive.
 */
 void fastcall
-handle_level_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
+handle_level_irq(unsigned int irq, struct irq_desc *desc)
 {
 	unsigned int cpu = smp_processor_id();
 	struct irqaction *action;
@@ -329,9 +327,9 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
 	desc->status &= ~IRQ_PENDING;
 	spin_unlock(&desc->lock);
 
-	action_ret = handle_IRQ_event(irq, regs, action);
+	action_ret = handle_IRQ_event(irq, action);
 	if (!noirqdebug)
-		note_interrupt(irq, desc, action_ret, regs);
+		note_interrupt(irq, desc, action_ret);
 
 	spin_lock(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;
@@ -345,7 +343,6 @@ out_unlock:
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
- *	@regs:	pointer to a register structure
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
@@ -353,8 +350,7 @@ out_unlock:
 *	details in hardware, transparently.
 */
 void fastcall
-handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc,
-		   struct pt_regs *regs)
+handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 {
 	unsigned int cpu = smp_processor_id();
 	struct irqaction *action;
@@ -382,9 +378,9 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc,
 	desc->status &= ~IRQ_PENDING;
 	spin_unlock(&desc->lock);
 
-	action_ret = handle_IRQ_event(irq, regs, action);
+	action_ret = handle_IRQ_event(irq, action);
 	if (!noirqdebug)
-		note_interrupt(irq, desc, action_ret, regs);
+		note_interrupt(irq, desc, action_ret);
 
 	spin_lock(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;
@@ -398,7 +394,6 @@ out:
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
- *	@regs:	pointer to a register structure
 *
 *	Interrupt occures on the falling and/or rising edge of a hardware
 *	signal. The occurence is latched into the irq controller hardware
@@ -412,7 +407,7 @@ out:
 *	loop is left.
 */
 void fastcall
-handle_edge_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
+handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 {
 	const unsigned int cpu = smp_processor_id();
 
@@ -463,9 +458,9 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
 
 		desc->status &= ~IRQ_PENDING;
 		spin_unlock(&desc->lock);
-		action_ret = handle_IRQ_event(irq, regs, action);
+		action_ret = handle_IRQ_event(irq, action);
 		if (!noirqdebug)
-			note_interrupt(irq, desc, action_ret, regs);
+			note_interrupt(irq, desc, action_ret);
 		spin_lock(&desc->lock);
 
 	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
@@ -480,12 +475,11 @@ out_unlock:
 *	handle_percpu_IRQ - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
- *	@regs:	pointer to a register structure
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
 void fastcall
-handle_percpu_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
+handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 {
 	irqreturn_t action_ret;
 
@@ -494,9 +488,9 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
 	if (desc->chip->ack)
 		desc->chip->ack(irq);
 
-	action_ret = handle_IRQ_event(irq, regs, desc->action);
+	action_ret = handle_IRQ_event(irq, desc->action);
 	if (!noirqdebug)
-		note_interrupt(irq, desc, action_ret, regs);
+		note_interrupt(irq, desc, action_ret);
 
 	if (desc->chip->eoi)
 		desc->chip->eoi(irq);
@@ -505,10 +499,8 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
 #endif /* CONFIG_SMP */
 
 void
-__set_irq_handler(unsigned int irq,
-		  void fastcall (*handle)(unsigned int, irq_desc_t *,
-					  struct pt_regs *),
-		  int is_chained)
+__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
+		  const char *name)
 {
 	struct irq_desc *desc;
 	unsigned long flags;
@@ -549,6 +541,7 @@ __set_irq_handler(unsigned int irq,
 		desc->depth = 1;
 	}
 	desc->handle_irq = handle;
+	desc->name = name;
 
 	if (handle != handle_bad_irq && is_chained) {
 		desc->status &= ~IRQ_DISABLED;
@@ -561,36 +554,16 @@ __set_irq_handler(unsigned int irq,
 
 void
 set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
-			 void fastcall (*handle)(unsigned int,
-						 struct irq_desc *,
-						 struct pt_regs *))
+			 irq_flow_handler_t handle)
 {
 	set_irq_chip(irq, chip);
-	__set_irq_handler(irq, handle, 0);
+	__set_irq_handler(irq, handle, 0, NULL);
 }
 
-/*
- * Get a descriptive string for the highlevel handler, for
- * /proc/interrupts output:
- */
-const char *
-handle_irq_name(void fastcall (*handle)(unsigned int, struct irq_desc *,
-					struct pt_regs *))
-{
-	if (handle == handle_level_irq)
-		return "level ";
-	if (handle == handle_fasteoi_irq)
-		return "fasteoi";
-	if (handle == handle_edge_irq)
-		return "edge ";
-	if (handle == handle_simple_irq)
-		return "simple ";
-#ifdef CONFIG_SMP
-	if (handle == handle_percpu_irq)
-		return "percpu ";
-#endif
-	if (handle == handle_bad_irq)
-		return "bad ";
-
-	return NULL;
+void
+set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+			      irq_flow_handler_t handle, const char *name)
+{
+	set_irq_chip(irq, chip);
+	__set_irq_handler(irq, handle, 0, name);
 }
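[Editor's note] With the register pointer gone, flow handlers match the new irq_flow_handler_t typedef, and the pointer-comparison string table in handle_irq_name() is replaced by a name stored in the descriptor at setup time. A platform-side sketch of the new API; the irq number and chip are assumptions for illustration:

#include <linux/irq.h>

static void __init example_init_irq(void)
{
	/* "level" is the string /proc/interrupts will show for this line */
	set_irq_chip_and_handler_name(EXAMPLE_IRQ, &example_chip,
				      handle_level_irq, "level");
}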
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 4c6cdbaed661..42aa6f1a3f0f 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -27,7 +27,7 @@
 * Handles spurious and unhandled IRQ's. It also prints a debugmessage.
 */
 void fastcall
-handle_bad_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
+handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 {
 	print_irq_desc(irq, desc);
 	kstat_this_cpu.irqs[irq]++;
@@ -115,7 +115,7 @@ struct irq_chip dummy_irq_chip = {
 /*
 * Special, empty irq handler:
 */
-irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
+irqreturn_t no_action(int cpl, void *dev_id)
 {
 	return IRQ_NONE;
 }
@@ -123,13 +123,11 @@ irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
 /**
 * handle_IRQ_event - irq action chain handler
 * @irq: the interrupt number
- * @regs: pointer to a register structure
 * @action: the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
-irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
-			     struct irqaction *action)
+irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 {
 	irqreturn_t ret, retval = IRQ_NONE;
 	unsigned int status = 0;
@@ -140,7 +138,7 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
 	local_irq_enable_in_hardirq();
 
 	do {
-		ret = action->handler(irq, action->dev_id, regs);
+		ret = action->handler(irq, action->dev_id);
 		if (ret == IRQ_HANDLED)
 			status |= action->flags;
 		retval |= ret;
@@ -158,7 +156,6 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
 /**
 * __do_IRQ - original all in one highlevel IRQ handler
 * @irq: the interrupt number
- * @regs: pointer to a register structure
 *
 * __do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
@@ -167,7 +164,7 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
-fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
+fastcall unsigned int __do_IRQ(unsigned int irq)
 {
 	struct irq_desc *desc = irq_desc + irq;
 	struct irqaction *action;
@@ -182,7 +179,7 @@ fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
 		 */
 		if (desc->chip->ack)
 			desc->chip->ack(irq);
-		action_ret = handle_IRQ_event(irq, regs, desc->action);
+		action_ret = handle_IRQ_event(irq, desc->action);
 		desc->chip->end(irq);
 		return 1;
 	}
@@ -233,11 +230,11 @@ fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
 
 		spin_unlock(&desc->lock);
 
-		action_ret = handle_IRQ_event(irq, regs, action);
+		action_ret = handle_IRQ_event(irq, action);
 
 		spin_lock(&desc->lock);
 		if (!noirqdebug)
-			note_interrupt(irq, desc, action_ret, regs);
+			note_interrupt(irq, desc, action_ret);
 		if (likely(!(desc->status & IRQ_PENDING)))
 			break;
 		desc->status &= ~IRQ_PENDING;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 92be519eff26..6879202afe9a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -427,8 +427,7 @@ EXPORT_SYMBOL(free_irq);
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *
 */
-int request_irq(unsigned int irq,
-		irqreturn_t (*handler)(int, void *, struct pt_regs *),
+int request_irq(unsigned int irq, irq_handler_t handler,
 		unsigned long irqflags, const char *devname, void *dev_id)
 {
 	struct irqaction *action;
@@ -475,4 +474,3 @@ int request_irq(unsigned int irq,
 	return retval;
 }
 EXPORT_SYMBOL(request_irq);
-
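[Editor's note] request_irq() now takes the two-argument irq_handler_t; a handler that still needs the interrupted register state calls get_irq_regs(), which this patch series maintains per CPU instead of threading a pointer through every call. A driver-side sketch; the device, irq number, and dev_id cookie are hypothetical:

#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* acknowledge the device here; fetch registers with
	 * get_irq_regs() only if genuinely needed */
	return IRQ_HANDLED;
}

static int example_setup(void)
{
	/* IRQF_SHARED requires a unique, non-NULL dev_id */
	return request_irq(EXAMPLE_IRQ, example_isr, IRQF_SHARED,
			   "example", &example_dev);
}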
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 607c7809ad01..9a352667007c 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -57,7 +57,7 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
 	if (!irq_desc[irq].chip->set_affinity || no_irq_affinity)
 		return -EIO;
 
-	err = cpumask_parse(buffer, count, new_value);
+	err = cpumask_parse_user(buffer, count, new_value);
 	if (err)
 		return err;
 
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 35f10f7ff94a..5bfeaed7e487 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -38,7 +38,7 @@ static void resend_irqs(unsigned long arg)
 		clear_bit(irq, irqs_resend);
 		desc = irq_desc + irq;
 		local_irq_disable();
-		desc->handle_irq(irq, desc, NULL);
+		desc->handle_irq(irq, desc);
 		local_irq_enable();
 	}
 }
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 417e98092cf2..543ea2e5ad93 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -16,7 +16,7 @@ static int irqfixup __read_mostly;
 /*
 * Recovery handler for misrouted interrupts.
 */
-static int misrouted_irq(int irq, struct pt_regs *regs)
+static int misrouted_irq(int irq)
 {
 	int i;
 	int ok = 0;
@@ -49,7 +49,7 @@ static int misrouted_irq(int irq, struct pt_regs *regs)
 		while (action) {
 			/* Only shared IRQ handlers are safe to call */
 			if (action->flags & IRQF_SHARED) {
-				if (action->handler(i, action->dev_id, regs) ==
+				if (action->handler(i, action->dev_id) ==
 						IRQ_HANDLED)
 					ok = 1;
 			}
@@ -70,7 +70,7 @@ static int misrouted_irq(int irq, struct pt_regs *regs)
 			 */
 			work = 1;
 			spin_unlock(&desc->lock);
-			handle_IRQ_event(i, regs, action);
+			handle_IRQ_event(i, action);
 			spin_lock(&desc->lock);
 			desc->status &= ~IRQ_PENDING;
 		}
@@ -136,7 +136,7 @@ report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
 }
 
 void note_interrupt(unsigned int irq, struct irq_desc *desc,
-		    irqreturn_t action_ret, struct pt_regs *regs)
+		    irqreturn_t action_ret)
 {
 	if (unlikely(action_ret != IRQ_HANDLED)) {
 		desc->irqs_unhandled++;
@@ -147,7 +147,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
 	if (unlikely(irqfixup)) {
 		/* Don't punish working computers */
 		if ((irqfixup == 2 && irq == 0) || action_ret == IRQ_NONE) {
-			int ok = misrouted_irq(irq, regs);
+			int ok = misrouted_irq(irq);
 			if (action_ret == IRQ_NONE)
 				desc->irqs_unhandled -= ok;
 		}
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 4c0553461000..b739be2a6dc9 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -575,6 +575,8 @@ static noinline int print_circular_bug_tail(void)
 	return 0;
 }
 
+#define RECURSION_LIMIT 40
+
 static int noinline print_infinite_recursion_bug(void)
 {
 	__raw_spin_unlock(&hash_lock);
@@ -595,7 +597,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 	debug_atomic_inc(&nr_cyclic_check_recursions);
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
-	if (depth >= 20)
+	if (depth >= RECURSION_LIMIT)
 		return print_infinite_recursion_bug();
 	/*
 	 * Check this lock's dependency list:
@@ -645,7 +647,7 @@ find_usage_forwards(struct lock_class *source, unsigned int depth)
 
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
-	if (depth >= 20)
+	if (depth >= RECURSION_LIMIT)
 		return print_infinite_recursion_bug();
 
 	debug_atomic_inc(&nr_find_usage_forwards_checks);
@@ -684,7 +686,7 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
 
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
-	if (depth >= 20)
+	if (depth >= RECURSION_LIMIT)
 		return print_infinite_recursion_bug();
 
 	debug_atomic_inc(&nr_find_usage_backwards_checks);
@@ -1114,8 +1116,6 @@ static int count_matching_names(struct lock_class *new_class)
 	return count + 1;
 }
 
-extern void __error_too_big_MAX_LOCKDEP_SUBCLASSES(void);
-
 /*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
@@ -1153,8 +1153,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	 * (or spin_lock_init()) call - which acts as the key. For static
 	 * locks we use the lock object itself as the key.
 	 */
-	if (sizeof(struct lock_class_key) > sizeof(struct lock_class))
-		__error_too_big_MAX_LOCKDEP_SUBCLASSES();
+	BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class));
 
 	key = lock->key->subkeys + subclass;
 
@@ -1177,7 +1176,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 * itself, so actual lookup of the hash should be once per lock object.
 */
 static inline struct lock_class *
-register_lock_class(struct lockdep_map *lock, unsigned int subclass)
+register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 {
 	struct lockdep_subclass_key *key;
 	struct list_head *hash_head;
@@ -1249,7 +1248,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
 out_unlock_set:
 	__raw_spin_unlock(&hash_lock);
 
-	if (!subclass)
+	if (!subclass || force)
 		lock->class_cache = class;
 
 	DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
@@ -1937,7 +1936,7 @@ void trace_softirqs_off(unsigned long ip)
 * Initialize a lock instance's lock-class mapping info:
 */
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
-		      struct lock_class_key *key)
+		      struct lock_class_key *key, int subclass)
 {
 	if (unlikely(!debug_locks))
 		return;
@@ -1957,6 +1956,8 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	lock->name = name;
 	lock->key = key;
 	lock->class_cache = NULL;
+	if (subclass)
+		register_lock_class(lock, subclass, 1);
 }
 
 EXPORT_SYMBOL_GPL(lockdep_init_map);
@@ -1995,7 +1996,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	 * Not cached yet or subclass?
 	 */
 	if (unlikely(!class)) {
-		class = register_lock_class(lock, subclass);
+		class = register_lock_class(lock, subclass, 0);
 		if (!class)
 			return 0;
 	}
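[Editor's note] Besides raising the dependency-graph recursion cap from a bare 20 to a named RECURSION_LIMIT of 40, this patch replaces the old link-time trick of calling an undefined extern function with BUILD_BUG_ON(), which evaluates its condition at compile time and breaks the build when the condition is true, at no runtime cost. A self-contained illustration; the struct and the size limit are made up:

#include <linux/kernel.h>

struct example_map { unsigned long bits[4]; };

static void example_check(void)
{
	/* fails to compile if the struct ever outgrows one cache line */
	BUILD_BUG_ON(sizeof(struct example_map) > 64);
}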
diff --git a/kernel/module.c b/kernel/module.c
index 7f60e782de1e..67009bd56c52 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -87,6 +87,12 @@ static inline int strong_try_module_get(struct module *mod)
 	return try_module_get(mod);
 }
 
+static inline void add_taint_module(struct module *mod, unsigned flag)
+{
+	add_taint(flag);
+	mod->taints |= flag;
+}
+
 /* A thread that wants to hold a reference to a module only while it
 * is running can call ths to safely exit.
 * nfsd and lockd use this.
@@ -847,12 +853,10 @@ static int check_version(Elf_Shdr *sechdrs,
 		return 0;
 	}
 	/* Not in module's version table. OK, but that taints the kernel. */
-	if (!(tainted & TAINT_FORCED_MODULE)) {
+	if (!(tainted & TAINT_FORCED_MODULE))
 		printk("%s: no version for \"%s\" found: kernel tainted.\n",
 		       mod->name, symname);
-		add_taint(TAINT_FORCED_MODULE);
-		mod->taints |= TAINT_FORCED_MODULE;
-	}
+	add_taint_module(mod, TAINT_FORCED_MODULE);
 	return 1;
 }
 
@@ -910,7 +914,8 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
 	unsigned long ret;
 	const unsigned long *crc;
 
-	ret = __find_symbol(name, &owner, &crc, mod->license_gplok);
+	ret = __find_symbol(name, &owner, &crc,
+			!(mod->taints & TAINT_PROPRIETARY_MODULE));
 	if (ret) {
 		/* use_module can fail due to OOM, or module unloading */
 		if (!check_version(sechdrs, versindex, name, mod, crc) ||
@@ -1335,12 +1340,11 @@ static void set_license(struct module *mod, const char *license)
 	if (!license)
 		license = "unspecified";
 
-	mod->license_gplok = license_is_gpl_compatible(license);
-	if (!mod->license_gplok && !(tainted & TAINT_PROPRIETARY_MODULE)) {
-		printk(KERN_WARNING "%s: module license '%s' taints kernel.\n",
-		       mod->name, license);
-		add_taint(TAINT_PROPRIETARY_MODULE);
-		mod->taints |= TAINT_PROPRIETARY_MODULE;
+	if (!license_is_gpl_compatible(license)) {
+		if (!(tainted & TAINT_PROPRIETARY_MODULE))
+			printk(KERN_WARNING "%s: module license '%s' taints"
+				"kernel.\n", mod->name, license);
+		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
 	}
 }
 
@@ -1619,8 +1623,7 @@ static struct module *load_module(void __user *umod,
 	modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
 	/* This is allowed: modprobe --force will invalidate it. */
 	if (!modmagic) {
-		add_taint(TAINT_FORCED_MODULE);
-		mod->taints |= TAINT_FORCED_MODULE;
+		add_taint_module(mod, TAINT_FORCED_MODULE);
 		printk(KERN_WARNING "%s: no version magic, tainting kernel.\n",
 		       mod->name);
 	} else if (!same_magic(modmagic, vermagic)) {
@@ -1714,14 +1717,10 @@ static struct module *load_module(void __user *umod,
 	/* Set up license info based on the info section */
 	set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
 
-	if (strcmp(mod->name, "ndiswrapper") == 0) {
-		add_taint(TAINT_PROPRIETARY_MODULE);
-		mod->taints |= TAINT_PROPRIETARY_MODULE;
-	}
-	if (strcmp(mod->name, "driverloader") == 0) {
-		add_taint(TAINT_PROPRIETARY_MODULE);
-		mod->taints |= TAINT_PROPRIETARY_MODULE;
-	}
+	if (strcmp(mod->name, "ndiswrapper") == 0)
+		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
+	if (strcmp(mod->name, "driverloader") == 0)
+		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
 
 	/* Set up MODINFO_ATTR fields */
 	setup_modinfo(mod, sechdrs, infoindex);
@@ -1766,8 +1765,7 @@ static struct module *load_module(void __user *umod,
 	    (mod->num_unused_gpl_syms && !unusedgplcrcindex)) {
 		printk(KERN_WARNING "%s: No versions for exported symbols."
 		       " Tainting kernel.\n", mod->name);
-		add_taint(TAINT_FORCED_MODULE);
-		mod->taints |= TAINT_FORCED_MODULE;
+		add_taint_module(mod, TAINT_FORCED_MODULE);
 	}
 #endif
 
@@ -2132,9 +2130,33 @@ static void m_stop(struct seq_file *m, void *p)
 	mutex_unlock(&module_mutex);
 }
 
+static char *taint_flags(unsigned int taints, char *buf)
+{
+	int bx = 0;
+
+	if (taints) {
+		buf[bx++] = '(';
+		if (taints & TAINT_PROPRIETARY_MODULE)
+			buf[bx++] = 'P';
+		if (taints & TAINT_FORCED_MODULE)
+			buf[bx++] = 'F';
+		/*
+		 * TAINT_FORCED_RMMOD: could be added.
+		 * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
+		 * apply to modules.
+		 */
+		buf[bx++] = ')';
+	}
+	buf[bx] = '\0';
+
+	return buf;
+}
+
 static int m_show(struct seq_file *m, void *p)
 {
 	struct module *mod = list_entry(p, struct module, list);
+	char buf[8];
+
 	seq_printf(m, "%s %lu",
 		   mod->name, mod->init_size + mod->core_size);
 	print_unload_info(m, mod);
@@ -2147,6 +2169,10 @@ static int m_show(struct seq_file *m, void *p)
 	/* Used by oprofile and other similar tools. */
 	seq_printf(m, " 0x%p", mod->module_core);
 
+	/* Taints info */
+	if (mod->taints)
+		seq_printf(m, " %s", taint_flags(mod->taints, buf));
+
 	seq_printf(m, "\n");
 	return 0;
 }
@@ -2235,28 +2261,6 @@ struct module *module_text_address(unsigned long addr)
 	return mod;
 }
 
-static char *taint_flags(unsigned int taints, char *buf)
-{
-	*buf = '\0';
-	if (taints) {
-		int bx;
-
-		buf[0] = '(';
-		bx = 1;
-		if (taints & TAINT_PROPRIETARY_MODULE)
-			buf[bx++] = 'P';
-		if (taints & TAINT_FORCED_MODULE)
-			buf[bx++] = 'F';
-		/*
-		 * TAINT_FORCED_RMMOD: could be added.
-		 * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
-		 * apply to modules.
-		 */
-		buf[bx] = ')';
-	}
-	return buf;
-}
-
 /* Don't grab lock, we're oopsing. */
 void print_modules(void)
 {
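[Editor's note] Two things happen here: the add_taint_module() helper keeps the global tainted mask and the new per-module mod->taints field in step, and taint_flags() moves above m_show() so that /proc/modules can name the offending module instead of only flipping the global flag. The buffer in m_show() is sized from the output format: '(' plus one character per flag plus ')' plus the terminating NUL; with only 'P' and 'F' printed today, char buf[8] leaves headroom. A caller-side sketch, mirroring the converted call sites:

	if (strcmp(mod->name, "ndiswrapper") == 0)
		/* was: add_taint(...); mod->taints |= ...; */
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);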
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index e3203c654dda..18651641a7b5 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -91,7 +91,7 @@ void debug_mutex_init(struct mutex *lock, const char *name,
 	 * Make sure we are not reinitializing a held lock:
 	 */
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-	lockdep_init_map(&lock->dep_map, name, key);
+	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
 	lock->owner = NULL;
 	lock->magic = lock;
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 6ebdb82a0ce4..674aceb7335a 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -44,11 +44,9 @@ static inline struct nsproxy *clone_namespaces(struct nsproxy *orig)
 {
 	struct nsproxy *ns;
 
-	ns = kmalloc(sizeof(struct nsproxy), GFP_KERNEL);
-	if (ns) {
-		memcpy(ns, orig, sizeof(struct nsproxy));
+	ns = kmemdup(orig, sizeof(struct nsproxy), GFP_KERNEL);
+	if (ns)
 		atomic_set(&ns->count, 1);
-	}
 	return ns;
 }
 
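[Editor's note] kmemdup() folds the allocate-and-copy pair into one call. Its open-coded equivalent, for reference, is exactly what the removed lines did (the function name below is illustrative):

#include <linux/slab.h>
#include <linux/string.h>

void *example_dup(const void *src, size_t len, gfp_t gfp)
{
	void *p = kmalloc(len, gfp);	/* may return NULL */

	if (p)
		memcpy(p, src, len);
	return p;
}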
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 479b16b44f79..7c3e1e6dfb5b 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
@@ -88,6 +88,19 @@ static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock, | |||
88 | } | 88 | } |
89 | 89 | ||
90 | /* | 90 | /* |
91 | * Divide and limit the result to res >= 1 | ||
92 | * | ||
93 | * This is necessary to prevent signal delivery starvation, when the result of | ||
94 | * the division would be rounded down to 0. | ||
95 | */ | ||
96 | static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div) | ||
97 | { | ||
98 | cputime_t res = cputime_div(time, div); | ||
99 | |||
100 | return max_t(cputime_t, res, 1); | ||
101 | } | ||
102 | |||
103 | /* | ||
91 | * Update expiry time from increment, and increase overrun count, | 104 | * Update expiry time from increment, and increase overrun count, |
92 | * given the current clock sample. | 105 | * given the current clock sample. |
93 | */ | 106 | */ |
@@ -483,8 +496,8 @@ static void process_timer_rebalance(struct task_struct *p, | |||
483 | BUG(); | 496 | BUG(); |
484 | break; | 497 | break; |
485 | case CPUCLOCK_PROF: | 498 | case CPUCLOCK_PROF: |
486 | left = cputime_div(cputime_sub(expires.cpu, val.cpu), | 499 | left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu), |
487 | nthreads); | 500 | nthreads); |
488 | do { | 501 | do { |
489 | if (likely(!(t->flags & PF_EXITING))) { | 502 | if (likely(!(t->flags & PF_EXITING))) { |
490 | ticks = cputime_add(prof_ticks(t), left); | 503 | ticks = cputime_add(prof_ticks(t), left); |
@@ -498,8 +511,8 @@ static void process_timer_rebalance(struct task_struct *p, | |||
498 | } while (t != p); | 511 | } while (t != p); |
499 | break; | 512 | break; |
500 | case CPUCLOCK_VIRT: | 513 | case CPUCLOCK_VIRT: |
501 | left = cputime_div(cputime_sub(expires.cpu, val.cpu), | 514 | left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu), |
502 | nthreads); | 515 | nthreads); |
503 | do { | 516 | do { |
504 | if (likely(!(t->flags & PF_EXITING))) { | 517 | if (likely(!(t->flags & PF_EXITING))) { |
505 | ticks = cputime_add(virt_ticks(t), left); | 518 | ticks = cputime_add(virt_ticks(t), left); |
@@ -515,6 +528,7 @@ static void process_timer_rebalance(struct task_struct *p, | |||
515 | case CPUCLOCK_SCHED: | 528 | case CPUCLOCK_SCHED: |
516 | nsleft = expires.sched - val.sched; | 529 | nsleft = expires.sched - val.sched; |
517 | do_div(nsleft, nthreads); | 530 | do_div(nsleft, nthreads); |
531 | nsleft = max_t(unsigned long long, nsleft, 1); | ||
518 | do { | 532 | do { |
519 | if (likely(!(t->flags & PF_EXITING))) { | 533 | if (likely(!(t->flags & PF_EXITING))) { |
520 | ns = t->sched_time + nsleft; | 534 | ns = t->sched_time + nsleft; |
@@ -1159,12 +1173,13 @@ static void check_process_timers(struct task_struct *tsk, | |||
1159 | 1173 | ||
1160 | prof_left = cputime_sub(prof_expires, utime); | 1174 | prof_left = cputime_sub(prof_expires, utime); |
1161 | prof_left = cputime_sub(prof_left, stime); | 1175 | prof_left = cputime_sub(prof_left, stime); |
1162 | prof_left = cputime_div(prof_left, nthreads); | 1176 | prof_left = cputime_div_non_zero(prof_left, nthreads); |
1163 | virt_left = cputime_sub(virt_expires, utime); | 1177 | virt_left = cputime_sub(virt_expires, utime); |
1164 | virt_left = cputime_div(virt_left, nthreads); | 1178 | virt_left = cputime_div_non_zero(virt_left, nthreads); |
1165 | if (sched_expires) { | 1179 | if (sched_expires) { |
1166 | sched_left = sched_expires - sched_time; | 1180 | sched_left = sched_expires - sched_time; |
1167 | do_div(sched_left, nthreads); | 1181 | do_div(sched_left, nthreads); |
1182 | sched_left = max_t(unsigned long long, sched_left, 1); | ||
1168 | } else { | 1183 | } else { |
1169 | sched_left = 0; | 1184 | sched_left = 0; |
1170 | } | 1185 | } |
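
A stand-alone sketch of why the clamp above matters, assuming cputime_t is a plain integer tick count (on real configs it is arch-defined and manipulated through cputime_add()/cputime_div()); compiles as ordinary user-space C:

#include <stdio.h>

typedef unsigned long cputime_t;

/* Mirror of cputime_div_non_zero() for plain integer ticks. */
static cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
{
        cputime_t res = time / div;

        return res > 1 ? res : 1;       /* never let a thread's share hit 0 */
}

int main(void)
{
        /* 3 remaining ticks split across 8 threads rounds down to 0, so
         * the per-thread expiry would never advance and the timer signal
         * would starve; the clamp keeps every share at >= 1 tick. */
        printf("plain division: %lu\n", (unsigned long)(3 / 8));
        printf("clamped:        %lu\n", cputime_div_non_zero(3, 8));
        return 0;
}
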
diff --git a/kernel/power/disk.c b/kernel/power/disk.c index d72234942798..d3a158a60312 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/fs.h> | 18 | #include <linux/fs.h> |
19 | #include <linux/mount.h> | 19 | #include <linux/mount.h> |
20 | #include <linux/pm.h> | 20 | #include <linux/pm.h> |
21 | #include <linux/console.h> | ||
21 | #include <linux/cpu.h> | 22 | #include <linux/cpu.h> |
22 | 23 | ||
23 | #include "power.h" | 24 | #include "power.h" |
@@ -119,8 +120,10 @@ int pm_suspend_disk(void) | |||
119 | if (error) | 120 | if (error) |
120 | return error; | 121 | return error; |
121 | 122 | ||
123 | suspend_console(); | ||
122 | error = device_suspend(PMSG_FREEZE); | 124 | error = device_suspend(PMSG_FREEZE); |
123 | if (error) { | 125 | if (error) { |
126 | resume_console(); | ||
124 | printk("Some devices failed to suspend\n"); | 127 | printk("Some devices failed to suspend\n"); |
125 | unprepare_processes(); | 128 | unprepare_processes(); |
126 | return error; | 129 | return error; |
@@ -133,6 +136,7 @@ int pm_suspend_disk(void) | |||
133 | 136 | ||
134 | if (in_suspend) { | 137 | if (in_suspend) { |
135 | device_resume(); | 138 | device_resume(); |
139 | resume_console(); | ||
136 | pr_debug("PM: writing image.\n"); | 140 | pr_debug("PM: writing image.\n"); |
137 | error = swsusp_write(); | 141 | error = swsusp_write(); |
138 | if (!error) | 142 | if (!error) |
@@ -148,6 +152,7 @@ int pm_suspend_disk(void) | |||
148 | swsusp_free(); | 152 | swsusp_free(); |
149 | Done: | 153 | Done: |
150 | device_resume(); | 154 | device_resume(); |
155 | resume_console(); | ||
151 | unprepare_processes(); | 156 | unprepare_processes(); |
152 | return error; | 157 | return error; |
153 | } | 158 | } |
@@ -212,7 +217,9 @@ static int software_resume(void) | |||
212 | 217 | ||
213 | pr_debug("PM: Preparing devices for restore.\n"); | 218 | pr_debug("PM: Preparing devices for restore.\n"); |
214 | 219 | ||
220 | suspend_console(); | ||
215 | if ((error = device_suspend(PMSG_PRETHAW))) { | 221 | if ((error = device_suspend(PMSG_PRETHAW))) { |
222 | resume_console(); | ||
216 | printk("Some devices failed to suspend\n"); | 223 | printk("Some devices failed to suspend\n"); |
217 | swsusp_free(); | 224 | swsusp_free(); |
218 | goto Thaw; | 225 | goto Thaw; |
@@ -224,6 +231,7 @@ static int software_resume(void) | |||
224 | swsusp_resume(); | 231 | swsusp_resume(); |
225 | pr_debug("PM: Restore failed, recovering.\n"); | 232 | pr_debug("PM: Restore failed, recovering.\n"); |
226 | device_resume(); | 233 | device_resume(); |
234 | resume_console(); | ||
227 | Thaw: | 235 | Thaw: |
228 | unprepare_processes(); | 236 | unprepare_processes(); |
229 | Done: | 237 | Done: |
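
The bracketing added above is easy to get wrong on error paths; a minimal sketch of the invariant, with an invented function name standing in for the pm_suspend_disk()/software_resume() flow: every exit taken after suspend_console() must pass through resume_console().

#include <linux/console.h>
#include <linux/pm.h>

static int do_pm_step(void)             /* name is illustrative */
{
        int error;

        suspend_console();              /* quiesce console output first */
        error = device_suspend(PMSG_FREEZE);
        if (error) {
                resume_console();       /* failure path restores it... */
                return error;
        }
        /* ... do the actual suspend/resume work ... */
        device_resume();
        resume_console();               /* ...and so does the success path */
        return 0;
}
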
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c index 7a4144ba3afd..f1f900ac3164 100644 --- a/kernel/power/poweroff.c +++ b/kernel/power/poweroff.c | |||
@@ -23,8 +23,7 @@ static void do_poweroff(void *dummy) | |||
23 | 23 | ||
24 | static DECLARE_WORK(poweroff_work, do_poweroff, NULL); | 24 | static DECLARE_WORK(poweroff_work, do_poweroff, NULL); |
25 | 25 | ||
26 | static void handle_poweroff(int key, struct pt_regs *pt_regs, | 26 | static void handle_poweroff(int key, struct tty_struct *tty) |
27 | struct tty_struct *tty) | ||
28 | { | 27 | { |
29 | schedule_work(&poweroff_work); | 28 | schedule_work(&poweroff_work); |
30 | } | 29 | } |
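
For reference, a hedged sketch of a complete SysRq handler under the new two-argument prototype; the op struct follows include/linux/sysrq.h of this kernel generation, and the "example" names are invented:

#include <linux/sysrq.h>
#include <linux/workqueue.h>

static void do_example(void *dummy)
{
        /* real work runs here, in process context */
}

static DECLARE_WORK(example_work, do_example, NULL);

/* pt_regs is gone from the prototype; a handler that still needs the
 * interrupted registers would call get_irq_regs() (see profile.c below). */
static void handle_example(int key, struct tty_struct *tty)
{
        schedule_work(&example_work);   /* SysRq runs in IRQ context */
}

static struct sysrq_key_op example_sysrq_op = {
        .handler        = handle_example,
        .help_msg       = "example",
        .action_msg     = "Scheduling example work",
};
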
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 9b2ee5344dee..1a3b0dd2c3fc 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
@@ -425,7 +425,8 @@ static int submit(int rw, pgoff_t page_off, struct page *page, | |||
425 | bio_set_pages_dirty(bio); | 425 | bio_set_pages_dirty(bio); |
426 | bio_put(bio); | 426 | bio_put(bio); |
427 | } else { | 427 | } else { |
428 | get_page(page); | 428 | if (rw == READ) |
429 | get_page(page); /* These pages are freed later */ | ||
429 | bio->bi_private = *bio_chain; | 430 | bio->bi_private = *bio_chain; |
430 | *bio_chain = bio; | 431 | *bio_chain = bio; |
431 | submit_bio(rw | (1 << BIO_RW_SYNC), bio); | 432 | submit_bio(rw | (1 << BIO_RW_SYNC), bio); |
diff --git a/kernel/power/user.c b/kernel/power/user.c index 72825c853cd7..d991d3b0e5a4 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/swapops.h> | 19 | #include <linux/swapops.h> |
20 | #include <linux/pm.h> | 20 | #include <linux/pm.h> |
21 | #include <linux/fs.h> | 21 | #include <linux/fs.h> |
22 | #include <linux/console.h> | ||
22 | #include <linux/cpu.h> | 23 | #include <linux/cpu.h> |
23 | 24 | ||
24 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
@@ -145,10 +146,10 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, | |||
145 | error = freeze_processes(); | 146 | error = freeze_processes(); |
146 | if (error) { | 147 | if (error) { |
147 | thaw_processes(); | 148 | thaw_processes(); |
149 | enable_nonboot_cpus(); | ||
148 | error = -EBUSY; | 150 | error = -EBUSY; |
149 | } | 151 | } |
150 | } | 152 | } |
151 | enable_nonboot_cpus(); | ||
152 | up(&pm_sem); | 153 | up(&pm_sem); |
153 | if (!error) | 154 | if (!error) |
154 | data->frozen = 1; | 155 | data->frozen = 1; |
@@ -173,12 +174,14 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, | |||
173 | /* Free memory before shutting down devices. */ | 174 | /* Free memory before shutting down devices. */ |
174 | error = swsusp_shrink_memory(); | 175 | error = swsusp_shrink_memory(); |
175 | if (!error) { | 176 | if (!error) { |
177 | suspend_console(); | ||
176 | error = device_suspend(PMSG_FREEZE); | 178 | error = device_suspend(PMSG_FREEZE); |
177 | if (!error) { | 179 | if (!error) { |
178 | in_suspend = 1; | 180 | in_suspend = 1; |
179 | error = swsusp_suspend(); | 181 | error = swsusp_suspend(); |
180 | device_resume(); | 182 | device_resume(); |
181 | } | 183 | } |
184 | resume_console(); | ||
182 | } | 185 | } |
183 | up(&pm_sem); | 186 | up(&pm_sem); |
184 | if (!error) | 187 | if (!error) |
@@ -196,11 +199,13 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, | |||
196 | snapshot_free_unused_memory(&data->handle); | 199 | snapshot_free_unused_memory(&data->handle); |
197 | down(&pm_sem); | 200 | down(&pm_sem); |
198 | pm_prepare_console(); | 201 | pm_prepare_console(); |
202 | suspend_console(); | ||
199 | error = device_suspend(PMSG_PRETHAW); | 203 | error = device_suspend(PMSG_PRETHAW); |
200 | if (!error) { | 204 | if (!error) { |
201 | error = swsusp_resume(); | 205 | error = swsusp_resume(); |
202 | device_resume(); | 206 | device_resume(); |
203 | } | 207 | } |
208 | resume_console(); | ||
204 | pm_restore_console(); | 209 | pm_restore_console(); |
205 | up(&pm_sem); | 210 | up(&pm_sem); |
206 | break; | 211 | break; |
@@ -289,6 +294,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, | |||
289 | } | 294 | } |
290 | 295 | ||
291 | /* Put devices to sleep */ | 296 | /* Put devices to sleep */ |
297 | suspend_console(); | ||
292 | error = device_suspend(PMSG_SUSPEND); | 298 | error = device_suspend(PMSG_SUSPEND); |
293 | if (error) { | 299 | if (error) { |
294 | printk(KERN_ERR "Failed to suspend some devices.\n"); | 300 | printk(KERN_ERR "Failed to suspend some devices.\n"); |
@@ -299,7 +305,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, | |||
299 | /* Wake up devices */ | 305 | /* Wake up devices */ |
300 | device_resume(); | 306 | device_resume(); |
301 | } | 307 | } |
302 | 308 | resume_console(); | |
303 | if (pm_ops->finish) | 309 | if (pm_ops->finish) |
304 | pm_ops->finish(PM_SUSPEND_MEM); | 310 | pm_ops->finish(PM_SUSPEND_MEM); |
305 | 311 | ||
diff --git a/kernel/printk.c b/kernel/printk.c index 771f5e861bcd..f7d427ef5038 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
@@ -820,15 +820,8 @@ void release_console_sem(void) | |||
820 | console_locked = 0; | 820 | console_locked = 0; |
821 | up(&console_sem); | 821 | up(&console_sem); |
822 | spin_unlock_irqrestore(&logbuf_lock, flags); | 822 | spin_unlock_irqrestore(&logbuf_lock, flags); |
823 | if (wake_klogd && !oops_in_progress && waitqueue_active(&log_wait)) { | 823 | if (wake_klogd && !oops_in_progress && waitqueue_active(&log_wait)) |
824 | /* | 824 | wake_up_interruptible(&log_wait); |
825 | * If we printk from within the lock dependency code, | ||
826 | * from within the scheduler code, then do not lock | ||
827 | * up due to self-recursion: | ||
828 | */ | ||
829 | if (!lockdep_internal()) | ||
830 | wake_up_interruptible(&log_wait); | ||
831 | } | ||
832 | } | 825 | } |
833 | EXPORT_SYMBOL(release_console_sem); | 826 | EXPORT_SYMBOL(release_console_sem); |
834 | 827 | ||
diff --git a/kernel/profile.c b/kernel/profile.c index fb660c7d35ba..f940b462eec9 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/mutex.h> | 25 | #include <linux/mutex.h> |
26 | #include <asm/sections.h> | 26 | #include <asm/sections.h> |
27 | #include <asm/semaphore.h> | 27 | #include <asm/semaphore.h> |
28 | #include <asm/irq_regs.h> | ||
28 | 29 | ||
29 | struct profile_hit { | 30 | struct profile_hit { |
30 | u32 pc, hits; | 31 | u32 pc, hits; |
@@ -366,8 +367,10 @@ void profile_hit(int type, void *__pc) | |||
366 | } | 367 | } |
367 | #endif /* !CONFIG_SMP */ | 368 | #endif /* !CONFIG_SMP */ |
368 | 369 | ||
369 | void profile_tick(int type, struct pt_regs *regs) | 370 | void profile_tick(int type) |
370 | { | 371 | { |
372 | struct pt_regs *regs = get_irq_regs(); | ||
373 | |||
371 | if (type == CPU_PROFILING && timer_hook) | 374 | if (type == CPU_PROFILING && timer_hook) |
372 | timer_hook(regs); | 375 | timer_hook(regs); |
373 | if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask)) | 376 | if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask)) |
@@ -396,7 +399,7 @@ static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffe | |||
396 | unsigned long full_count = count, err; | 399 | unsigned long full_count = count, err; |
397 | cpumask_t new_value; | 400 | cpumask_t new_value; |
398 | 401 | ||
399 | err = cpumask_parse(buffer, count, new_value); | 402 | err = cpumask_parse_user(buffer, count, new_value); |
400 | if (err) | 403 | if (err) |
401 | return err; | 404 | return err; |
402 | 405 | ||
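
profile_tick() now pulls the interrupted context out of per-CPU state instead of taking it as a parameter. A hedged sketch of the get_irq_regs() idiom, with an illustrative counter:

#include <linux/kernel.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

static unsigned long user_ticks;        /* illustrative counter */

static void example_tick_hook(void)
{
        /* The arch core stashes a pt_regs pointer in per-CPU state on
         * IRQ entry (set_irq_regs()), so tick-time hooks no longer need
         * regs threaded through every caller; outside interrupt context
         * this is NULL. */
        struct pt_regs *regs = get_irq_regs();

        if (regs && user_mode(regs))
                user_ticks++;           /* tick interrupted user code */
}
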
diff --git a/kernel/relay.c b/kernel/relay.c index 1d63ecddfa70..f04bbdb56ac2 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
@@ -887,7 +887,7 @@ static int subbuf_read_actor(size_t read_start, | |||
887 | 887 | ||
888 | from = buf->start + read_start; | 888 | from = buf->start + read_start; |
889 | ret = avail; | 889 | ret = avail; |
890 | if (copy_to_user(desc->arg.data, from, avail)) { | 890 | if (copy_to_user(desc->arg.buf, from, avail)) { |
891 | desc->error = -EFAULT; | 891 | desc->error = -EFAULT; |
892 | ret = 0; | 892 | ret = 0; |
893 | } | 893 | } |
@@ -946,24 +946,17 @@ typedef int (*subbuf_actor_t) (size_t read_start, | |||
946 | */ | 946 | */ |
947 | static inline ssize_t relay_file_read_subbufs(struct file *filp, | 947 | static inline ssize_t relay_file_read_subbufs(struct file *filp, |
948 | loff_t *ppos, | 948 | loff_t *ppos, |
949 | size_t count, | ||
950 | subbuf_actor_t subbuf_actor, | 949 | subbuf_actor_t subbuf_actor, |
951 | read_actor_t actor, | 950 | read_actor_t actor, |
952 | void *target) | 951 | read_descriptor_t *desc) |
953 | { | 952 | { |
954 | struct rchan_buf *buf = filp->private_data; | 953 | struct rchan_buf *buf = filp->private_data; |
955 | size_t read_start, avail; | 954 | size_t read_start, avail; |
956 | read_descriptor_t desc; | ||
957 | int ret; | 955 | int ret; |
958 | 956 | ||
959 | if (!count) | 957 | if (!desc->count) |
960 | return 0; | 958 | return 0; |
961 | 959 | ||
962 | desc.written = 0; | ||
963 | desc.count = count; | ||
964 | desc.arg.data = target; | ||
965 | desc.error = 0; | ||
966 | |||
967 | mutex_lock(&filp->f_dentry->d_inode->i_mutex); | 960 | mutex_lock(&filp->f_dentry->d_inode->i_mutex); |
968 | do { | 961 | do { |
969 | if (!relay_file_read_avail(buf, *ppos)) | 962 | if (!relay_file_read_avail(buf, *ppos)) |
@@ -974,19 +967,19 @@ static inline ssize_t relay_file_read_subbufs(struct file *filp, | |||
974 | if (!avail) | 967 | if (!avail) |
975 | break; | 968 | break; |
976 | 969 | ||
977 | avail = min(desc.count, avail); | 970 | avail = min(desc->count, avail); |
978 | ret = subbuf_actor(read_start, buf, avail, &desc, actor); | 971 | ret = subbuf_actor(read_start, buf, avail, desc, actor); |
979 | if (desc.error < 0) | 972 | if (desc->error < 0) |
980 | break; | 973 | break; |
981 | 974 | ||
982 | if (ret) { | 975 | if (ret) { |
983 | relay_file_read_consume(buf, read_start, ret); | 976 | relay_file_read_consume(buf, read_start, ret); |
984 | *ppos = relay_file_read_end_pos(buf, read_start, ret); | 977 | *ppos = relay_file_read_end_pos(buf, read_start, ret); |
985 | } | 978 | } |
986 | } while (desc.count && ret); | 979 | } while (desc->count && ret); |
987 | mutex_unlock(&filp->f_dentry->d_inode->i_mutex); | 980 | mutex_unlock(&filp->f_dentry->d_inode->i_mutex); |
988 | 981 | ||
989 | return desc.written; | 982 | return desc->written; |
990 | } | 983 | } |
991 | 984 | ||
992 | static ssize_t relay_file_read(struct file *filp, | 985 | static ssize_t relay_file_read(struct file *filp, |
@@ -994,8 +987,13 @@ static ssize_t relay_file_read(struct file *filp, | |||
994 | size_t count, | 987 | size_t count, |
995 | loff_t *ppos) | 988 | loff_t *ppos) |
996 | { | 989 | { |
997 | return relay_file_read_subbufs(filp, ppos, count, subbuf_read_actor, | 990 | read_descriptor_t desc; |
998 | NULL, buffer); | 991 | desc.written = 0; |
992 | desc.count = count; | ||
993 | desc.arg.buf = buffer; | ||
994 | desc.error = 0; | ||
995 | return relay_file_read_subbufs(filp, ppos, subbuf_read_actor, | ||
996 | NULL, &desc); | ||
999 | } | 997 | } |
1000 | 998 | ||
1001 | static ssize_t relay_file_sendfile(struct file *filp, | 999 | static ssize_t relay_file_sendfile(struct file *filp, |
@@ -1004,8 +1002,13 @@ static ssize_t relay_file_sendfile(struct file *filp, | |||
1004 | read_actor_t actor, | 1002 | read_actor_t actor, |
1005 | void *target) | 1003 | void *target) |
1006 | { | 1004 | { |
1007 | return relay_file_read_subbufs(filp, ppos, count, subbuf_send_actor, | 1005 | read_descriptor_t desc; |
1008 | actor, target); | 1006 | desc.written = 0; |
1007 | desc.count = count; | ||
1008 | desc.arg.data = target; | ||
1009 | desc.error = 0; | ||
1010 | return relay_file_read_subbufs(filp, ppos, subbuf_send_actor, | ||
1011 | actor, &desc); | ||
1009 | } | 1012 | } |
1010 | 1013 | ||
1011 | struct file_operations relay_file_operations = { | 1014 | struct file_operations relay_file_operations = { |
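
The refactor hinges on read_descriptor_t's arg union, paraphrased below from include/linux/fs.h of this era (field comments added): the read path stores a user-space pointer in arg.buf while sendfile stashes an opaque target in arg.data, which is also why subbuf_read_actor() had to switch its copy_to_user() from desc->arg.data to desc->arg.buf.

/* Paraphrased from include/linux/fs.h (this kernel generation): */
typedef struct {
        size_t written;                 /* bytes produced so far */
        size_t count;                   /* bytes the caller still wants */
        union {
                char __user *buf;       /* relay_file_read(): user buffer */
                void *data;             /* relay_file_sendfile(): opaque target */
        } arg;
        int error;                      /* sticky error set by the actor */
} read_descriptor_t;
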
diff --git a/kernel/sched.c b/kernel/sched.c index 53608a59d6e3..3399701c680e 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -160,15 +160,6 @@ | |||
160 | #define TASK_PREEMPTS_CURR(p, rq) \ | 160 | #define TASK_PREEMPTS_CURR(p, rq) \ |
161 | ((p)->prio < (rq)->curr->prio) | 161 | ((p)->prio < (rq)->curr->prio) |
162 | 162 | ||
163 | /* | ||
164 | * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ] | ||
165 | * to time slice values: [800ms ... 100ms ... 5ms] | ||
166 | * | ||
167 | * The higher a thread's priority, the bigger timeslices | ||
168 | * it gets during one round of execution. But even the lowest | ||
169 | * priority thread gets MIN_TIMESLICE worth of execution time. | ||
170 | */ | ||
171 | |||
172 | #define SCALE_PRIO(x, prio) \ | 163 | #define SCALE_PRIO(x, prio) \ |
173 | max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE) | 164 | max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE) |
174 | 165 | ||
@@ -180,6 +171,15 @@ static unsigned int static_prio_timeslice(int static_prio) | |||
180 | return SCALE_PRIO(DEF_TIMESLICE, static_prio); | 171 | return SCALE_PRIO(DEF_TIMESLICE, static_prio); |
181 | } | 172 | } |
182 | 173 | ||
174 | /* | ||
175 | * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ] | ||
176 | * to time slice values: [800ms ... 100ms ... 5ms] | ||
177 | * | ||
178 | * The higher a thread's priority, the bigger timeslices | ||
179 | * it gets during one round of execution. But even the lowest | ||
180 | * priority thread gets MIN_TIMESLICE worth of execution time. | ||
181 | */ | ||
182 | |||
183 | static inline unsigned int task_timeslice(struct task_struct *p) | 183 | static inline unsigned int task_timeslice(struct task_struct *p) |
184 | { | 184 | { |
185 | return static_prio_timeslice(p->static_prio); | 185 | return static_prio_timeslice(p->static_prio); |
@@ -1822,14 +1822,14 @@ context_switch(struct rq *rq, struct task_struct *prev, | |||
1822 | struct mm_struct *mm = next->mm; | 1822 | struct mm_struct *mm = next->mm; |
1823 | struct mm_struct *oldmm = prev->active_mm; | 1823 | struct mm_struct *oldmm = prev->active_mm; |
1824 | 1824 | ||
1825 | if (unlikely(!mm)) { | 1825 | if (!mm) { |
1826 | next->active_mm = oldmm; | 1826 | next->active_mm = oldmm; |
1827 | atomic_inc(&oldmm->mm_count); | 1827 | atomic_inc(&oldmm->mm_count); |
1828 | enter_lazy_tlb(oldmm, next); | 1828 | enter_lazy_tlb(oldmm, next); |
1829 | } else | 1829 | } else |
1830 | switch_mm(oldmm, mm, next); | 1830 | switch_mm(oldmm, mm, next); |
1831 | 1831 | ||
1832 | if (unlikely(!prev->mm)) { | 1832 | if (!prev->mm) { |
1833 | prev->active_mm = NULL; | 1833 | prev->active_mm = NULL; |
1834 | WARN_ON(rq->prev_mm); | 1834 | WARN_ON(rq->prev_mm); |
1835 | rq->prev_mm = oldmm; | 1835 | rq->prev_mm = oldmm; |
@@ -3491,7 +3491,7 @@ asmlinkage void __sched preempt_schedule(void) | |||
3491 | * If there is a non-zero preempt_count or interrupts are disabled, | 3491 | * If there is a non-zero preempt_count or interrupts are disabled, |
3492 | * we do not want to preempt the current task. Just return.. | 3492 | * we do not want to preempt the current task. Just return.. |
3493 | */ | 3493 | */ |
3494 | if (unlikely(ti->preempt_count || irqs_disabled())) | 3494 | if (likely(ti->preempt_count || irqs_disabled())) |
3495 | return; | 3495 | return; |
3496 | 3496 | ||
3497 | need_resched: | 3497 | need_resched: |
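
The unlikely()-to-likely() flip above only changes a compiler hint, treating the early return as the common case for preempt_schedule(). As a reminder of what the annotations expand to (per include/linux/compiler.h, gcc builds), they steer static branch prediction and block layout only, so a wrong hint costs cycles but never correctness:

#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)
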
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 7a3b2e75f040..0e53314b14de 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c | |||
@@ -49,6 +49,7 @@ cond_syscall(compat_sys_get_robust_list); | |||
49 | cond_syscall(sys_epoll_create); | 49 | cond_syscall(sys_epoll_create); |
50 | cond_syscall(sys_epoll_ctl); | 50 | cond_syscall(sys_epoll_ctl); |
51 | cond_syscall(sys_epoll_wait); | 51 | cond_syscall(sys_epoll_wait); |
52 | cond_syscall(sys_epoll_pwait); | ||
52 | cond_syscall(sys_semget); | 53 | cond_syscall(sys_semget); |
53 | cond_syscall(sys_semop); | 54 | cond_syscall(sys_semop); |
54 | cond_syscall(sys_semtimedop); | 55 | cond_syscall(sys_semtimedop); |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 8020fb273c4f..8bff2c18fb5a 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -136,8 +136,10 @@ static int parse_table(int __user *, int, void __user *, size_t __user *, | |||
136 | static int proc_do_uts_string(ctl_table *table, int write, struct file *filp, | 136 | static int proc_do_uts_string(ctl_table *table, int write, struct file *filp, |
137 | void __user *buffer, size_t *lenp, loff_t *ppos); | 137 | void __user *buffer, size_t *lenp, loff_t *ppos); |
138 | 138 | ||
139 | #ifdef CONFIG_PROC_SYSCTL | ||
139 | static int proc_do_cad_pid(ctl_table *table, int write, struct file *filp, | 140 | static int proc_do_cad_pid(ctl_table *table, int write, struct file *filp, |
140 | void __user *buffer, size_t *lenp, loff_t *ppos); | 141 | void __user *buffer, size_t *lenp, loff_t *ppos); |
142 | #endif | ||
141 | 143 | ||
142 | static ctl_table root_table[]; | 144 | static ctl_table root_table[]; |
143 | static struct ctl_table_header root_table_header = | 145 | static struct ctl_table_header root_table_header = |
@@ -542,6 +544,7 @@ static ctl_table kern_table[] = { | |||
542 | .proc_handler = &proc_dointvec, | 544 | .proc_handler = &proc_dointvec, |
543 | }, | 545 | }, |
544 | #endif | 546 | #endif |
547 | #ifdef CONFIG_PROC_SYSCTL | ||
545 | { | 548 | { |
546 | .ctl_name = KERN_CADPID, | 549 | .ctl_name = KERN_CADPID, |
547 | .procname = "cad_pid", | 550 | .procname = "cad_pid", |
@@ -550,6 +553,7 @@ static ctl_table kern_table[] = { | |||
550 | .mode = 0600, | 553 | .mode = 0600, |
551 | .proc_handler = &proc_do_cad_pid, | 554 | .proc_handler = &proc_do_cad_pid, |
552 | }, | 555 | }, |
556 | #endif | ||
553 | { | 557 | { |
554 | .ctl_name = KERN_MAX_THREADS, | 558 | .ctl_name = KERN_MAX_THREADS, |
555 | .procname = "threads-max", | 559 | .procname = "threads-max", |
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index 126bb30c4afe..a99b2a6e6a07 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c | |||
@@ -57,7 +57,7 @@ static cycle_t jiffies_read(void) | |||
57 | 57 | ||
58 | struct clocksource clocksource_jiffies = { | 58 | struct clocksource clocksource_jiffies = { |
59 | .name = "jiffies", | 59 | .name = "jiffies", |
60 | .rating = 0, /* lowest rating*/ | 60 | .rating = 1, /* lowest valid rating */ |
61 | .read = jiffies_read, | 61 | .read = jiffies_read, |
62 | .mask = 0xffffffff, /*32bits*/ | 62 | .mask = 0xffffffff, /*32bits*/ |
63 | .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */ | 63 | .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */ |
diff --git a/kernel/unwind.c b/kernel/unwind.c index 2e2368607aab..f7e50d16dbf6 100644 --- a/kernel/unwind.c +++ b/kernel/unwind.c | |||
@@ -11,13 +11,15 @@ | |||
11 | 11 | ||
12 | #include <linux/unwind.h> | 12 | #include <linux/unwind.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/delay.h> | 14 | #include <linux/bootmem.h> |
15 | #include <linux/sort.h> | ||
15 | #include <linux/stop_machine.h> | 16 | #include <linux/stop_machine.h> |
16 | #include <asm/sections.h> | 17 | #include <asm/sections.h> |
17 | #include <asm/uaccess.h> | 18 | #include <asm/uaccess.h> |
18 | #include <asm/unaligned.h> | 19 | #include <asm/unaligned.h> |
19 | 20 | ||
20 | extern char __start_unwind[], __end_unwind[]; | 21 | extern char __start_unwind[], __end_unwind[]; |
22 | extern const u8 __start_unwind_hdr[], __end_unwind_hdr[]; | ||
21 | 23 | ||
22 | #define MAX_STACK_DEPTH 8 | 24 | #define MAX_STACK_DEPTH 8 |
23 | 25 | ||
@@ -100,6 +102,8 @@ static struct unwind_table { | |||
100 | } core, init; | 102 | } core, init; |
101 | const void *address; | 103 | const void *address; |
102 | unsigned long size; | 104 | unsigned long size; |
105 | const unsigned char *header; | ||
106 | unsigned long hdrsz; | ||
103 | struct unwind_table *link; | 107 | struct unwind_table *link; |
104 | const char *name; | 108 | const char *name; |
105 | } root_table; | 109 | } root_table; |
@@ -145,6 +149,10 @@ static struct unwind_table *find_table(unsigned long pc) | |||
145 | return table; | 149 | return table; |
146 | } | 150 | } |
147 | 151 | ||
152 | static unsigned long read_pointer(const u8 **pLoc, | ||
153 | const void *end, | ||
154 | signed ptrType); | ||
155 | |||
148 | static void init_unwind_table(struct unwind_table *table, | 156 | static void init_unwind_table(struct unwind_table *table, |
149 | const char *name, | 157 | const char *name, |
150 | const void *core_start, | 158 | const void *core_start, |
@@ -152,14 +160,30 @@ static void init_unwind_table(struct unwind_table *table, | |||
152 | const void *init_start, | 160 | const void *init_start, |
153 | unsigned long init_size, | 161 | unsigned long init_size, |
154 | const void *table_start, | 162 | const void *table_start, |
155 | unsigned long table_size) | 163 | unsigned long table_size, |
164 | const u8 *header_start, | ||
165 | unsigned long header_size) | ||
156 | { | 166 | { |
167 | const u8 *ptr = header_start + 4; | ||
168 | const u8 *end = header_start + header_size; | ||
169 | |||
157 | table->core.pc = (unsigned long)core_start; | 170 | table->core.pc = (unsigned long)core_start; |
158 | table->core.range = core_size; | 171 | table->core.range = core_size; |
159 | table->init.pc = (unsigned long)init_start; | 172 | table->init.pc = (unsigned long)init_start; |
160 | table->init.range = init_size; | 173 | table->init.range = init_size; |
161 | table->address = table_start; | 174 | table->address = table_start; |
162 | table->size = table_size; | 175 | table->size = table_size; |
176 | /* See if the linker-provided table looks valid. */ | ||
177 | if (header_size <= 4 | ||
178 | || header_start[0] != 1 | ||
179 | || (void *)read_pointer(&ptr, end, header_start[1]) != table_start | ||
180 | || header_start[2] == DW_EH_PE_omit | ||
181 | || read_pointer(&ptr, end, header_start[2]) <= 0 | ||
182 | || header_start[3] == DW_EH_PE_omit) | ||
183 | header_start = NULL; | ||
184 | table->hdrsz = header_size; | ||
185 | smp_wmb(); | ||
186 | table->header = header_start; | ||
163 | table->link = NULL; | 187 | table->link = NULL; |
164 | table->name = name; | 188 | table->name = name; |
165 | } | 189 | } |
@@ -169,7 +193,143 @@ void __init unwind_init(void) | |||
169 | init_unwind_table(&root_table, "kernel", | 193 | init_unwind_table(&root_table, "kernel", |
170 | _text, _end - _text, | 194 | _text, _end - _text, |
171 | NULL, 0, | 195 | NULL, 0, |
172 | __start_unwind, __end_unwind - __start_unwind); | 196 | __start_unwind, __end_unwind - __start_unwind, |
197 | __start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr); | ||
198 | } | ||
199 | |||
200 | static const u32 bad_cie, not_fde; | ||
201 | static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *); | ||
202 | static signed fde_pointer_type(const u32 *cie); | ||
203 | |||
204 | struct eh_frame_hdr_table_entry { | ||
205 | unsigned long start, fde; | ||
206 | }; | ||
207 | |||
208 | static int cmp_eh_frame_hdr_table_entries(const void *p1, const void *p2) | ||
209 | { | ||
210 | const struct eh_frame_hdr_table_entry *e1 = p1; | ||
211 | const struct eh_frame_hdr_table_entry *e2 = p2; | ||
212 | |||
213 | return (e1->start > e2->start) - (e1->start < e2->start); | ||
214 | } | ||
215 | |||
216 | static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size) | ||
217 | { | ||
218 | struct eh_frame_hdr_table_entry *e1 = p1; | ||
219 | struct eh_frame_hdr_table_entry *e2 = p2; | ||
220 | unsigned long v; | ||
221 | |||
222 | v = e1->start; | ||
223 | e1->start = e2->start; | ||
224 | e2->start = v; | ||
225 | v = e1->fde; | ||
226 | e1->fde = e2->fde; | ||
227 | e2->fde = v; | ||
228 | } | ||
229 | |||
230 | static void __init setup_unwind_table(struct unwind_table *table, | ||
231 | void *(*alloc)(unsigned long)) | ||
232 | { | ||
233 | const u8 *ptr; | ||
234 | unsigned long tableSize = table->size, hdrSize; | ||
235 | unsigned n; | ||
236 | const u32 *fde; | ||
237 | struct { | ||
238 | u8 version; | ||
239 | u8 eh_frame_ptr_enc; | ||
240 | u8 fde_count_enc; | ||
241 | u8 table_enc; | ||
242 | unsigned long eh_frame_ptr; | ||
243 | unsigned int fde_count; | ||
244 | struct eh_frame_hdr_table_entry table[]; | ||
245 | } __attribute__((__packed__)) *header; | ||
246 | |||
247 | if (table->header) | ||
248 | return; | ||
249 | |||
250 | if (table->hdrsz) | ||
251 | printk(KERN_WARNING ".eh_frame_hdr for '%s' present but unusable\n", | ||
252 | table->name); | ||
253 | |||
254 | if (tableSize & (sizeof(*fde) - 1)) | ||
255 | return; | ||
256 | |||
257 | for (fde = table->address, n = 0; | ||
258 | tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde; | ||
259 | tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) { | ||
260 | const u32 *cie = cie_for_fde(fde, table); | ||
261 | signed ptrType; | ||
262 | |||
263 | if (cie == ¬_fde) | ||
264 | continue; | ||
265 | if (cie == NULL | ||
266 | || cie == &bad_cie | ||
267 | || (ptrType = fde_pointer_type(cie)) < 0) | ||
268 | return; | ||
269 | ptr = (const u8 *)(fde + 2); | ||
270 | if (!read_pointer(&ptr, | ||
271 | (const u8 *)(fde + 1) + *fde, | ||
272 | ptrType)) | ||
273 | return; | ||
274 | ++n; | ||
275 | } | ||
276 | |||
277 | if (tableSize || !n) | ||
278 | return; | ||
279 | |||
280 | hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int) | ||
281 | + 2 * n * sizeof(unsigned long); | ||
282 | header = alloc(hdrSize); | ||
283 | if (!header) | ||
284 | return; | ||
285 | header->version = 1; | ||
286 | header->eh_frame_ptr_enc = DW_EH_PE_abs|DW_EH_PE_native; | ||
287 | header->fde_count_enc = DW_EH_PE_abs|DW_EH_PE_data4; | ||
288 | header->table_enc = DW_EH_PE_abs|DW_EH_PE_native; | ||
289 | put_unaligned((unsigned long)table->address, &header->eh_frame_ptr); | ||
290 | BUILD_BUG_ON(offsetof(typeof(*header), fde_count) | ||
291 | % __alignof(typeof(header->fde_count))); | ||
292 | header->fde_count = n; | ||
293 | |||
294 | BUILD_BUG_ON(offsetof(typeof(*header), table) | ||
295 | % __alignof(typeof(*header->table))); | ||
296 | for (fde = table->address, tableSize = table->size, n = 0; | ||
297 | tableSize; | ||
298 | tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) { | ||
299 | const u32 *cie = fde + 1 - fde[1] / sizeof(*fde); | ||
300 | |||
301 | if (!fde[1]) | ||
302 | continue; /* this is a CIE */ | ||
303 | ptr = (const u8 *)(fde + 2); | ||
304 | header->table[n].start = read_pointer(&ptr, | ||
305 | (const u8 *)(fde + 1) + *fde, | ||
306 | fde_pointer_type(cie)); | ||
307 | header->table[n].fde = (unsigned long)fde; | ||
308 | ++n; | ||
309 | } | ||
310 | WARN_ON(n != header->fde_count); | ||
311 | |||
312 | sort(header->table, | ||
313 | n, | ||
314 | sizeof(*header->table), | ||
315 | cmp_eh_frame_hdr_table_entries, | ||
316 | swap_eh_frame_hdr_table_entries); | ||
317 | |||
318 | table->hdrsz = hdrSize; | ||
319 | smp_wmb(); | ||
320 | table->header = (const void *)header; | ||
321 | } | ||
322 | |||
323 | static void *__init balloc(unsigned long sz) | ||
324 | { | ||
325 | return __alloc_bootmem_nopanic(sz, | ||
326 | sizeof(unsigned int), | ||
327 | __pa(MAX_DMA_ADDRESS)); | ||
328 | } | ||
329 | |||
330 | void __init unwind_setup(void) | ||
331 | { | ||
332 | setup_unwind_table(&root_table, balloc); | ||
173 | } | 333 | } |
174 | 334 | ||
175 | #ifdef CONFIG_MODULES | 335 | #ifdef CONFIG_MODULES |
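
A note on cmp_eh_frame_hdr_table_entries() above: the (a > b) - (a < b) form yields exactly -1, 0 or +1, whereas the tempting "return e1->start - e2->start" misorders entries once the unsigned long difference no longer fits in the int return type. A stand-alone illustration:

#include <stdio.h>

static int three_way(unsigned long a, unsigned long b)
{
        return (a > b) - (a < b);       /* exactly -1, 0 or +1 */
}

int main(void)
{
        unsigned long big = 0x80000001UL;

        /* A naive "return a - b" comparator converts 0x80000000 to a
         * negative int here and reports the wrong order; the three-way
         * form cannot misorder anything. */
        printf("%d %d %d\n", three_way(big, 1UL),   /* prints  1 */
                             three_way(1UL, big),   /* prints -1 */
                             three_way(5UL, 5UL));  /* prints  0 */
        return 0;
}
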
@@ -193,7 +353,8 @@ void *unwind_add_table(struct module *module, | |||
193 | init_unwind_table(table, module->name, | 353 | init_unwind_table(table, module->name, |
194 | module->module_core, module->core_size, | 354 | module->module_core, module->core_size, |
195 | module->module_init, module->init_size, | 355 | module->module_init, module->init_size, |
196 | table_start, table_size); | 356 | table_start, table_size, |
357 | NULL, 0); | ||
197 | 358 | ||
198 | if (last_table) | 359 | if (last_table) |
199 | last_table->link = table; | 360 | last_table->link = table; |
@@ -303,6 +464,26 @@ static sleb128_t get_sleb128(const u8 **pcur, const u8 *end) | |||
303 | return value; | 464 | return value; |
304 | } | 465 | } |
305 | 466 | ||
467 | static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table) | ||
468 | { | ||
469 | const u32 *cie; | ||
470 | |||
471 | if (!*fde || (*fde & (sizeof(*fde) - 1))) | ||
472 | return &bad_cie; | ||
473 | if (!fde[1]) | ||
474 | return ¬_fde; /* this is a CIE */ | ||
475 | if ((fde[1] & (sizeof(*fde) - 1)) | ||
476 | || fde[1] > (unsigned long)(fde + 1) - (unsigned long)table->address) | ||
477 | return NULL; /* this is not a valid FDE */ | ||
478 | cie = fde + 1 - fde[1] / sizeof(*fde); | ||
479 | if (*cie <= sizeof(*cie) + 4 | ||
480 | || *cie >= fde[1] - sizeof(*fde) | ||
481 | || (*cie & (sizeof(*cie) - 1)) | ||
482 | || cie[1]) | ||
483 | return NULL; /* this is not a (valid) CIE */ | ||
484 | return cie; | ||
485 | } | ||
486 | |||
306 | static unsigned long read_pointer(const u8 **pLoc, | 487 | static unsigned long read_pointer(const u8 **pLoc, |
307 | const void *end, | 488 | const void *end, |
308 | signed ptrType) | 489 | signed ptrType) |
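
The back-pointer arithmetic in cie_for_fde() follows the .eh_frame record layout; a sketch of the 32-bit length form it assumes (the 0xffffffff escape to 64-bit lengths is not handled by this code):

#include <linux/types.h>

/* Sketch of one .eh_frame record (32-bit length form): */
struct eh_frame_entry_sketch {
        u32 length;     /* bytes that follow; 0 terminates the section */
        u32 cie_id;     /* 0 => this record is a CIE; in an FDE it is the
                         * byte offset back from this very field to the
                         * owning CIE, hence:
                         *   cie = fde + 1 - fde[1] / sizeof(*fde);  */
        /* encoded CIE/FDE payload follows */
};
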
@@ -610,49 +791,108 @@ int unwind(struct unwind_frame_info *frame) | |||
610 | unsigned i; | 791 | unsigned i; |
611 | signed ptrType = -1; | 792 | signed ptrType = -1; |
612 | uleb128_t retAddrReg = 0; | 793 | uleb128_t retAddrReg = 0; |
613 | struct unwind_table *table; | 794 | const struct unwind_table *table; |
614 | struct unwind_state state; | 795 | struct unwind_state state; |
615 | 796 | ||
616 | if (UNW_PC(frame) == 0) | 797 | if (UNW_PC(frame) == 0) |
617 | return -EINVAL; | 798 | return -EINVAL; |
618 | if ((table = find_table(pc)) != NULL | 799 | if ((table = find_table(pc)) != NULL |
619 | && !(table->size & (sizeof(*fde) - 1))) { | 800 | && !(table->size & (sizeof(*fde) - 1))) { |
620 | unsigned long tableSize = table->size; | 801 | const u8 *hdr = table->header; |
621 | 802 | unsigned long tableSize; | |
622 | for (fde = table->address; | 803 | |
623 | tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde; | 804 | smp_rmb(); |
624 | tableSize -= sizeof(*fde) + *fde, | 805 | if (hdr && hdr[0] == 1) { |
625 | fde += 1 + *fde / sizeof(*fde)) { | 806 | switch (hdr[3] & DW_EH_PE_FORM) { |
626 | if (!*fde || (*fde & (sizeof(*fde) - 1))) | 807 | case DW_EH_PE_native: tableSize = sizeof(unsigned long); break; |
627 | break; | 808 | case DW_EH_PE_data2: tableSize = 2; break; |
628 | if (!fde[1]) | 809 | case DW_EH_PE_data4: tableSize = 4; break; |
629 | continue; /* this is a CIE */ | 810 | case DW_EH_PE_data8: tableSize = 8; break; |
630 | if ((fde[1] & (sizeof(*fde) - 1)) | 811 | default: tableSize = 0; break; |
631 | || fde[1] > (unsigned long)(fde + 1) | 812 | } |
632 | - (unsigned long)table->address) | 813 | ptr = hdr + 4; |
633 | continue; /* this is not a valid FDE */ | 814 | end = hdr + table->hdrsz; |
634 | cie = fde + 1 - fde[1] / sizeof(*fde); | 815 | if (tableSize |
635 | if (*cie <= sizeof(*cie) + 4 | 816 | && read_pointer(&ptr, end, hdr[1]) |
636 | || *cie >= fde[1] - sizeof(*fde) | 817 | == (unsigned long)table->address |
637 | || (*cie & (sizeof(*cie) - 1)) | 818 | && (i = read_pointer(&ptr, end, hdr[2])) > 0 |
638 | || cie[1] | 819 | && i == (end - ptr) / (2 * tableSize) |
639 | || (ptrType = fde_pointer_type(cie)) < 0) { | 820 | && !((end - ptr) % (2 * tableSize))) { |
640 | cie = NULL; /* this is not a (valid) CIE */ | 821 | do { |
641 | continue; | 822 | const u8 *cur = ptr + (i / 2) * (2 * tableSize); |
823 | |||
824 | startLoc = read_pointer(&cur, | ||
825 | cur + tableSize, | ||
826 | hdr[3]); | ||
827 | if (pc < startLoc) | ||
828 | i /= 2; | ||
829 | else { | ||
830 | ptr = cur - tableSize; | ||
831 | i = (i + 1) / 2; | ||
832 | } | ||
833 | } while (startLoc && i > 1); | ||
834 | if (i == 1 | ||
835 | && (startLoc = read_pointer(&ptr, | ||
836 | ptr + tableSize, | ||
837 | hdr[3])) != 0 | ||
838 | && pc >= startLoc) | ||
839 | fde = (void *)read_pointer(&ptr, | ||
840 | ptr + tableSize, | ||
841 | hdr[3]); | ||
642 | } | 842 | } |
843 | } | ||
844 | |||
845 | if (fde != NULL) { | ||
846 | cie = cie_for_fde(fde, table); | ||
643 | ptr = (const u8 *)(fde + 2); | 847 | ptr = (const u8 *)(fde + 2); |
644 | startLoc = read_pointer(&ptr, | 848 | if(cie != NULL |
645 | (const u8 *)(fde + 1) + *fde, | 849 | && cie != &bad_cie |
646 | ptrType); | 850 | && cie != ¬_fde |
647 | endLoc = startLoc | 851 | && (ptrType = fde_pointer_type(cie)) >= 0 |
648 | + read_pointer(&ptr, | 852 | && read_pointer(&ptr, |
649 | (const u8 *)(fde + 1) + *fde, | 853 | (const u8 *)(fde + 1) + *fde, |
650 | ptrType & DW_EH_PE_indirect | 854 | ptrType) == startLoc) { |
651 | ? ptrType | 855 | if (!(ptrType & DW_EH_PE_indirect)) |
652 | : ptrType & (DW_EH_PE_FORM|DW_EH_PE_signed)); | 856 | ptrType &= DW_EH_PE_FORM|DW_EH_PE_signed; |
653 | if (pc >= startLoc && pc < endLoc) | 857 | endLoc = startLoc |
654 | break; | 858 | + read_pointer(&ptr, |
655 | cie = NULL; | 859 | (const u8 *)(fde + 1) + *fde, |
860 | ptrType); | ||
861 | if (pc >= endLoc) | ||
862 | fde = NULL; | ||
863 | } else | ||
864 | fde = NULL; | ||
865 | } | ||
866 | if (fde == NULL) { | ||
867 | for (fde = table->address, tableSize = table->size; | ||
868 | cie = NULL, tableSize > sizeof(*fde) | ||
869 | && tableSize - sizeof(*fde) >= *fde; | ||
870 | tableSize -= sizeof(*fde) + *fde, | ||
871 | fde += 1 + *fde / sizeof(*fde)) { | ||
872 | cie = cie_for_fde(fde, table); | ||
873 | if (cie == &bad_cie) { | ||
874 | cie = NULL; | ||
875 | break; | ||
876 | } | ||
877 | if (cie == NULL | ||
878 | || cie == ¬_fde | ||
879 | || (ptrType = fde_pointer_type(cie)) < 0) | ||
880 | continue; | ||
881 | ptr = (const u8 *)(fde + 2); | ||
882 | startLoc = read_pointer(&ptr, | ||
883 | (const u8 *)(fde + 1) + *fde, | ||
884 | ptrType); | ||
885 | if (!startLoc) | ||
886 | continue; | ||
887 | if (!(ptrType & DW_EH_PE_indirect)) | ||
888 | ptrType &= DW_EH_PE_FORM|DW_EH_PE_signed; | ||
889 | endLoc = startLoc | ||
890 | + read_pointer(&ptr, | ||
891 | (const u8 *)(fde + 1) + *fde, | ||
892 | ptrType); | ||
893 | if (pc >= startLoc && pc < endLoc) | ||
894 | break; | ||
895 | } | ||
656 | } | 896 | } |
657 | } | 897 | } |
658 | if (cie != NULL) { | 898 | if (cie != NULL) { |
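
The header path above is a textbook bisection over the sorted (start, fde) pairs, locating the last entry whose start is <= pc before falling back to the old linear scan. Stripped of the read_pointer()/encoding plumbing, a sketch of the same search:

struct hdr_entry { unsigned long start, fde; };

/* Return the entry with the greatest start <= pc, or NULL. */
static const struct hdr_entry *hdr_lookup(const struct hdr_entry *tab,
                                          unsigned int n, unsigned long pc)
{
        while (n > 1) {
                unsigned int half = n / 2;

                if (pc < tab[half].start)
                        n = half;       /* answer is in the low half */
                else {
                        tab += half;    /* answer is in the high half */
                        n -= half;
                }
        }
        return (n == 1 && pc >= tab->start) ? tab : NULL;
}
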
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index cfc737bffe6d..3df9bfc7ff78 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/notifier.h> | 28 | #include <linux/notifier.h> |
29 | #include <linux/kthread.h> | 29 | #include <linux/kthread.h> |
30 | #include <linux/hardirq.h> | 30 | #include <linux/hardirq.h> |
31 | #include <linux/mempolicy.h> | ||
31 | 32 | ||
32 | /* | 33 | /* |
33 | * The per-CPU workqueue (if single thread, we always use the first | 34 | * The per-CPU workqueue (if single thread, we always use the first |
@@ -245,6 +246,12 @@ static int worker_thread(void *__cwq) | |||
245 | sigprocmask(SIG_BLOCK, &blocked, NULL); | 246 | sigprocmask(SIG_BLOCK, &blocked, NULL); |
246 | flush_signals(current); | 247 | flush_signals(current); |
247 | 248 | ||
249 | /* | ||
250 | * We inherited MPOL_INTERLEAVE from the booting kernel. | ||
251 | * Set MPOL_DEFAULT to ensure node-local allocations. | ||
252 | */ | ||
253 | numa_default_policy(); | ||
254 | |||
248 | /* SIG_IGN makes children autoreap: see do_notify_parent(). */ | 255 | /* SIG_IGN makes children autoreap: see do_notify_parent(). */ |
249 | sa.sa.sa_handler = SIG_IGN; | 256 | sa.sa.sa_handler = SIG_IGN; |
250 | sa.sa.sa_flags = 0; | 257 | sa.sa.sa_flags = 0; |