author      James Morris <jmorris@namei.org>    2011-05-19 04:51:57 -0400
committer   James Morris <jmorris@namei.org>    2011-05-19 04:51:57 -0400
commit      12a5a2621b1ee14d32beca35304d7c6076a58815 (patch)
tree        213e13f99de690b3c4a510f504393b63ada626bd /kernel
parent      e77dc3460fa59be5759e9327ad882868eee9d61b (diff)
parent      61c4f2c81c61f73549928dfd9f3e8f26aa36a8cf (diff)
Merge branch 'master' into next

Conflicts:
        include/linux/capability.h

Manually resolve merge conflict w/ thanks to Stephen Rothwell.

Signed-off-by: James Morris <jmorris@namei.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/capability.c          | 12
-rw-r--r--  kernel/cred.c                | 12
-rw-r--r--  kernel/exit.c                |  2
-rw-r--r--  kernel/hrtimer.c             | 10
-rw-r--r--  kernel/irq/proc.c            |  2
-rw-r--r--  kernel/kexec.c               |  7
-rw-r--r--  kernel/power/hibernate.c     | 10
-rw-r--r--  kernel/power/suspend.c       |  9
-rw-r--r--  kernel/power/user.c          |  5
-rw-r--r--  kernel/ptrace.c              | 17
-rw-r--r--  kernel/time/clocksource.c    |  4
-rw-r--r--  kernel/time/posix-clock.c    | 24
-rw-r--r--  kernel/time/tick-broadcast.c | 12
-rw-r--r--  kernel/trace/Kconfig         |  2
-rw-r--r--  kernel/trace/trace.c         |  1
-rw-r--r--  kernel/trace/trace_events.c  |  1
-rw-r--r--  kernel/watchdog.c            |  5
-rw-r--r--  kernel/workqueue.c           |  8

18 files changed, 103 insertions, 40 deletions
diff --git a/kernel/capability.c b/kernel/capability.c
index 14ea4210a530..283c529f8b1c 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -395,3 +395,15 @@ bool task_ns_capable(struct task_struct *t, int cap)
         return ns_capable(task_cred_xxx(t, user)->user_ns, cap);
 }
 EXPORT_SYMBOL(task_ns_capable);
+
+/**
+ * nsown_capable - Check superior capability to one's own user_ns
+ * @cap: The capability in question
+ *
+ * Return true if the current task has the given superior capability
+ * targeted at its own user namespace.
+ */
+bool nsown_capable(int cap)
+{
+        return ns_capable(current_user_ns(), cap);
+}
diff --git a/kernel/cred.c b/kernel/cred.c
index b982f0863ae9..e12c8af793f8 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -54,6 +54,7 @@ struct cred init_cred = {
         .cap_effective = CAP_FULL_SET,
         .cap_bset = CAP_FULL_SET,
         .user = INIT_USER,
+        .user_ns = &init_user_ns,
         .group_info = &init_groups,
 #ifdef CONFIG_KEYS
         .tgcred = &init_tgcred,
@@ -410,6 +411,11 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
                         goto error_put;
         }
 
+        /* cache user_ns in cred. Doesn't need a refcount because it will
+         * stay pinned by cred->user
+         */
+        new->user_ns = new->user->user_ns;
+
 #ifdef CONFIG_KEYS
         /* new threads get their own thread keyrings if their parent already
          * had one */
@@ -741,12 +747,6 @@ int set_create_files_as(struct cred *new, struct inode *inode)
 }
 EXPORT_SYMBOL(set_create_files_as);
 
-struct user_namespace *current_user_ns(void)
-{
-        return _current_user_ns();
-}
-EXPORT_SYMBOL(current_user_ns);
-
 #ifdef CONFIG_DEBUG_CREDENTIALS
 
 bool creds_are_invalid(const struct cred *cred)
diff --git a/kernel/exit.c b/kernel/exit.c
index f5d2f63bae0b..8dd874181542 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1016,7 +1016,7 @@ NORET_TYPE void do_exit(long code)
         /*
          * FIXME: do that only when needed, using sched_exit tracepoint
          */
-        flush_ptrace_hw_breakpoint(tsk);
+        ptrace_put_breakpoints(tsk);
 
         exit_notify(tsk, group_dead);
 #ifdef CONFIG_NUMA
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 9017478c5d4c..87fdb3f8db14 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -81,7 +81,11 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
         }
 };
 
-static int hrtimer_clock_to_base_table[MAX_CLOCKS];
+static int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
+        [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
+        [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
+        [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
+};
 
 static inline int hrtimer_clockid_to_base(clockid_t clock_id)
 {
@@ -1722,10 +1726,6 @@ static struct notifier_block __cpuinitdata hrtimers_nb = {
 
 void __init hrtimers_init(void)
 {
-        hrtimer_clock_to_base_table[CLOCK_REALTIME] = HRTIMER_BASE_REALTIME;
-        hrtimer_clock_to_base_table[CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC;
-        hrtimer_clock_to_base_table[CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME;
-
         hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
                            (void *)(long)smp_processor_id());
         register_cpu_notifier(&hrtimers_nb);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index dd201bd35103..834899f2500f 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -419,7 +419,7 @@ int show_interrupts(struct seq_file *p, void *v)
         } else {
                 seq_printf(p, " %8s", "None");
         }
-#ifdef CONFIG_GENIRC_IRQ_SHOW_LEVEL
+#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
         seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
 #endif
         if (desc->name)
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 55936f9cb251..87b77de03dd3 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -33,6 +33,7 @@
 #include <linux/vmalloc.h>
 #include <linux/swap.h>
 #include <linux/kmsg_dump.h>
+#include <linux/syscore_ops.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
@@ -1532,6 +1533,11 @@ int kernel_kexec(void)
                 local_irq_disable();
                 /* Suspend system devices */
                 error = sysdev_suspend(PMSG_FREEZE);
+                if (!error) {
+                        error = syscore_suspend();
+                        if (error)
+                                sysdev_resume();
+                }
                 if (error)
                         goto Enable_irqs;
         } else
@@ -1546,6 +1552,7 @@ int kernel_kexec(void)
 
 #ifdef CONFIG_KEXEC_JUMP
         if (kexec_image->preserve_context) {
+                syscore_resume();
                 sysdev_resume();
  Enable_irqs:
                 local_irq_enable();
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index aeabd26e3342..50aae660174d 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -273,8 +273,11 @@ static int create_image(int platform_mode)
         local_irq_disable();
 
         error = sysdev_suspend(PMSG_FREEZE);
-        if (!error)
+        if (!error) {
                 error = syscore_suspend();
+                if (error)
+                        sysdev_resume();
+        }
         if (error) {
                 printk(KERN_ERR "PM: Some system devices failed to power down, "
                         "aborting hibernation\n");
@@ -407,8 +410,11 @@ static int resume_target_kernel(bool platform_mode)
         local_irq_disable();
 
         error = sysdev_suspend(PMSG_QUIESCE);
-        if (!error)
+        if (!error) {
                 error = syscore_suspend();
+                if (error)
+                        sysdev_resume();
+        }
         if (error)
                 goto Enable_irqs;
 
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 2814c32aed51..6275970b2189 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -164,8 +164,11 @@ static int suspend_enter(suspend_state_t state)
         BUG_ON(!irqs_disabled());
 
         error = sysdev_suspend(PMSG_SUSPEND);
-        if (!error)
+        if (!error) {
                 error = syscore_suspend();
+                if (error)
+                        sysdev_resume();
+        }
         if (!error) {
                 if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) {
                         error = suspend_ops->enter(state);
@@ -213,7 +216,6 @@ int suspend_devices_and_enter(suspend_state_t state)
                 goto Close;
         }
         suspend_console();
-        pm_restrict_gfp_mask();
         suspend_test_start();
         error = dpm_suspend_start(PMSG_SUSPEND);
         if (error) {
@@ -230,7 +232,6 @@ int suspend_devices_and_enter(suspend_state_t state)
         suspend_test_start();
         dpm_resume_end(PMSG_RESUME);
         suspend_test_finish("resume devices");
-        pm_restore_gfp_mask();
         resume_console();
  Close:
         if (suspend_ops->end)
@@ -291,7 +292,9 @@ int enter_state(suspend_state_t state)
                 goto Finish;
 
         pr_debug("PM: Entering %s sleep\n", pm_states[state]);
+        pm_restrict_gfp_mask();
         error = suspend_devices_and_enter(state);
+        pm_restore_gfp_mask();
 
  Finish:
         pr_debug("PM: Finishing wakeup.\n");
diff --git a/kernel/power/user.c b/kernel/power/user.c
index c36c3b9e8a84..7d02d33be699 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -135,8 +135,10 @@ static int snapshot_release(struct inode *inode, struct file *filp)
         free_basic_memory_bitmaps();
         data = filp->private_data;
         free_all_swap_pages(data->swap);
-        if (data->frozen)
+        if (data->frozen) {
+                pm_restore_gfp_mask();
                 thaw_processes();
+        }
         pm_notifier_call_chain(data->mode == O_RDONLY ?
                         PM_POST_HIBERNATION : PM_POST_RESTORE);
         atomic_inc(&snapshot_device_available);
@@ -379,6 +381,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                  * PM_HIBERNATION_PREPARE
                  */
                 error = suspend_devices_and_enter(PM_SUSPEND_MEM);
+                data->ready = 0;
                 break;
 
         case SNAPSHOT_PLATFORM_SUPPORT:
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 0fc1eed28d27..dc7ab65f3b36 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -22,6 +22,7 @@
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
 #include <linux/regset.h>
+#include <linux/hw_breakpoint.h>
 
 
 /*
@@ -879,3 +880,19 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
         return ret;
 }
 #endif /* CONFIG_COMPAT */
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+int ptrace_get_breakpoints(struct task_struct *tsk)
+{
+        if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt))
+                return 0;
+
+        return -1;
+}
+
+void ptrace_put_breakpoints(struct task_struct *tsk)
+{
+        if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt))
+                flush_ptrace_hw_breakpoint(tsk);
+}
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 6519cf62d9cd..0e17c10f8a9d 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -685,8 +685,8 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
         /* Add clocksource to the clcoksource list */
         mutex_lock(&clocksource_mutex);
         clocksource_enqueue(cs);
-        clocksource_select();
         clocksource_enqueue_watchdog(cs);
+        clocksource_select();
         mutex_unlock(&clocksource_mutex);
         return 0;
 }
@@ -706,8 +706,8 @@ int clocksource_register(struct clocksource *cs)
 
         mutex_lock(&clocksource_mutex);
         clocksource_enqueue(cs);
-        clocksource_select();
         clocksource_enqueue_watchdog(cs);
+        clocksource_select();
         mutex_unlock(&clocksource_mutex);
         return 0;
 }
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index 25028dd4fa18..c340ca658f37 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -19,7 +19,6 @@
  */
 #include <linux/device.h>
 #include <linux/file.h>
-#include <linux/mutex.h>
 #include <linux/posix-clock.h>
 #include <linux/slab.h>
 #include <linux/syscalls.h>
@@ -34,19 +33,19 @@ static struct posix_clock *get_posix_clock(struct file *fp)
 {
         struct posix_clock *clk = fp->private_data;
 
-        mutex_lock(&clk->mutex);
+        down_read(&clk->rwsem);
 
         if (!clk->zombie)
                 return clk;
 
-        mutex_unlock(&clk->mutex);
+        up_read(&clk->rwsem);
 
         return NULL;
 }
 
 static void put_posix_clock(struct posix_clock *clk)
 {
-        mutex_unlock(&clk->mutex);
+        up_read(&clk->rwsem);
 }
 
 static ssize_t posix_clock_read(struct file *fp, char __user *buf,
@@ -156,7 +155,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
         struct posix_clock *clk =
                 container_of(inode->i_cdev, struct posix_clock, cdev);
 
-        mutex_lock(&clk->mutex);
+        down_read(&clk->rwsem);
 
         if (clk->zombie) {
                 err = -ENODEV;
@@ -172,7 +171,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
                 fp->private_data = clk;
         }
 out:
-        mutex_unlock(&clk->mutex);
+        up_read(&clk->rwsem);
         return err;
 }
 
@@ -211,25 +210,20 @@ int posix_clock_register(struct posix_clock *clk, dev_t devid)
         int err;
 
         kref_init(&clk->kref);
-        mutex_init(&clk->mutex);
+        init_rwsem(&clk->rwsem);
 
         cdev_init(&clk->cdev, &posix_clock_file_operations);
         clk->cdev.owner = clk->ops.owner;
         err = cdev_add(&clk->cdev, devid, 1);
-        if (err)
-                goto no_cdev;
 
         return err;
-no_cdev:
-        mutex_destroy(&clk->mutex);
-        return err;
 }
 EXPORT_SYMBOL_GPL(posix_clock_register);
 
 static void delete_clock(struct kref *kref)
 {
         struct posix_clock *clk = container_of(kref, struct posix_clock, kref);
-        mutex_destroy(&clk->mutex);
+
         if (clk->release)
                 clk->release(clk);
 }
@@ -238,9 +232,9 @@ void posix_clock_unregister(struct posix_clock *clk)
 {
         cdev_del(&clk->cdev);
 
-        mutex_lock(&clk->mutex);
+        down_write(&clk->rwsem);
         clk->zombie = true;
-        mutex_unlock(&clk->mutex);
+        up_write(&clk->rwsem);
 
         kref_put(&clk->kref, delete_clock);
 }
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index da800ffa810c..723c7637e55a 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -522,10 +522,11 @@ static void tick_broadcast_init_next_event(struct cpumask *mask,
  */
 void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
+        int cpu = smp_processor_id();
+
         /* Set it up only once ! */
         if (bc->event_handler != tick_handle_oneshot_broadcast) {
                 int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
-                int cpu = smp_processor_id();
 
                 bc->event_handler = tick_handle_oneshot_broadcast;
                 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
@@ -551,6 +552,15 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
                         tick_broadcast_set_event(tick_next_period, 1);
                 } else
                         bc->next_event.tv64 = KTIME_MAX;
+        } else {
+                /*
+                 * The first cpu which switches to oneshot mode sets
+                 * the bit for all other cpus which are in the general
+                 * (periodic) broadcast mask. So the bit is set and
+                 * would prevent the first broadcast enter after this
+                 * to program the bc device.
+                 */
+                tick_broadcast_clear_oneshot(cpu);
         }
 }
 
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 61d7d59f4a1a..2ad39e556cb4 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -141,7 +141,7 @@ if FTRACE
 config FUNCTION_TRACER
         bool "Kernel Function Tracer"
         depends on HAVE_FUNCTION_TRACER
-        select FRAME_POINTER if !ARM_UNWIND && !S390
+        select FRAME_POINTER if !ARM_UNWIND && !S390 && !MICROBLAZE
         select KALLSYMS
         select GENERIC_TRACER
         select CONTEXT_SWITCH_TRACER
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d38c16a06a6f..1cb49be7c7fb 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1110,6 +1110,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 
         entry->preempt_count = pc & 0xff;
         entry->pid = (tsk) ? tsk->pid : 0;
+        entry->padding = 0;
         entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
                 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index e88f74fe1d4c..2fe110341359 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -116,6 +116,7 @@ static int trace_define_common_fields(void)
         __common_field(unsigned char, flags);
         __common_field(unsigned char, preempt_count);
         __common_field(int, pid);
+        __common_field(int, padding);
 
         return ret;
 }
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 140dce750450..14733d4d156b 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -430,9 +430,12 @@ static int watchdog_enable(int cpu)
                 p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu);
                 if (IS_ERR(p)) {
                         printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
-                        if (!err)
+                        if (!err) {
                                 /* if hardlockup hasn't already set this */
                                 err = PTR_ERR(p);
+                                /* and disable the perf event */
+                                watchdog_nmi_disable(cpu);
+                        }
                         goto out;
                 }
                 kthread_bind(p, cpu);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8859a41806dd..e3378e8d3a5c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1291,8 +1291,14 @@ __acquires(&gcwq->lock)
                         return true;
                 spin_unlock_irq(&gcwq->lock);
 
-                /* CPU has come up in between, retry migration */
+                /*
+                 * We've raced with CPU hot[un]plug. Give it a breather
+                 * and retry migration. cond_resched() is required here;
+                 * otherwise, we might deadlock against cpu_stop trying to
+                 * bring down the CPU on non-preemptive kernel.
+                 */
                 cpu_relax();
+                cond_resched();
         }
 }
 