Diffstat (limited to 'kernel')
-rw-r--r--  kernel/capability.c         | 21
-rw-r--r--  kernel/kexec.c              | 66
-rw-r--r--  kernel/lockdep.c            |  6
-rw-r--r--  kernel/lockdep_internals.h  | 13
-rw-r--r--  kernel/lockdep_proc.c       | 12
-rw-r--r--  kernel/ptrace.c             |  5
-rw-r--r--  kernel/sched.c              | 54
-rw-r--r--  kernel/sched_rt.c           |  2
-rw-r--r--  kernel/spinlock.c           |  3
-rw-r--r--  kernel/sys.c                |  2
10 files changed, 113 insertions(+), 71 deletions(-)
diff --git a/kernel/capability.c b/kernel/capability.c
index 0101e847603..33e51e78c2d 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -486,17 +486,22 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
 	return ret;
 }
 
-int __capable(struct task_struct *t, int cap)
+/**
+ * capable - Determine if the current task has a superior capability in effect
+ * @cap: The capability to be tested for
+ *
+ * Return true if the current task has the given superior capability currently
+ * available for use, false if not.
+ *
+ * This sets PF_SUPERPRIV on the task if the capability is available on the
+ * assumption that it's about to be used.
+ */
+int capable(int cap)
 {
-	if (security_capable(t, cap) == 0) {
-		t->flags |= PF_SUPERPRIV;
+	if (has_capability(current, cap)) {
+		current->flags |= PF_SUPERPRIV;
 		return 1;
 	}
 	return 0;
 }
-
-int capable(int cap)
-{
-	return __capable(current, cap);
-}
 EXPORT_SYMBOL(capable);
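As the new kernel-doc above describes, capable() now checks only the current task and sets PF_SUPERPRIV as a side effect, while a check against some other task goes through has_capability() directly. A minimal usage sketch, assuming a made-up caller (only capable() and has_capability() are from this commit; the function name and the chosen capabilities are illustrative):

static int example_privileged_op(struct task_struct *target)
{
	/* Current task: capable() also records PF_SUPERPRIV on success. */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Another task: has_capability() tests without touching its flags. */
	if (!has_capability(target, CAP_SYS_PTRACE))
		return -EPERM;

	return 0;	/* illustrative only */
}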
diff --git a/kernel/kexec.c b/kernel/kexec.c
index c8a4370e2a3..59f3f0df35d 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -12,7 +12,7 @@
 #include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/kexec.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/highmem.h>
 #include <linux/syscalls.h>
@@ -77,7 +77,7 @@ int kexec_should_crash(struct task_struct *p)
  *
  * The code for the transition from the current kernel to the
  * the new kernel is placed in the control_code_buffer, whose size
- * is given by KEXEC_CONTROL_CODE_SIZE. In the best case only a single
+ * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
  * page of memory is necessary, but some architectures require more.
  * Because this memory must be identity mapped in the transition from
  * virtual to physical addresses it must live in the range
@@ -242,7 +242,7 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
 	 */
 	result = -ENOMEM;
 	image->control_code_page = kimage_alloc_control_pages(image,
-					   get_order(KEXEC_CONTROL_CODE_SIZE));
+					   get_order(KEXEC_CONTROL_PAGE_SIZE));
 	if (!image->control_code_page) {
 		printk(KERN_ERR "Could not allocate control_code_buffer\n");
 		goto out;
@@ -317,7 +317,7 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
 	 */
 	result = -ENOMEM;
 	image->control_code_page = kimage_alloc_control_pages(image,
-					   get_order(KEXEC_CONTROL_CODE_SIZE));
+					   get_order(KEXEC_CONTROL_PAGE_SIZE));
 	if (!image->control_code_page) {
 		printk(KERN_ERR "Could not allocate control_code_buffer\n");
 		goto out;
@@ -924,19 +924,14 @@ static int kimage_load_segment(struct kimage *image,
  */
 struct kimage *kexec_image;
 struct kimage *kexec_crash_image;
-/*
- * A home grown binary mutex.
- * Nothing can wait so this mutex is safe to use
- * in interrupt context :)
- */
-static int kexec_lock;
+
+static DEFINE_MUTEX(kexec_mutex);
 
 asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
 				struct kexec_segment __user *segments,
 				unsigned long flags)
 {
 	struct kimage **dest_image, *image;
-	int locked;
 	int result;
 
 	/* We only trust the superuser with rebooting the system. */
@@ -972,8 +967,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
 	 *
 	 * KISS: always take the mutex.
 	 */
-	locked = xchg(&kexec_lock, 1);
-	if (locked)
+	if (!mutex_trylock(&kexec_mutex))
 		return -EBUSY;
 
 	dest_image = &kexec_image;
@@ -1015,8 +1009,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
 	image = xchg(dest_image, image);
 
 out:
-	locked = xchg(&kexec_lock, 0); /* Release the mutex */
-	BUG_ON(!locked);
+	mutex_unlock(&kexec_mutex);
 	kimage_free(image);
 
 	return result;
@@ -1063,10 +1056,7 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
 
 void crash_kexec(struct pt_regs *regs)
 {
-	int locked;
-
-
-	/* Take the kexec_lock here to prevent sys_kexec_load
+	/* Take the kexec_mutex here to prevent sys_kexec_load
 	 * running on one cpu from replacing the crash kernel
 	 * we are using after a panic on a different cpu.
 	 *
@@ -1074,8 +1064,7 @@ void crash_kexec(struct pt_regs *regs)
 	 * of memory the xchg(&kexec_crash_image) would be
 	 * sufficient. But since I reuse the memory...
 	 */
-	locked = xchg(&kexec_lock, 1);
-	if (!locked) {
+	if (mutex_trylock(&kexec_mutex)) {
 		if (kexec_crash_image) {
 			struct pt_regs fixed_regs;
 			crash_setup_regs(&fixed_regs, regs);
@@ -1083,8 +1072,7 @@ void crash_kexec(struct pt_regs *regs)
 			machine_crash_shutdown(&fixed_regs);
 			machine_kexec(kexec_crash_image);
 		}
-		locked = xchg(&kexec_lock, 0);
-		BUG_ON(!locked);
+		mutex_unlock(&kexec_mutex);
 	}
 }
 
@@ -1426,25 +1414,23 @@ static int __init crash_save_vmcoreinfo_init(void)
 
 module_init(crash_save_vmcoreinfo_init)
 
-/**
- * kernel_kexec - reboot the system
- *
- * Move into place and start executing a preloaded standalone
- * executable. If nothing was preloaded return an error.
+/*
+ * Move into place and start executing a preloaded standalone
+ * executable. If nothing was preloaded return an error.
  */
 int kernel_kexec(void)
 {
 	int error = 0;
 
-	if (xchg(&kexec_lock, 1))
+	if (!mutex_trylock(&kexec_mutex))
 		return -EBUSY;
 	if (!kexec_image) {
 		error = -EINVAL;
 		goto Unlock;
 	}
 
-	if (kexec_image->preserve_context) {
 #ifdef CONFIG_KEXEC_JUMP
+	if (kexec_image->preserve_context) {
 		mutex_lock(&pm_mutex);
 		pm_prepare_console();
 		error = freeze_processes();
@@ -1459,6 +1445,7 @@ int kernel_kexec(void)
 		error = disable_nonboot_cpus();
 		if (error)
 			goto Resume_devices;
+		device_pm_lock();
 		local_irq_disable();
 		/* At this point, device_suspend() has been called,
 		 * but *not* device_power_down(). We *must*
@@ -1470,26 +1457,22 @@ int kernel_kexec(void)
 		error = device_power_down(PMSG_FREEZE);
 		if (error)
 			goto Enable_irqs;
-		save_processor_state();
+	} else
 #endif
-	} else {
-		blocking_notifier_call_chain(&reboot_notifier_list,
-					SYS_RESTART, NULL);
-		system_state = SYSTEM_RESTART;
-		device_shutdown();
-		sysdev_shutdown();
+	{
+		kernel_restart_prepare(NULL);
 		printk(KERN_EMERG "Starting new kernel\n");
 		machine_shutdown();
 	}
 
 	machine_kexec(kexec_image);
 
-	if (kexec_image->preserve_context) {
 #ifdef CONFIG_KEXEC_JUMP
-		restore_processor_state();
+	if (kexec_image->preserve_context) {
 		device_power_up(PMSG_RESTORE);
 Enable_irqs:
 		local_irq_enable();
+		device_pm_unlock();
 		enable_nonboot_cpus();
 Resume_devices:
 		device_resume(PMSG_RESTORE);
@@ -1499,11 +1482,10 @@ int kernel_kexec(void)
 Restore_console:
 		pm_restore_console();
 		mutex_unlock(&pm_mutex);
-#endif
 	}
+#endif
 
 Unlock:
-	xchg(&kexec_lock, 0);
-
+	mutex_unlock(&kexec_mutex);
 	return error;
 }
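The kexec changes above drop the home-grown xchg()-based binary lock in favour of a standard mutex taken with mutex_trylock(), so contenders still fail fast with -EBUSY but through the regular mutex API. A stripped-down sketch of that pattern, with illustrative names (example_mutex and example_load are not from the commit):

static DEFINE_MUTEX(example_mutex);

int example_load(void)
{
	/* Non-blocking acquire, mirroring sys_kexec_load()/kernel_kexec(). */
	if (!mutex_trylock(&example_mutex))
		return -EBUSY;

	/* ... critical section: install or run the preloaded image ... */

	mutex_unlock(&example_mutex);
	return 0;
}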
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 1aa91fd6b06..77fa776a2da 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1759,11 +1759,10 @@ static void check_chain_key(struct task_struct *curr)
 		hlock = curr->held_locks + i;
 		if (chain_key != hlock->prev_chain_key) {
 			debug_locks_off();
-			printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n",
+			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
 				curr->lockdep_depth, i,
 				(unsigned long long)chain_key,
 				(unsigned long long)hlock->prev_chain_key);
-			WARN_ON(1);
 			return;
 		}
 		id = hlock->class_idx - 1;
@@ -1778,11 +1777,10 @@ static void check_chain_key(struct task_struct *curr)
 	}
 	if (chain_key != curr->curr_chain_key) {
 		debug_locks_off();
-		printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n",
+		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
 			curr->lockdep_depth, i,
 			(unsigned long long)chain_key,
 			(unsigned long long)curr->curr_chain_key);
-		WARN_ON(1);
 	}
 #endif
 }
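The lockdep change folds each printk() plus WARN_ON(1) pair into a single WARN() call, which emits the formatted message as part of the generated warning. A generic before/after sketch of the idiom (the message and variable are illustrative):

	/* before: message and backtrace produced by two separate calls */
	printk("unexpected value: %d\n", val);
	WARN_ON(1);

	/* after: one call, message included in the warning output */
	WARN(1, "unexpected value: %d\n", val);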
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 55db193d366..56b196932c0 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -50,8 +50,21 @@ extern unsigned int nr_process_chains;
 extern unsigned int max_lockdep_depth;
 extern unsigned int max_recursion_depth;
 
+#ifdef CONFIG_PROVE_LOCKING
 extern unsigned long lockdep_count_forward_deps(struct lock_class *);
 extern unsigned long lockdep_count_backward_deps(struct lock_class *);
+#else
+static inline unsigned long
+lockdep_count_forward_deps(struct lock_class *class)
+{
+	return 0;
+}
+static inline unsigned long
+lockdep_count_backward_deps(struct lock_class *class)
+{
+	return 0;
+}
+#endif
 
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
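The header now follows the common kernel pattern of pairing real declarations with no-op static inline stubs, so call sites can be built whether or not the option is enabled. A generic sketch of that pattern under an assumed placeholder option (CONFIG_EXAMPLE, example_count and struct example_obj are not real symbols):

#ifdef CONFIG_EXAMPLE
extern unsigned long example_count(struct example_obj *obj);
#else
/* compiled-out variant: keeps callers building without extra #ifdefs */
static inline unsigned long example_count(struct example_obj *obj)
{
	return 0;
}
#endif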
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index fa19aee604c..4b194d34d77 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -82,7 +82,6 @@ static void print_name(struct seq_file *m, struct lock_class *class)
 
 static int l_show(struct seq_file *m, void *v)
 {
-	unsigned long nr_forward_deps, nr_backward_deps;
 	struct lock_class *class = v;
 	struct lock_list *entry;
 	char c1, c2, c3, c4;
@@ -96,11 +95,10 @@ static int l_show(struct seq_file *m, void *v)
 #ifdef CONFIG_DEBUG_LOCKDEP
 	seq_printf(m, " OPS:%8ld", class->ops);
 #endif
-	nr_forward_deps = lockdep_count_forward_deps(class);
-	seq_printf(m, " FD:%5ld", nr_forward_deps);
-
-	nr_backward_deps = lockdep_count_backward_deps(class);
-	seq_printf(m, " BD:%5ld", nr_backward_deps);
+#ifdef CONFIG_PROVE_LOCKING
+	seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
+	seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
+#endif
 
 	get_usage_chars(class, &c1, &c2, &c3, &c4);
 	seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);
@@ -325,7 +323,9 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
 			nr_hardirq_read_unsafe++;
 
+#ifdef CONFIG_PROVE_LOCKING
 		sum_forward_deps += lockdep_count_forward_deps(class);
+#endif
 	}
 #ifdef CONFIG_DEBUG_LOCKDEP
 	DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 082b3fcb32a..356699a96d5 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -140,7 +140,7 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
 	if (!dumpable && !capable(CAP_SYS_PTRACE))
 		return -EPERM;
 
-	return security_ptrace(current, task, mode);
+	return security_ptrace_may_access(task, mode);
 }
 
 bool ptrace_may_access(struct task_struct *task, unsigned int mode)
@@ -499,8 +499,7 @@ repeat:
 		goto repeat;
 	}
 
-	ret = security_ptrace(current->parent, current,
-			      PTRACE_MODE_ATTACH);
+	ret = security_ptrace_traceme(current->parent);
 
 	/*
 	 * Set the ptrace bit in the process ptrace flags.
diff --git a/kernel/sched.c b/kernel/sched.c
index d601fb0406c..9a1ddb84e26 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -808,9 +808,9 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
 
 /*
  * ratelimit for updating the group shares.
- * default: 0.5ms
+ * default: 0.25ms
  */
-const_debug unsigned int sysctl_sched_shares_ratelimit = 500000;
+unsigned int sysctl_sched_shares_ratelimit = 250000;
 
 /*
  * period over which we measure -rt task cpu usage in us.
@@ -4669,6 +4669,52 @@ int __sched wait_for_completion_killable(struct completion *x)
 }
 EXPORT_SYMBOL(wait_for_completion_killable);
 
+/**
+ * try_wait_for_completion - try to decrement a completion without blocking
+ * @x: completion structure
+ *
+ * Returns: 0 if a decrement cannot be done without blocking
+ *          1 if a decrement succeeded.
+ *
+ * If a completion is being used as a counting completion,
+ * attempt to decrement the counter without blocking. This
+ * enables us to avoid waiting if the resource the completion
+ * is protecting is not available.
+ */
+bool try_wait_for_completion(struct completion *x)
+{
+	int ret = 1;
+
+	spin_lock_irq(&x->wait.lock);
+	if (!x->done)
+		ret = 0;
+	else
+		x->done--;
+	spin_unlock_irq(&x->wait.lock);
+	return ret;
+}
+EXPORT_SYMBOL(try_wait_for_completion);
+
+/**
+ * completion_done - Test to see if a completion has any waiters
+ * @x: completion structure
+ *
+ * Returns: 0 if there are waiters (wait_for_completion() in progress)
+ *          1 if there are no waiters.
+ *
+ */
+bool completion_done(struct completion *x)
+{
+	int ret = 1;
+
+	spin_lock_irq(&x->wait.lock);
+	if (!x->done)
+		ret = 0;
+	spin_unlock_irq(&x->wait.lock);
+	return ret;
+}
+EXPORT_SYMBOL(completion_done);
+
 static long __sched
 sleep_on_common(wait_queue_head_t *q, int state, long timeout)
 {
@@ -5740,6 +5786,8 @@ static inline void sched_init_granularity(void)
 		sysctl_sched_latency = limit;
 
 	sysctl_sched_wakeup_granularity *= factor;
+
+	sysctl_sched_shares_ratelimit *= factor;
 }
 
 #ifdef CONFIG_SMP
@@ -8462,8 +8510,8 @@ struct task_group *sched_create_group(struct task_group *parent)
 	WARN_ON(!parent); /* root should already exist */
 
 	tg->parent = parent;
-	list_add_rcu(&tg->siblings, &parent->children);
 	INIT_LIST_HEAD(&tg->children);
+	list_add_rcu(&tg->siblings, &parent->children);
 	spin_unlock_irqrestore(&task_group_lock, flags);
 
 	return tg;
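The try_wait_for_completion() and completion_done() helpers added above never sleep; they only take the completion's wait-queue lock briefly. A hedged usage sketch (the consumer/teardown scenario and all example_* names are illustrative, not from this commit):

/* consumer: take one available "done" count, or back off without blocking */
static int example_consume(struct completion *ready)
{
	if (!try_wait_for_completion(ready))
		return -EAGAIN;
	/* ... handle one unit of work ... */
	return 0;
}

/* teardown check: true only when no wait_for_completion() can still be
 * in progress on this completion, per the semantics documented above */
static bool example_can_release(struct completion *ready)
{
	return completion_done(ready);
}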
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 6163e4cf885..998ba54b454 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -298,7 +298,7 @@ static void __disable_runtime(struct rq *rq)
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 		s64 diff;
 
-		if (iter == rt_rq)
+		if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
 			continue;
 
 		spin_lock(&iter->rt_runtime_lock);
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 44baeea94ab..29ab20749dd 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -290,7 +290,6 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
 	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
 	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 }
-
 EXPORT_SYMBOL(_spin_lock_nested);
 
 unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
@@ -312,7 +311,6 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
 #endif
 	return flags;
 }
-
 EXPORT_SYMBOL(_spin_lock_irqsave_nested);
 
 void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
@@ -322,7 +320,6 @@ void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
 	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
 	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 }
-
 EXPORT_SYMBOL(_spin_lock_nest_lock);
 
 #endif
diff --git a/kernel/sys.c b/kernel/sys.c
index c01858090a9..3dacb00a7f7 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -274,7 +274,7 @@ void emergency_restart(void)
 }
 EXPORT_SYMBOL_GPL(emergency_restart);
 
-static void kernel_restart_prepare(char *cmd)
+void kernel_restart_prepare(char *cmd)
 {
 	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
 	system_state = SYSTEM_RESTART;