| author | Ingo Molnar <mingo@elte.hu> | 2008-09-04 07:02:35 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-09-04 07:02:35 -0400 |
| commit | 42390cdec5f6e6e2ee54f308474a6ef7dd16aa5c (patch) | |
| tree | e9684c84f53272319a5acd4b9c86503f30274a51 /kernel | |
| parent | 11c231a962c740b3216eb6565149ae5a7944cba7 (diff) | |
| parent | d210baf53b699fc61aa891c177b71d7082d3b957 (diff) | |
Merge branch 'linus' into x86/x2apic
Conflicts:
arch/x86/kernel/cpu/cyrix.c
include/asm-x86/cpufeature.h
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/auditsc.c | 3 |
| -rw-r--r-- | kernel/capability.c | 21 |
| -rw-r--r-- | kernel/exit.c | 82 |
| -rw-r--r-- | kernel/kexec.c | 66 |
| -rw-r--r-- | kernel/lockdep.c | 14 |
| -rw-r--r-- | kernel/lockdep_internals.h | 13 |
| -rw-r--r-- | kernel/lockdep_proc.c | 15 |
| -rw-r--r-- | kernel/module.c | 2 |
| -rw-r--r-- | kernel/nsproxy.c | 1 |
| -rw-r--r-- | kernel/pid_namespace.c | 3 |
| -rw-r--r-- | kernel/pm_qos_params.c | 25 |
| -rw-r--r-- | kernel/power/disk.c | 13 |
| -rw-r--r-- | kernel/power/main.c | 5 |
| -rw-r--r-- | kernel/power/swap.c | 1 |
| -rw-r--r-- | kernel/ptrace.c | 5 |
| -rw-r--r-- | kernel/rcupdate.c | 1 |
| -rw-r--r-- | kernel/resource.c | 88 |
| -rw-r--r-- | kernel/sched.c | 54 |
| -rw-r--r-- | kernel/sched_clock.c | 84 |
| -rw-r--r-- | kernel/sched_features.h | 2 |
| -rw-r--r-- | kernel/sched_rt.c | 15 |
| -rw-r--r-- | kernel/signal.c | 5 |
| -rw-r--r-- | kernel/smp.c | 10 |
| -rw-r--r-- | kernel/softlockup.c | 3 |
| -rw-r--r-- | kernel/spinlock.c | 3 |
| -rw-r--r-- | kernel/sys.c | 10 |
| -rw-r--r-- | kernel/time/tick-sched.c | 6 |
| -rw-r--r-- | kernel/user_namespace.c | 1 |
| -rw-r--r-- | kernel/utsname.c | 1 |
| -rw-r--r-- | kernel/utsname_sysctl.c | 1 |
30 files changed, 308 insertions(+), 245 deletions(-)
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 972f8e61d36a..59cedfb040e7 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
| @@ -243,10 +243,11 @@ static inline int open_arg(int flags, int mask) | |||
| 243 | 243 | ||
| 244 | static int audit_match_perm(struct audit_context *ctx, int mask) | 244 | static int audit_match_perm(struct audit_context *ctx, int mask) |
| 245 | { | 245 | { |
| 246 | unsigned n; | ||
| 246 | if (unlikely(!ctx)) | 247 | if (unlikely(!ctx)) |
| 247 | return 0; | 248 | return 0; |
| 248 | 249 | ||
| 249 | unsigned n = ctx->major; | 250 | n = ctx->major; |
| 250 | switch (audit_classify_syscall(ctx->arch, n)) { | 251 | switch (audit_classify_syscall(ctx->arch, n)) { |
| 251 | case 0: /* native */ | 252 | case 0: /* native */ |
| 252 | if ((mask & AUDIT_PERM_WRITE) && | 253 | if ((mask & AUDIT_PERM_WRITE) && |
diff --git a/kernel/capability.c b/kernel/capability.c
index 0101e847603e..33e51e78c2d8 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
| @@ -486,17 +486,22 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data) | |||
| 486 | return ret; | 486 | return ret; |
| 487 | } | 487 | } |
| 488 | 488 | ||
| 489 | int __capable(struct task_struct *t, int cap) | 489 | /** |
| 490 | * capable - Determine if the current task has a superior capability in effect | ||
| 491 | * @cap: The capability to be tested for | ||
| 492 | * | ||
| 493 | * Return true if the current task has the given superior capability currently | ||
| 494 | * available for use, false if not. | ||
| 495 | * | ||
| 496 | * This sets PF_SUPERPRIV on the task if the capability is available on the | ||
| 497 | * assumption that it's about to be used. | ||
| 498 | */ | ||
| 499 | int capable(int cap) | ||
| 490 | { | 500 | { |
| 491 | if (security_capable(t, cap) == 0) { | 501 | if (has_capability(current, cap)) { |
| 492 | t->flags |= PF_SUPERPRIV; | 502 | current->flags |= PF_SUPERPRIV; |
| 493 | return 1; | 503 | return 1; |
| 494 | } | 504 | } |
| 495 | return 0; | 505 | return 0; |
| 496 | } | 506 | } |
| 497 | |||
| 498 | int capable(int cap) | ||
| 499 | { | ||
| 500 | return __capable(current, cap); | ||
| 501 | } | ||
| 502 | EXPORT_SYMBOL(capable); | 507 | EXPORT_SYMBOL(capable); |
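The capability.c hunk folds __capable() into capable() and documents it: the helper now tests only the *current* task and sets PF_SUPERPRIV when the capability is granted. A minimal sketch of a typical call site follows; the function name and the choice of CAP_SYS_ADMIN are illustrative, not taken from this patch.

```c
#include <linux/capability.h>
#include <linux/errno.h>

/* Hypothetical privileged operation guarded by capable(). If the current
 * task lacks CAP_SYS_ADMIN in its effective set, bail out with -EPERM;
 * otherwise capable() has already flagged the task with PF_SUPERPRIV. */
static int frob_privileged_setting(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* ... do the privileged work ... */
	return 0;
}
```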
diff --git a/kernel/exit.c b/kernel/exit.c
index 38ec40630149..25ed2ad986df 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
| @@ -831,26 +831,50 @@ static void reparent_thread(struct task_struct *p, struct task_struct *father) | |||
| 831 | * the child reaper process (ie "init") in our pid | 831 | * the child reaper process (ie "init") in our pid |
| 832 | * space. | 832 | * space. |
| 833 | */ | 833 | */ |
| 834 | static struct task_struct *find_new_reaper(struct task_struct *father) | ||
| 835 | { | ||
| 836 | struct pid_namespace *pid_ns = task_active_pid_ns(father); | ||
| 837 | struct task_struct *thread; | ||
| 838 | |||
| 839 | thread = father; | ||
| 840 | while_each_thread(father, thread) { | ||
| 841 | if (thread->flags & PF_EXITING) | ||
| 842 | continue; | ||
| 843 | if (unlikely(pid_ns->child_reaper == father)) | ||
| 844 | pid_ns->child_reaper = thread; | ||
| 845 | return thread; | ||
| 846 | } | ||
| 847 | |||
| 848 | if (unlikely(pid_ns->child_reaper == father)) { | ||
| 849 | write_unlock_irq(&tasklist_lock); | ||
| 850 | if (unlikely(pid_ns == &init_pid_ns)) | ||
| 851 | panic("Attempted to kill init!"); | ||
| 852 | |||
| 853 | zap_pid_ns_processes(pid_ns); | ||
| 854 | write_lock_irq(&tasklist_lock); | ||
| 855 | /* | ||
| 856 | * We can not clear ->child_reaper or leave it alone. | ||
| 857 | * There may by stealth EXIT_DEAD tasks on ->children, | ||
| 858 | * forget_original_parent() must move them somewhere. | ||
| 859 | */ | ||
| 860 | pid_ns->child_reaper = init_pid_ns.child_reaper; | ||
| 861 | } | ||
| 862 | |||
| 863 | return pid_ns->child_reaper; | ||
| 864 | } | ||
| 865 | |||
| 834 | static void forget_original_parent(struct task_struct *father) | 866 | static void forget_original_parent(struct task_struct *father) |
| 835 | { | 867 | { |
| 836 | struct task_struct *p, *n, *reaper = father; | 868 | struct task_struct *p, *n, *reaper; |
| 837 | LIST_HEAD(ptrace_dead); | 869 | LIST_HEAD(ptrace_dead); |
| 838 | 870 | ||
| 839 | write_lock_irq(&tasklist_lock); | 871 | write_lock_irq(&tasklist_lock); |
| 840 | 872 | reaper = find_new_reaper(father); | |
| 841 | /* | 873 | /* |
| 842 | * First clean up ptrace if we were using it. | 874 | * First clean up ptrace if we were using it. |
| 843 | */ | 875 | */ |
| 844 | ptrace_exit(father, &ptrace_dead); | 876 | ptrace_exit(father, &ptrace_dead); |
| 845 | 877 | ||
| 846 | do { | ||
| 847 | reaper = next_thread(reaper); | ||
| 848 | if (reaper == father) { | ||
| 849 | reaper = task_child_reaper(father); | ||
| 850 | break; | ||
| 851 | } | ||
| 852 | } while (reaper->flags & PF_EXITING); | ||
| 853 | |||
| 854 | list_for_each_entry_safe(p, n, &father->children, sibling) { | 878 | list_for_each_entry_safe(p, n, &father->children, sibling) { |
| 855 | p->real_parent = reaper; | 879 | p->real_parent = reaper; |
| 856 | if (p->parent == father) { | 880 | if (p->parent == father) { |
| @@ -918,8 +942,8 @@ static void exit_notify(struct task_struct *tsk, int group_dead) | |||
| 918 | 942 | ||
| 919 | /* mt-exec, de_thread() is waiting for us */ | 943 | /* mt-exec, de_thread() is waiting for us */ |
| 920 | if (thread_group_leader(tsk) && | 944 | if (thread_group_leader(tsk) && |
| 921 | tsk->signal->notify_count < 0 && | 945 | tsk->signal->group_exit_task && |
| 922 | tsk->signal->group_exit_task) | 946 | tsk->signal->notify_count < 0) |
| 923 | wake_up_process(tsk->signal->group_exit_task); | 947 | wake_up_process(tsk->signal->group_exit_task); |
| 924 | 948 | ||
| 925 | write_unlock_irq(&tasklist_lock); | 949 | write_unlock_irq(&tasklist_lock); |
| @@ -959,39 +983,6 @@ static void check_stack_usage(void) | |||
| 959 | static inline void check_stack_usage(void) {} | 983 | static inline void check_stack_usage(void) {} |
| 960 | #endif | 984 | #endif |
| 961 | 985 | ||
| 962 | static inline void exit_child_reaper(struct task_struct *tsk) | ||
| 963 | { | ||
| 964 | if (likely(tsk->group_leader != task_child_reaper(tsk))) | ||
| 965 | return; | ||
| 966 | |||
| 967 | if (tsk->nsproxy->pid_ns == &init_pid_ns) | ||
| 968 | panic("Attempted to kill init!"); | ||
| 969 | |||
| 970 | /* | ||
| 971 | * @tsk is the last thread in the 'cgroup-init' and is exiting. | ||
| 972 | * Terminate all remaining processes in the namespace and reap them | ||
| 973 | * before exiting @tsk. | ||
| 974 | * | ||
| 975 | * Note that @tsk (last thread of cgroup-init) may not necessarily | ||
| 976 | * be the child-reaper (i.e main thread of cgroup-init) of the | ||
| 977 | * namespace i.e the child_reaper may have already exited. | ||
| 978 | * | ||
| 979 | * Even after a child_reaper exits, we let it inherit orphaned children, | ||
| 980 | * because, pid_ns->child_reaper remains valid as long as there is | ||
| 981 | * at least one living sub-thread in the cgroup init. | ||
| 982 | |||
| 983 | * This living sub-thread of the cgroup-init will be notified when | ||
| 984 | * a child inherited by the 'child-reaper' exits (do_notify_parent() | ||
| 985 | * uses __group_send_sig_info()). Further, when reaping child processes, | ||
| 986 | * do_wait() iterates over children of all living sub threads. | ||
| 987 | |||
| 988 | * i.e even though 'child_reaper' thread is listed as the parent of the | ||
| 989 | * orphaned children, any living sub-thread in the cgroup-init can | ||
| 990 | * perform the role of the child_reaper. | ||
| 991 | */ | ||
| 992 | zap_pid_ns_processes(tsk->nsproxy->pid_ns); | ||
| 993 | } | ||
| 994 | |||
| 995 | NORET_TYPE void do_exit(long code) | 986 | NORET_TYPE void do_exit(long code) |
| 996 | { | 987 | { |
| 997 | struct task_struct *tsk = current; | 988 | struct task_struct *tsk = current; |
| @@ -1051,7 +1042,6 @@ NORET_TYPE void do_exit(long code) | |||
| 1051 | } | 1042 | } |
| 1052 | group_dead = atomic_dec_and_test(&tsk->signal->live); | 1043 | group_dead = atomic_dec_and_test(&tsk->signal->live); |
| 1053 | if (group_dead) { | 1044 | if (group_dead) { |
| 1054 | exit_child_reaper(tsk); | ||
| 1055 | hrtimer_cancel(&tsk->signal->real_timer); | 1045 | hrtimer_cancel(&tsk->signal->real_timer); |
| 1056 | exit_itimers(tsk->signal); | 1046 | exit_itimers(tsk->signal); |
| 1057 | } | 1047 | } |
diff --git a/kernel/kexec.c b/kernel/kexec.c
index c8a4370e2a34..59f3f0df35d4 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
| @@ -12,7 +12,7 @@ | |||
| 12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
| 13 | #include <linux/fs.h> | 13 | #include <linux/fs.h> |
| 14 | #include <linux/kexec.h> | 14 | #include <linux/kexec.h> |
| 15 | #include <linux/spinlock.h> | 15 | #include <linux/mutex.h> |
| 16 | #include <linux/list.h> | 16 | #include <linux/list.h> |
| 17 | #include <linux/highmem.h> | 17 | #include <linux/highmem.h> |
| 18 | #include <linux/syscalls.h> | 18 | #include <linux/syscalls.h> |
| @@ -77,7 +77,7 @@ int kexec_should_crash(struct task_struct *p) | |||
| 77 | * | 77 | * |
| 78 | * The code for the transition from the current kernel to the | 78 | * The code for the transition from the current kernel to the |
| 79 | * the new kernel is placed in the control_code_buffer, whose size | 79 | * the new kernel is placed in the control_code_buffer, whose size |
| 80 | * is given by KEXEC_CONTROL_CODE_SIZE. In the best case only a single | 80 | * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single |
| 81 | * page of memory is necessary, but some architectures require more. | 81 | * page of memory is necessary, but some architectures require more. |
| 82 | * Because this memory must be identity mapped in the transition from | 82 | * Because this memory must be identity mapped in the transition from |
| 83 | * virtual to physical addresses it must live in the range | 83 | * virtual to physical addresses it must live in the range |
| @@ -242,7 +242,7 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, | |||
| 242 | */ | 242 | */ |
| 243 | result = -ENOMEM; | 243 | result = -ENOMEM; |
| 244 | image->control_code_page = kimage_alloc_control_pages(image, | 244 | image->control_code_page = kimage_alloc_control_pages(image, |
| 245 | get_order(KEXEC_CONTROL_CODE_SIZE)); | 245 | get_order(KEXEC_CONTROL_PAGE_SIZE)); |
| 246 | if (!image->control_code_page) { | 246 | if (!image->control_code_page) { |
| 247 | printk(KERN_ERR "Could not allocate control_code_buffer\n"); | 247 | printk(KERN_ERR "Could not allocate control_code_buffer\n"); |
| 248 | goto out; | 248 | goto out; |
| @@ -317,7 +317,7 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, | |||
| 317 | */ | 317 | */ |
| 318 | result = -ENOMEM; | 318 | result = -ENOMEM; |
| 319 | image->control_code_page = kimage_alloc_control_pages(image, | 319 | image->control_code_page = kimage_alloc_control_pages(image, |
| 320 | get_order(KEXEC_CONTROL_CODE_SIZE)); | 320 | get_order(KEXEC_CONTROL_PAGE_SIZE)); |
| 321 | if (!image->control_code_page) { | 321 | if (!image->control_code_page) { |
| 322 | printk(KERN_ERR "Could not allocate control_code_buffer\n"); | 322 | printk(KERN_ERR "Could not allocate control_code_buffer\n"); |
| 323 | goto out; | 323 | goto out; |
| @@ -924,19 +924,14 @@ static int kimage_load_segment(struct kimage *image, | |||
| 924 | */ | 924 | */ |
| 925 | struct kimage *kexec_image; | 925 | struct kimage *kexec_image; |
| 926 | struct kimage *kexec_crash_image; | 926 | struct kimage *kexec_crash_image; |
| 927 | /* | 927 | |
| 928 | * A home grown binary mutex. | 928 | static DEFINE_MUTEX(kexec_mutex); |
| 929 | * Nothing can wait so this mutex is safe to use | ||
| 930 | * in interrupt context :) | ||
| 931 | */ | ||
| 932 | static int kexec_lock; | ||
| 933 | 929 | ||
| 934 | asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, | 930 | asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, |
| 935 | struct kexec_segment __user *segments, | 931 | struct kexec_segment __user *segments, |
| 936 | unsigned long flags) | 932 | unsigned long flags) |
| 937 | { | 933 | { |
| 938 | struct kimage **dest_image, *image; | 934 | struct kimage **dest_image, *image; |
| 939 | int locked; | ||
| 940 | int result; | 935 | int result; |
| 941 | 936 | ||
| 942 | /* We only trust the superuser with rebooting the system. */ | 937 | /* We only trust the superuser with rebooting the system. */ |
| @@ -972,8 +967,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, | |||
| 972 | * | 967 | * |
| 973 | * KISS: always take the mutex. | 968 | * KISS: always take the mutex. |
| 974 | */ | 969 | */ |
| 975 | locked = xchg(&kexec_lock, 1); | 970 | if (!mutex_trylock(&kexec_mutex)) |
| 976 | if (locked) | ||
| 977 | return -EBUSY; | 971 | return -EBUSY; |
| 978 | 972 | ||
| 979 | dest_image = &kexec_image; | 973 | dest_image = &kexec_image; |
| @@ -1015,8 +1009,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, | |||
| 1015 | image = xchg(dest_image, image); | 1009 | image = xchg(dest_image, image); |
| 1016 | 1010 | ||
| 1017 | out: | 1011 | out: |
| 1018 | locked = xchg(&kexec_lock, 0); /* Release the mutex */ | 1012 | mutex_unlock(&kexec_mutex); |
| 1019 | BUG_ON(!locked); | ||
| 1020 | kimage_free(image); | 1013 | kimage_free(image); |
| 1021 | 1014 | ||
| 1022 | return result; | 1015 | return result; |
| @@ -1063,10 +1056,7 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry, | |||
| 1063 | 1056 | ||
| 1064 | void crash_kexec(struct pt_regs *regs) | 1057 | void crash_kexec(struct pt_regs *regs) |
| 1065 | { | 1058 | { |
| 1066 | int locked; | 1059 | /* Take the kexec_mutex here to prevent sys_kexec_load |
| 1067 | |||
| 1068 | |||
| 1069 | /* Take the kexec_lock here to prevent sys_kexec_load | ||
| 1070 | * running on one cpu from replacing the crash kernel | 1060 | * running on one cpu from replacing the crash kernel |
| 1071 | * we are using after a panic on a different cpu. | 1061 | * we are using after a panic on a different cpu. |
| 1072 | * | 1062 | * |
| @@ -1074,8 +1064,7 @@ void crash_kexec(struct pt_regs *regs) | |||
| 1074 | * of memory the xchg(&kexec_crash_image) would be | 1064 | * of memory the xchg(&kexec_crash_image) would be |
| 1075 | * sufficient. But since I reuse the memory... | 1065 | * sufficient. But since I reuse the memory... |
| 1076 | */ | 1066 | */ |
| 1077 | locked = xchg(&kexec_lock, 1); | 1067 | if (mutex_trylock(&kexec_mutex)) { |
| 1078 | if (!locked) { | ||
| 1079 | if (kexec_crash_image) { | 1068 | if (kexec_crash_image) { |
| 1080 | struct pt_regs fixed_regs; | 1069 | struct pt_regs fixed_regs; |
| 1081 | crash_setup_regs(&fixed_regs, regs); | 1070 | crash_setup_regs(&fixed_regs, regs); |
| @@ -1083,8 +1072,7 @@ void crash_kexec(struct pt_regs *regs) | |||
| 1083 | machine_crash_shutdown(&fixed_regs); | 1072 | machine_crash_shutdown(&fixed_regs); |
| 1084 | machine_kexec(kexec_crash_image); | 1073 | machine_kexec(kexec_crash_image); |
| 1085 | } | 1074 | } |
| 1086 | locked = xchg(&kexec_lock, 0); | 1075 | mutex_unlock(&kexec_mutex); |
| 1087 | BUG_ON(!locked); | ||
| 1088 | } | 1076 | } |
| 1089 | } | 1077 | } |
| 1090 | 1078 | ||
| @@ -1426,25 +1414,23 @@ static int __init crash_save_vmcoreinfo_init(void) | |||
| 1426 | 1414 | ||
| 1427 | module_init(crash_save_vmcoreinfo_init) | 1415 | module_init(crash_save_vmcoreinfo_init) |
| 1428 | 1416 | ||
| 1429 | /** | 1417 | /* |
| 1430 | * kernel_kexec - reboot the system | 1418 | * Move into place and start executing a preloaded standalone |
| 1431 | * | 1419 | * executable. If nothing was preloaded return an error. |
| 1432 | * Move into place and start executing a preloaded standalone | ||
| 1433 | * executable. If nothing was preloaded return an error. | ||
| 1434 | */ | 1420 | */ |
| 1435 | int kernel_kexec(void) | 1421 | int kernel_kexec(void) |
| 1436 | { | 1422 | { |
| 1437 | int error = 0; | 1423 | int error = 0; |
| 1438 | 1424 | ||
| 1439 | if (xchg(&kexec_lock, 1)) | 1425 | if (!mutex_trylock(&kexec_mutex)) |
| 1440 | return -EBUSY; | 1426 | return -EBUSY; |
| 1441 | if (!kexec_image) { | 1427 | if (!kexec_image) { |
| 1442 | error = -EINVAL; | 1428 | error = -EINVAL; |
| 1443 | goto Unlock; | 1429 | goto Unlock; |
| 1444 | } | 1430 | } |
| 1445 | 1431 | ||
| 1446 | if (kexec_image->preserve_context) { | ||
| 1447 | #ifdef CONFIG_KEXEC_JUMP | 1432 | #ifdef CONFIG_KEXEC_JUMP |
| 1433 | if (kexec_image->preserve_context) { | ||
| 1448 | mutex_lock(&pm_mutex); | 1434 | mutex_lock(&pm_mutex); |
| 1449 | pm_prepare_console(); | 1435 | pm_prepare_console(); |
| 1450 | error = freeze_processes(); | 1436 | error = freeze_processes(); |
| @@ -1459,6 +1445,7 @@ int kernel_kexec(void) | |||
| 1459 | error = disable_nonboot_cpus(); | 1445 | error = disable_nonboot_cpus(); |
| 1460 | if (error) | 1446 | if (error) |
| 1461 | goto Resume_devices; | 1447 | goto Resume_devices; |
| 1448 | device_pm_lock(); | ||
| 1462 | local_irq_disable(); | 1449 | local_irq_disable(); |
| 1463 | /* At this point, device_suspend() has been called, | 1450 | /* At this point, device_suspend() has been called, |
| 1464 | * but *not* device_power_down(). We *must* | 1451 | * but *not* device_power_down(). We *must* |
| @@ -1470,26 +1457,22 @@ int kernel_kexec(void) | |||
| 1470 | error = device_power_down(PMSG_FREEZE); | 1457 | error = device_power_down(PMSG_FREEZE); |
| 1471 | if (error) | 1458 | if (error) |
| 1472 | goto Enable_irqs; | 1459 | goto Enable_irqs; |
| 1473 | save_processor_state(); | 1460 | } else |
| 1474 | #endif | 1461 | #endif |
| 1475 | } else { | 1462 | { |
| 1476 | blocking_notifier_call_chain(&reboot_notifier_list, | 1463 | kernel_restart_prepare(NULL); |
| 1477 | SYS_RESTART, NULL); | ||
| 1478 | system_state = SYSTEM_RESTART; | ||
| 1479 | device_shutdown(); | ||
| 1480 | sysdev_shutdown(); | ||
| 1481 | printk(KERN_EMERG "Starting new kernel\n"); | 1464 | printk(KERN_EMERG "Starting new kernel\n"); |
| 1482 | machine_shutdown(); | 1465 | machine_shutdown(); |
| 1483 | } | 1466 | } |
| 1484 | 1467 | ||
| 1485 | machine_kexec(kexec_image); | 1468 | machine_kexec(kexec_image); |
| 1486 | 1469 | ||
| 1487 | if (kexec_image->preserve_context) { | ||
| 1488 | #ifdef CONFIG_KEXEC_JUMP | 1470 | #ifdef CONFIG_KEXEC_JUMP |
| 1489 | restore_processor_state(); | 1471 | if (kexec_image->preserve_context) { |
| 1490 | device_power_up(PMSG_RESTORE); | 1472 | device_power_up(PMSG_RESTORE); |
| 1491 | Enable_irqs: | 1473 | Enable_irqs: |
| 1492 | local_irq_enable(); | 1474 | local_irq_enable(); |
| 1475 | device_pm_unlock(); | ||
| 1493 | enable_nonboot_cpus(); | 1476 | enable_nonboot_cpus(); |
| 1494 | Resume_devices: | 1477 | Resume_devices: |
| 1495 | device_resume(PMSG_RESTORE); | 1478 | device_resume(PMSG_RESTORE); |
| @@ -1499,11 +1482,10 @@ int kernel_kexec(void) | |||
| 1499 | Restore_console: | 1482 | Restore_console: |
| 1500 | pm_restore_console(); | 1483 | pm_restore_console(); |
| 1501 | mutex_unlock(&pm_mutex); | 1484 | mutex_unlock(&pm_mutex); |
| 1502 | #endif | ||
| 1503 | } | 1485 | } |
| 1486 | #endif | ||
| 1504 | 1487 | ||
| 1505 | Unlock: | 1488 | Unlock: |
| 1506 | xchg(&kexec_lock, 0); | 1489 | mutex_unlock(&kexec_mutex); |
| 1507 | |||
| 1508 | return error; | 1490 | return error; |
| 1509 | } | 1491 | } |
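The kexec.c hunks replace the hand-rolled xchg()-based flag with a real mutex. mutex_trylock() does not sleep, so all three callers keep their try-and-bail behaviour: sys_kexec_load() and kernel_kexec() return -EBUSY on contention, and crash_kexec() silently gives up. A minimal sketch of the resulting pattern (the function name is illustrative):

```c
#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(kexec_mutex);

/* Illustrative caller: take the mutex opportunistically, never wait.
 * This mirrors what sys_kexec_load(), crash_kexec() and kernel_kexec()
 * do after the patch. */
static int guarded_kexec_op(void)
{
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;		/* someone else is loading/jumping */

	/* ... manipulate kexec_image / kexec_crash_image ... */

	mutex_unlock(&kexec_mutex);
	return 0;
}
```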
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 1aa91fd6b06e..dbda475b13bd 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
| @@ -875,11 +875,11 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this, | |||
| 875 | if (!entry) | 875 | if (!entry) |
| 876 | return 0; | 876 | return 0; |
| 877 | 877 | ||
| 878 | entry->class = this; | ||
| 879 | entry->distance = distance; | ||
| 880 | if (!save_trace(&entry->trace)) | 878 | if (!save_trace(&entry->trace)) |
| 881 | return 0; | 879 | return 0; |
| 882 | 880 | ||
| 881 | entry->class = this; | ||
| 882 | entry->distance = distance; | ||
| 883 | /* | 883 | /* |
| 884 | * Since we never remove from the dependency list, the list can | 884 | * Since we never remove from the dependency list, the list can |
| 885 | * be walked lockless by other CPUs, it's only allocation | 885 | * be walked lockless by other CPUs, it's only allocation |
| @@ -1759,11 +1759,10 @@ static void check_chain_key(struct task_struct *curr) | |||
| 1759 | hlock = curr->held_locks + i; | 1759 | hlock = curr->held_locks + i; |
| 1760 | if (chain_key != hlock->prev_chain_key) { | 1760 | if (chain_key != hlock->prev_chain_key) { |
| 1761 | debug_locks_off(); | 1761 | debug_locks_off(); |
| 1762 | printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n", | 1762 | WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n", |
| 1763 | curr->lockdep_depth, i, | 1763 | curr->lockdep_depth, i, |
| 1764 | (unsigned long long)chain_key, | 1764 | (unsigned long long)chain_key, |
| 1765 | (unsigned long long)hlock->prev_chain_key); | 1765 | (unsigned long long)hlock->prev_chain_key); |
| 1766 | WARN_ON(1); | ||
| 1767 | return; | 1766 | return; |
| 1768 | } | 1767 | } |
| 1769 | id = hlock->class_idx - 1; | 1768 | id = hlock->class_idx - 1; |
| @@ -1778,11 +1777,10 @@ static void check_chain_key(struct task_struct *curr) | |||
| 1778 | } | 1777 | } |
| 1779 | if (chain_key != curr->curr_chain_key) { | 1778 | if (chain_key != curr->curr_chain_key) { |
| 1780 | debug_locks_off(); | 1779 | debug_locks_off(); |
| 1781 | printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n", | 1780 | WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n", |
| 1782 | curr->lockdep_depth, i, | 1781 | curr->lockdep_depth, i, |
| 1783 | (unsigned long long)chain_key, | 1782 | (unsigned long long)chain_key, |
| 1784 | (unsigned long long)curr->curr_chain_key); | 1783 | (unsigned long long)curr->curr_chain_key); |
| 1785 | WARN_ON(1); | ||
| 1786 | } | 1784 | } |
| 1787 | #endif | 1785 | #endif |
| 1788 | } | 1786 | } |
| @@ -2584,7 +2582,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
| 2584 | hlock->trylock = trylock; | 2582 | hlock->trylock = trylock; |
| 2585 | hlock->read = read; | 2583 | hlock->read = read; |
| 2586 | hlock->check = check; | 2584 | hlock->check = check; |
| 2587 | hlock->hardirqs_off = hardirqs_off; | 2585 | hlock->hardirqs_off = !!hardirqs_off; |
| 2588 | #ifdef CONFIG_LOCK_STAT | 2586 | #ifdef CONFIG_LOCK_STAT |
| 2589 | hlock->waittime_stamp = 0; | 2587 | hlock->waittime_stamp = 0; |
| 2590 | hlock->holdtime_stamp = sched_clock(); | 2588 | hlock->holdtime_stamp = sched_clock(); |
| @@ -3031,7 +3029,7 @@ found_it: | |||
| 3031 | 3029 | ||
| 3032 | stats = get_lock_stats(hlock_class(hlock)); | 3030 | stats = get_lock_stats(hlock_class(hlock)); |
| 3033 | if (point < ARRAY_SIZE(stats->contention_point)) | 3031 | if (point < ARRAY_SIZE(stats->contention_point)) |
| 3034 | stats->contention_point[i]++; | 3032 | stats->contention_point[point]++; |
| 3035 | if (lock->cpu != smp_processor_id()) | 3033 | if (lock->cpu != smp_processor_id()) |
| 3036 | stats->bounces[bounce_contended + !!hlock->read]++; | 3034 | stats->bounces[bounce_contended + !!hlock->read]++; |
| 3037 | put_lock_stats(stats); | 3035 | put_lock_stats(stats); |
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 55db193d366d..56b196932c08 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
| @@ -50,8 +50,21 @@ extern unsigned int nr_process_chains; | |||
| 50 | extern unsigned int max_lockdep_depth; | 50 | extern unsigned int max_lockdep_depth; |
| 51 | extern unsigned int max_recursion_depth; | 51 | extern unsigned int max_recursion_depth; |
| 52 | 52 | ||
| 53 | #ifdef CONFIG_PROVE_LOCKING | ||
| 53 | extern unsigned long lockdep_count_forward_deps(struct lock_class *); | 54 | extern unsigned long lockdep_count_forward_deps(struct lock_class *); |
| 54 | extern unsigned long lockdep_count_backward_deps(struct lock_class *); | 55 | extern unsigned long lockdep_count_backward_deps(struct lock_class *); |
| 56 | #else | ||
| 57 | static inline unsigned long | ||
| 58 | lockdep_count_forward_deps(struct lock_class *class) | ||
| 59 | { | ||
| 60 | return 0; | ||
| 61 | } | ||
| 62 | static inline unsigned long | ||
| 63 | lockdep_count_backward_deps(struct lock_class *class) | ||
| 64 | { | ||
| 65 | return 0; | ||
| 66 | } | ||
| 67 | #endif | ||
| 55 | 68 | ||
| 56 | #ifdef CONFIG_DEBUG_LOCKDEP | 69 | #ifdef CONFIG_DEBUG_LOCKDEP |
| 57 | /* | 70 | /* |
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index fa19aee604c2..20dbcbf9c7dd 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
| @@ -82,7 +82,6 @@ static void print_name(struct seq_file *m, struct lock_class *class) | |||
| 82 | 82 | ||
| 83 | static int l_show(struct seq_file *m, void *v) | 83 | static int l_show(struct seq_file *m, void *v) |
| 84 | { | 84 | { |
| 85 | unsigned long nr_forward_deps, nr_backward_deps; | ||
| 86 | struct lock_class *class = v; | 85 | struct lock_class *class = v; |
| 87 | struct lock_list *entry; | 86 | struct lock_list *entry; |
| 88 | char c1, c2, c3, c4; | 87 | char c1, c2, c3, c4; |
| @@ -96,11 +95,10 @@ static int l_show(struct seq_file *m, void *v) | |||
| 96 | #ifdef CONFIG_DEBUG_LOCKDEP | 95 | #ifdef CONFIG_DEBUG_LOCKDEP |
| 97 | seq_printf(m, " OPS:%8ld", class->ops); | 96 | seq_printf(m, " OPS:%8ld", class->ops); |
| 98 | #endif | 97 | #endif |
| 99 | nr_forward_deps = lockdep_count_forward_deps(class); | 98 | #ifdef CONFIG_PROVE_LOCKING |
| 100 | seq_printf(m, " FD:%5ld", nr_forward_deps); | 99 | seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class)); |
| 101 | 100 | seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class)); | |
| 102 | nr_backward_deps = lockdep_count_backward_deps(class); | 101 | #endif |
| 103 | seq_printf(m, " BD:%5ld", nr_backward_deps); | ||
| 104 | 102 | ||
| 105 | get_usage_chars(class, &c1, &c2, &c3, &c4); | 103 | get_usage_chars(class, &c1, &c2, &c3, &c4); |
| 106 | seq_printf(m, " %c%c%c%c", c1, c2, c3, c4); | 104 | seq_printf(m, " %c%c%c%c", c1, c2, c3, c4); |
| @@ -325,7 +323,9 @@ static int lockdep_stats_show(struct seq_file *m, void *v) | |||
| 325 | if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) | 323 | if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) |
| 326 | nr_hardirq_read_unsafe++; | 324 | nr_hardirq_read_unsafe++; |
| 327 | 325 | ||
| 326 | #ifdef CONFIG_PROVE_LOCKING | ||
| 328 | sum_forward_deps += lockdep_count_forward_deps(class); | 327 | sum_forward_deps += lockdep_count_forward_deps(class); |
| 328 | #endif | ||
| 329 | } | 329 | } |
| 330 | #ifdef CONFIG_DEBUG_LOCKDEP | 330 | #ifdef CONFIG_DEBUG_LOCKDEP |
| 331 | DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused); | 331 | DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused); |
| @@ -472,8 +472,9 @@ static void snprint_time(char *buf, size_t bufsiz, s64 nr) | |||
| 472 | { | 472 | { |
| 473 | unsigned long rem; | 473 | unsigned long rem; |
| 474 | 474 | ||
| 475 | nr += 5; /* for display rounding */ | ||
| 475 | rem = do_div(nr, 1000); /* XXX: do_div_signed */ | 476 | rem = do_div(nr, 1000); /* XXX: do_div_signed */ |
| 476 | snprintf(buf, bufsiz, "%lld.%02d", (long long)nr, ((int)rem+5)/10); | 477 | snprintf(buf, bufsiz, "%lld.%02d", (long long)nr, (int)rem/10); |
| 477 | } | 478 | } |
| 478 | 479 | ||
| 479 | static void seq_time(struct seq_file *m, s64 time) | 480 | static void seq_time(struct seq_file *m, s64 time) |
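The snprint_time() change moves the display-rounding constant in front of the divide. Previously the `+5` was applied to the remainder, so a value just below the next whole unit could render a three-digit fraction in a `%02d` field; rounding `nr` first lets the carry propagate into the integer part. A small userspace re-creation of the two behaviours (do_div() is approximated with plain division and the sample value is made up):

```c
#include <stdio.h>
#include <stdint.h>

/* Old behaviour: round the remainder after the divide. */
static void old_fmt(char *buf, size_t n, int64_t nr)
{
	int rem = nr % 1000;
	snprintf(buf, n, "%lld.%02d", (long long)(nr / 1000), (rem + 5) / 10);
}

/* New behaviour: round the raw value, then divide. */
static void new_fmt(char *buf, size_t n, int64_t nr)
{
	nr += 5;			/* display rounding, as in the patch */
	int rem = nr % 1000;
	snprintf(buf, n, "%lld.%02d", (long long)(nr / 1000), rem / 10);
}

int main(void)
{
	char a[32], b[32];

	old_fmt(a, sizeof(a), 1999);	/* "1.100" - fraction field overflows */
	new_fmt(b, sizeof(b), 1999);	/* "2.00"  - carry moves into the integer part */
	printf("old: %s  new: %s\n", a, b);
	return 0;
}
```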
diff --git a/kernel/module.c b/kernel/module.c
index 08864d257eb0..9db11911e04b 100644
--- a/kernel/module.c
+++ b/kernel/module.c
| @@ -1799,7 +1799,7 @@ static void *module_alloc_update_bounds(unsigned long size) | |||
| 1799 | 1799 | ||
| 1800 | /* Allocate and load the module: note that size of section 0 is always | 1800 | /* Allocate and load the module: note that size of section 0 is always |
| 1801 | zero, and we rely on this for optional sections. */ | 1801 | zero, and we rely on this for optional sections. */ |
| 1802 | static struct module *load_module(void __user *umod, | 1802 | static noinline struct module *load_module(void __user *umod, |
| 1803 | unsigned long len, | 1803 | unsigned long len, |
| 1804 | const char __user *uargs) | 1804 | const char __user *uargs) |
| 1805 | { | 1805 | { |
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 21575fc46d05..1d3ef29a2583 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
| @@ -14,7 +14,6 @@ | |||
| 14 | */ | 14 | */ |
| 15 | 15 | ||
| 16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
| 17 | #include <linux/version.h> | ||
| 18 | #include <linux/nsproxy.h> | 17 | #include <linux/nsproxy.h> |
| 19 | #include <linux/init_task.h> | 18 | #include <linux/init_task.h> |
| 20 | #include <linux/mnt_namespace.h> | 19 | #include <linux/mnt_namespace.h> |
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index ea567b78d1aa..fab8ea86fac3 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
| @@ -179,9 +179,6 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns) | |||
| 179 | rc = sys_wait4(-1, NULL, __WALL, NULL); | 179 | rc = sys_wait4(-1, NULL, __WALL, NULL); |
| 180 | } while (rc != -ECHILD); | 180 | } while (rc != -ECHILD); |
| 181 | 181 | ||
| 182 | |||
| 183 | /* Child reaper for the pid namespace is going away */ | ||
| 184 | pid_ns->child_reaper = NULL; | ||
| 185 | acct_exit_ns(pid_ns); | 182 | acct_exit_ns(pid_ns); |
| 186 | return; | 183 | return; |
| 187 | } | 184 | } |
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index da9c2dda6a4e..dfdec524d1b7 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
| @@ -43,7 +43,7 @@ | |||
| 43 | #include <linux/uaccess.h> | 43 | #include <linux/uaccess.h> |
| 44 | 44 | ||
| 45 | /* | 45 | /* |
| 46 | * locking rule: all changes to target_value or requirements or notifiers lists | 46 | * locking rule: all changes to requirements or notifiers lists |
| 47 | * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock | 47 | * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock |
| 48 | * held, taken with _irqsave. One lock to rule them all | 48 | * held, taken with _irqsave. One lock to rule them all |
| 49 | */ | 49 | */ |
| @@ -66,7 +66,7 @@ struct pm_qos_object { | |||
| 66 | struct miscdevice pm_qos_power_miscdev; | 66 | struct miscdevice pm_qos_power_miscdev; |
| 67 | char *name; | 67 | char *name; |
| 68 | s32 default_value; | 68 | s32 default_value; |
| 69 | s32 target_value; | 69 | atomic_t target_value; |
| 70 | s32 (*comparitor)(s32, s32); | 70 | s32 (*comparitor)(s32, s32); |
| 71 | }; | 71 | }; |
| 72 | 72 | ||
| @@ -77,7 +77,7 @@ static struct pm_qos_object cpu_dma_pm_qos = { | |||
| 77 | .notifiers = &cpu_dma_lat_notifier, | 77 | .notifiers = &cpu_dma_lat_notifier, |
| 78 | .name = "cpu_dma_latency", | 78 | .name = "cpu_dma_latency", |
| 79 | .default_value = 2000 * USEC_PER_SEC, | 79 | .default_value = 2000 * USEC_PER_SEC, |
| 80 | .target_value = 2000 * USEC_PER_SEC, | 80 | .target_value = ATOMIC_INIT(2000 * USEC_PER_SEC), |
| 81 | .comparitor = min_compare | 81 | .comparitor = min_compare |
| 82 | }; | 82 | }; |
| 83 | 83 | ||
| @@ -87,7 +87,7 @@ static struct pm_qos_object network_lat_pm_qos = { | |||
| 87 | .notifiers = &network_lat_notifier, | 87 | .notifiers = &network_lat_notifier, |
| 88 | .name = "network_latency", | 88 | .name = "network_latency", |
| 89 | .default_value = 2000 * USEC_PER_SEC, | 89 | .default_value = 2000 * USEC_PER_SEC, |
| 90 | .target_value = 2000 * USEC_PER_SEC, | 90 | .target_value = ATOMIC_INIT(2000 * USEC_PER_SEC), |
| 91 | .comparitor = min_compare | 91 | .comparitor = min_compare |
| 92 | }; | 92 | }; |
| 93 | 93 | ||
| @@ -99,7 +99,7 @@ static struct pm_qos_object network_throughput_pm_qos = { | |||
| 99 | .notifiers = &network_throughput_notifier, | 99 | .notifiers = &network_throughput_notifier, |
| 100 | .name = "network_throughput", | 100 | .name = "network_throughput", |
| 101 | .default_value = 0, | 101 | .default_value = 0, |
| 102 | .target_value = 0, | 102 | .target_value = ATOMIC_INIT(0), |
| 103 | .comparitor = max_compare | 103 | .comparitor = max_compare |
| 104 | }; | 104 | }; |
| 105 | 105 | ||
| @@ -150,11 +150,11 @@ static void update_target(int target) | |||
| 150 | extreme_value = pm_qos_array[target]->comparitor( | 150 | extreme_value = pm_qos_array[target]->comparitor( |
| 151 | extreme_value, node->value); | 151 | extreme_value, node->value); |
| 152 | } | 152 | } |
| 153 | if (pm_qos_array[target]->target_value != extreme_value) { | 153 | if (atomic_read(&pm_qos_array[target]->target_value) != extreme_value) { |
| 154 | call_notifier = 1; | 154 | call_notifier = 1; |
| 155 | pm_qos_array[target]->target_value = extreme_value; | 155 | atomic_set(&pm_qos_array[target]->target_value, extreme_value); |
| 156 | pr_debug(KERN_ERR "new target for qos %d is %d\n", target, | 156 | pr_debug(KERN_ERR "new target for qos %d is %d\n", target, |
| 157 | pm_qos_array[target]->target_value); | 157 | atomic_read(&pm_qos_array[target]->target_value)); |
| 158 | } | 158 | } |
| 159 | spin_unlock_irqrestore(&pm_qos_lock, flags); | 159 | spin_unlock_irqrestore(&pm_qos_lock, flags); |
| 160 | 160 | ||
| @@ -193,14 +193,7 @@ static int find_pm_qos_object_by_minor(int minor) | |||
| 193 | */ | 193 | */ |
| 194 | int pm_qos_requirement(int pm_qos_class) | 194 | int pm_qos_requirement(int pm_qos_class) |
| 195 | { | 195 | { |
| 196 | int ret_val; | 196 | return atomic_read(&pm_qos_array[pm_qos_class]->target_value); |
| 197 | unsigned long flags; | ||
| 198 | |||
| 199 | spin_lock_irqsave(&pm_qos_lock, flags); | ||
| 200 | ret_val = pm_qos_array[pm_qos_class]->target_value; | ||
| 201 | spin_unlock_irqrestore(&pm_qos_lock, flags); | ||
| 202 | |||
| 203 | return ret_val; | ||
| 204 | } | 197 | } |
| 205 | EXPORT_SYMBOL_GPL(pm_qos_requirement); | 198 | EXPORT_SYMBOL_GPL(pm_qos_requirement); |
| 206 | 199 | ||
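The pm_qos change turns target_value into an atomic_t so that pm_qos_requirement() becomes a plain lockless read; writers still recompute the extreme value under pm_qos_lock, they just publish the result atomically. A userspace analogue of that pattern, using C11 atomics and a pthread mutex in place of the kernel primitives:

```c
#include <stdatomic.h>
#include <pthread.h>

/* Writers serialize on a lock while recomputing the aggregate value, but
 * publish it through an atomic so readers never have to take the lock. */
static pthread_mutex_t qos_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int target_value = ATOMIC_VAR_INIT(2000000);	/* cf. 2000 * USEC_PER_SEC */

static void update_target(int extreme_value)
{
	pthread_mutex_lock(&qos_lock);
	if (atomic_load(&target_value) != extreme_value)
		atomic_store(&target_value, extreme_value);
	pthread_mutex_unlock(&qos_lock);
}

static int qos_requirement(void)
{
	return atomic_load(&target_value);	/* lock-free, like pm_qos_requirement() */
}
```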
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index f011e0870b52..bbd85c60f741 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/console.h> | 21 | #include <linux/console.h> |
| 22 | #include <linux/cpu.h> | 22 | #include <linux/cpu.h> |
| 23 | #include <linux/freezer.h> | 23 | #include <linux/freezer.h> |
| 24 | #include <linux/ftrace.h> | ||
| 24 | 25 | ||
| 25 | #include "power.h" | 26 | #include "power.h" |
| 26 | 27 | ||
| @@ -255,7 +256,7 @@ static int create_image(int platform_mode) | |||
| 255 | 256 | ||
| 256 | int hibernation_snapshot(int platform_mode) | 257 | int hibernation_snapshot(int platform_mode) |
| 257 | { | 258 | { |
| 258 | int error; | 259 | int error, ftrace_save; |
| 259 | 260 | ||
| 260 | /* Free memory before shutting down devices. */ | 261 | /* Free memory before shutting down devices. */ |
| 261 | error = swsusp_shrink_memory(); | 262 | error = swsusp_shrink_memory(); |
| @@ -267,6 +268,7 @@ int hibernation_snapshot(int platform_mode) | |||
| 267 | goto Close; | 268 | goto Close; |
| 268 | 269 | ||
| 269 | suspend_console(); | 270 | suspend_console(); |
| 271 | ftrace_save = __ftrace_enabled_save(); | ||
| 270 | error = device_suspend(PMSG_FREEZE); | 272 | error = device_suspend(PMSG_FREEZE); |
| 271 | if (error) | 273 | if (error) |
| 272 | goto Recover_platform; | 274 | goto Recover_platform; |
| @@ -296,6 +298,7 @@ int hibernation_snapshot(int platform_mode) | |||
| 296 | Resume_devices: | 298 | Resume_devices: |
| 297 | device_resume(in_suspend ? | 299 | device_resume(in_suspend ? |
| 298 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); | 300 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); |
| 301 | __ftrace_enabled_restore(ftrace_save); | ||
| 299 | resume_console(); | 302 | resume_console(); |
| 300 | Close: | 303 | Close: |
| 301 | platform_end(platform_mode); | 304 | platform_end(platform_mode); |
| @@ -366,10 +369,11 @@ static int resume_target_kernel(void) | |||
| 366 | 369 | ||
| 367 | int hibernation_restore(int platform_mode) | 370 | int hibernation_restore(int platform_mode) |
| 368 | { | 371 | { |
| 369 | int error; | 372 | int error, ftrace_save; |
| 370 | 373 | ||
| 371 | pm_prepare_console(); | 374 | pm_prepare_console(); |
| 372 | suspend_console(); | 375 | suspend_console(); |
| 376 | ftrace_save = __ftrace_enabled_save(); | ||
| 373 | error = device_suspend(PMSG_QUIESCE); | 377 | error = device_suspend(PMSG_QUIESCE); |
| 374 | if (error) | 378 | if (error) |
| 375 | goto Finish; | 379 | goto Finish; |
| @@ -384,6 +388,7 @@ int hibernation_restore(int platform_mode) | |||
| 384 | platform_restore_cleanup(platform_mode); | 388 | platform_restore_cleanup(platform_mode); |
| 385 | device_resume(PMSG_RECOVER); | 389 | device_resume(PMSG_RECOVER); |
| 386 | Finish: | 390 | Finish: |
| 391 | __ftrace_enabled_restore(ftrace_save); | ||
| 387 | resume_console(); | 392 | resume_console(); |
| 388 | pm_restore_console(); | 393 | pm_restore_console(); |
| 389 | return error; | 394 | return error; |
| @@ -396,7 +401,7 @@ int hibernation_restore(int platform_mode) | |||
| 396 | 401 | ||
| 397 | int hibernation_platform_enter(void) | 402 | int hibernation_platform_enter(void) |
| 398 | { | 403 | { |
| 399 | int error; | 404 | int error, ftrace_save; |
| 400 | 405 | ||
| 401 | if (!hibernation_ops) | 406 | if (!hibernation_ops) |
| 402 | return -ENOSYS; | 407 | return -ENOSYS; |
| @@ -411,6 +416,7 @@ int hibernation_platform_enter(void) | |||
| 411 | goto Close; | 416 | goto Close; |
| 412 | 417 | ||
| 413 | suspend_console(); | 418 | suspend_console(); |
| 419 | ftrace_save = __ftrace_enabled_save(); | ||
| 414 | error = device_suspend(PMSG_HIBERNATE); | 420 | error = device_suspend(PMSG_HIBERNATE); |
| 415 | if (error) { | 421 | if (error) { |
| 416 | if (hibernation_ops->recover) | 422 | if (hibernation_ops->recover) |
| @@ -445,6 +451,7 @@ int hibernation_platform_enter(void) | |||
| 445 | hibernation_ops->finish(); | 451 | hibernation_ops->finish(); |
| 446 | Resume_devices: | 452 | Resume_devices: |
| 447 | device_resume(PMSG_RESTORE); | 453 | device_resume(PMSG_RESTORE); |
| 454 | __ftrace_enabled_restore(ftrace_save); | ||
| 448 | resume_console(); | 455 | resume_console(); |
| 449 | Close: | 456 | Close: |
| 450 | hibernation_ops->end(); | 457 | hibernation_ops->end(); |
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 0b7476f5d2a6..540b16b68565 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/freezer.h> | 21 | #include <linux/freezer.h> |
| 22 | #include <linux/vmstat.h> | 22 | #include <linux/vmstat.h> |
| 23 | #include <linux/syscalls.h> | 23 | #include <linux/syscalls.h> |
| 24 | #include <linux/ftrace.h> | ||
| 24 | 25 | ||
| 25 | #include "power.h" | 26 | #include "power.h" |
| 26 | 27 | ||
| @@ -310,7 +311,7 @@ static int suspend_enter(suspend_state_t state) | |||
| 310 | */ | 311 | */ |
| 311 | int suspend_devices_and_enter(suspend_state_t state) | 312 | int suspend_devices_and_enter(suspend_state_t state) |
| 312 | { | 313 | { |
| 313 | int error; | 314 | int error, ftrace_save; |
| 314 | 315 | ||
| 315 | if (!suspend_ops) | 316 | if (!suspend_ops) |
| 316 | return -ENOSYS; | 317 | return -ENOSYS; |
| @@ -321,6 +322,7 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
| 321 | goto Close; | 322 | goto Close; |
| 322 | } | 323 | } |
| 323 | suspend_console(); | 324 | suspend_console(); |
| 325 | ftrace_save = __ftrace_enabled_save(); | ||
| 324 | suspend_test_start(); | 326 | suspend_test_start(); |
| 325 | error = device_suspend(PMSG_SUSPEND); | 327 | error = device_suspend(PMSG_SUSPEND); |
| 326 | if (error) { | 328 | if (error) { |
| @@ -352,6 +354,7 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
| 352 | suspend_test_start(); | 354 | suspend_test_start(); |
| 353 | device_resume(PMSG_RESUME); | 355 | device_resume(PMSG_RESUME); |
| 354 | suspend_test_finish("resume devices"); | 356 | suspend_test_finish("resume devices"); |
| 357 | __ftrace_enabled_restore(ftrace_save); | ||
| 355 | resume_console(); | 358 | resume_console(); |
| 356 | Close: | 359 | Close: |
| 357 | if (suspend_ops->end) | 360 | if (suspend_ops->end) |
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index a0abf9a463f9..80ccac849e46 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
| @@ -14,7 +14,6 @@ | |||
| 14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| 15 | #include <linux/file.h> | 15 | #include <linux/file.h> |
| 16 | #include <linux/utsname.h> | 16 | #include <linux/utsname.h> |
| 17 | #include <linux/version.h> | ||
| 18 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
| 19 | #include <linux/bitops.h> | 18 | #include <linux/bitops.h> |
| 20 | #include <linux/genhd.h> | 19 | #include <linux/genhd.h> |
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 082b3fcb32a0..356699a96d56 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
| @@ -140,7 +140,7 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode) | |||
| 140 | if (!dumpable && !capable(CAP_SYS_PTRACE)) | 140 | if (!dumpable && !capable(CAP_SYS_PTRACE)) |
| 141 | return -EPERM; | 141 | return -EPERM; |
| 142 | 142 | ||
| 143 | return security_ptrace(current, task, mode); | 143 | return security_ptrace_may_access(task, mode); |
| 144 | } | 144 | } |
| 145 | 145 | ||
| 146 | bool ptrace_may_access(struct task_struct *task, unsigned int mode) | 146 | bool ptrace_may_access(struct task_struct *task, unsigned int mode) |
| @@ -499,8 +499,7 @@ repeat: | |||
| 499 | goto repeat; | 499 | goto repeat; |
| 500 | } | 500 | } |
| 501 | 501 | ||
| 502 | ret = security_ptrace(current->parent, current, | 502 | ret = security_ptrace_traceme(current->parent); |
| 503 | PTRACE_MODE_ATTACH); | ||
| 504 | 503 | ||
| 505 | /* | 504 | /* |
| 506 | * Set the ptrace bit in the process ptrace flags. | 505 | * Set the ptrace bit in the process ptrace flags. |
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index f14f372cf6f5..467d5940f624 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
| @@ -77,6 +77,7 @@ void wakeme_after_rcu(struct rcu_head *head) | |||
| 77 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), | 77 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), |
| 78 | * and may be nested. | 78 | * and may be nested. |
| 79 | */ | 79 | */ |
| 80 | void synchronize_rcu(void); /* Makes kernel-doc tools happy */ | ||
| 80 | synchronize_rcu_xxx(synchronize_rcu, call_rcu) | 81 | synchronize_rcu_xxx(synchronize_rcu, call_rcu) |
| 81 | EXPORT_SYMBOL_GPL(synchronize_rcu); | 82 | EXPORT_SYMBOL_GPL(synchronize_rcu); |
| 82 | 83 | ||
diff --git a/kernel/resource.c b/kernel/resource.c
index f5b518eabefe..03d796c1b2e9 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
| @@ -362,35 +362,21 @@ int allocate_resource(struct resource *root, struct resource *new, | |||
| 362 | 362 | ||
| 363 | EXPORT_SYMBOL(allocate_resource); | 363 | EXPORT_SYMBOL(allocate_resource); |
| 364 | 364 | ||
| 365 | /** | 365 | /* |
| 366 | * insert_resource - Inserts a resource in the resource tree | 366 | * Insert a resource into the resource tree. If successful, return NULL, |
| 367 | * @parent: parent of the new resource | 367 | * otherwise return the conflicting resource (compare to __request_resource()) |
| 368 | * @new: new resource to insert | ||
| 369 | * | ||
| 370 | * Returns 0 on success, -EBUSY if the resource can't be inserted. | ||
| 371 | * | ||
| 372 | * This function is equivalent to request_resource when no conflict | ||
| 373 | * happens. If a conflict happens, and the conflicting resources | ||
| 374 | * entirely fit within the range of the new resource, then the new | ||
| 375 | * resource is inserted and the conflicting resources become children of | ||
| 376 | * the new resource. | ||
| 377 | */ | 368 | */ |
| 378 | int insert_resource(struct resource *parent, struct resource *new) | 369 | static struct resource * __insert_resource(struct resource *parent, struct resource *new) |
| 379 | { | 370 | { |
| 380 | int result; | ||
| 381 | struct resource *first, *next; | 371 | struct resource *first, *next; |
| 382 | 372 | ||
| 383 | write_lock(&resource_lock); | ||
| 384 | |||
| 385 | for (;; parent = first) { | 373 | for (;; parent = first) { |
| 386 | result = 0; | ||
| 387 | first = __request_resource(parent, new); | 374 | first = __request_resource(parent, new); |
| 388 | if (!first) | 375 | if (!first) |
| 389 | goto out; | 376 | return first; |
| 390 | 377 | ||
| 391 | result = -EBUSY; | ||
| 392 | if (first == parent) | 378 | if (first == parent) |
| 393 | goto out; | 379 | return first; |
| 394 | 380 | ||
| 395 | if ((first->start > new->start) || (first->end < new->end)) | 381 | if ((first->start > new->start) || (first->end < new->end)) |
| 396 | break; | 382 | break; |
| @@ -401,15 +387,13 @@ int insert_resource(struct resource *parent, struct resource *new) | |||
| 401 | for (next = first; ; next = next->sibling) { | 387 | for (next = first; ; next = next->sibling) { |
| 402 | /* Partial overlap? Bad, and unfixable */ | 388 | /* Partial overlap? Bad, and unfixable */ |
| 403 | if (next->start < new->start || next->end > new->end) | 389 | if (next->start < new->start || next->end > new->end) |
| 404 | goto out; | 390 | return next; |
| 405 | if (!next->sibling) | 391 | if (!next->sibling) |
| 406 | break; | 392 | break; |
| 407 | if (next->sibling->start > new->end) | 393 | if (next->sibling->start > new->end) |
| 408 | break; | 394 | break; |
| 409 | } | 395 | } |
| 410 | 396 | ||
| 411 | result = 0; | ||
| 412 | |||
| 413 | new->parent = parent; | 397 | new->parent = parent; |
| 414 | new->sibling = next->sibling; | 398 | new->sibling = next->sibling; |
| 415 | new->child = first; | 399 | new->child = first; |
| @@ -426,10 +410,64 @@ int insert_resource(struct resource *parent, struct resource *new) | |||
| 426 | next = next->sibling; | 410 | next = next->sibling; |
| 427 | next->sibling = new; | 411 | next->sibling = new; |
| 428 | } | 412 | } |
| 413 | return NULL; | ||
| 414 | } | ||
| 429 | 415 | ||
| 430 | out: | 416 | /** |
| 417 | * insert_resource - Inserts a resource in the resource tree | ||
| 418 | * @parent: parent of the new resource | ||
| 419 | * @new: new resource to insert | ||
| 420 | * | ||
| 421 | * Returns 0 on success, -EBUSY if the resource can't be inserted. | ||
| 422 | * | ||
| 423 | * This function is equivalent to request_resource when no conflict | ||
| 424 | * happens. If a conflict happens, and the conflicting resources | ||
| 425 | * entirely fit within the range of the new resource, then the new | ||
| 426 | * resource is inserted and the conflicting resources become children of | ||
| 427 | * the new resource. | ||
| 428 | */ | ||
| 429 | int insert_resource(struct resource *parent, struct resource *new) | ||
| 430 | { | ||
| 431 | struct resource *conflict; | ||
| 432 | |||
| 433 | write_lock(&resource_lock); | ||
| 434 | conflict = __insert_resource(parent, new); | ||
| 435 | write_unlock(&resource_lock); | ||
| 436 | return conflict ? -EBUSY : 0; | ||
| 437 | } | ||
| 438 | |||
| 439 | /** | ||
| 440 | * insert_resource_expand_to_fit - Insert a resource into the resource tree | ||
| 441 | * @root: root resource descriptor | ||
| 442 | * @new: new resource to insert | ||
| 443 | * | ||
| 444 | * Insert a resource into the resource tree, possibly expanding it in order | ||
| 445 | * to make it encompass any conflicting resources. | ||
| 446 | */ | ||
| 447 | void insert_resource_expand_to_fit(struct resource *root, struct resource *new) | ||
| 448 | { | ||
| 449 | if (new->parent) | ||
| 450 | return; | ||
| 451 | |||
| 452 | write_lock(&resource_lock); | ||
| 453 | for (;;) { | ||
| 454 | struct resource *conflict; | ||
| 455 | |||
| 456 | conflict = __insert_resource(root, new); | ||
| 457 | if (!conflict) | ||
| 458 | break; | ||
| 459 | if (conflict == root) | ||
| 460 | break; | ||
| 461 | |||
| 462 | /* Ok, expand resource to cover the conflict, then try again .. */ | ||
| 463 | if (conflict->start < new->start) | ||
| 464 | new->start = conflict->start; | ||
| 465 | if (conflict->end > new->end) | ||
| 466 | new->end = conflict->end; | ||
| 467 | |||
| 468 | printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name); | ||
| 469 | } | ||
| 431 | write_unlock(&resource_lock); | 470 | write_unlock(&resource_lock); |
| 432 | return result; | ||
| 433 | } | 471 | } |
| 434 | 472 | ||
| 435 | /** | 473 | /** |
diff --git a/kernel/sched.c b/kernel/sched.c
index d601fb0406ca..9a1ddb84e26d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
| @@ -808,9 +808,9 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32; | |||
| 808 | 808 | ||
| 809 | /* | 809 | /* |
| 810 | * ratelimit for updating the group shares. | 810 | * ratelimit for updating the group shares. |
| 811 | * default: 0.5ms | 811 | * default: 0.25ms |
| 812 | */ | 812 | */ |
| 813 | const_debug unsigned int sysctl_sched_shares_ratelimit = 500000; | 813 | unsigned int sysctl_sched_shares_ratelimit = 250000; |
| 814 | 814 | ||
| 815 | /* | 815 | /* |
| 816 | * period over which we measure -rt task cpu usage in us. | 816 | * period over which we measure -rt task cpu usage in us. |
| @@ -4669,6 +4669,52 @@ int __sched wait_for_completion_killable(struct completion *x) | |||
| 4669 | } | 4669 | } |
| 4670 | EXPORT_SYMBOL(wait_for_completion_killable); | 4670 | EXPORT_SYMBOL(wait_for_completion_killable); |
| 4671 | 4671 | ||
| 4672 | /** | ||
| 4673 | * try_wait_for_completion - try to decrement a completion without blocking | ||
| 4674 | * @x: completion structure | ||
| 4675 | * | ||
| 4676 | * Returns: 0 if a decrement cannot be done without blocking | ||
| 4677 | * 1 if a decrement succeeded. | ||
| 4678 | * | ||
| 4679 | * If a completion is being used as a counting completion, | ||
| 4680 | * attempt to decrement the counter without blocking. This | ||
| 4681 | * enables us to avoid waiting if the resource the completion | ||
| 4682 | * is protecting is not available. | ||
| 4683 | */ | ||
| 4684 | bool try_wait_for_completion(struct completion *x) | ||
| 4685 | { | ||
| 4686 | int ret = 1; | ||
| 4687 | |||
| 4688 | spin_lock_irq(&x->wait.lock); | ||
| 4689 | if (!x->done) | ||
| 4690 | ret = 0; | ||
| 4691 | else | ||
| 4692 | x->done--; | ||
| 4693 | spin_unlock_irq(&x->wait.lock); | ||
| 4694 | return ret; | ||
| 4695 | } | ||
| 4696 | EXPORT_SYMBOL(try_wait_for_completion); | ||
| 4697 | |||
| 4698 | /** | ||
| 4699 | * completion_done - Test to see if a completion has any waiters | ||
| 4700 | * @x: completion structure | ||
| 4701 | * | ||
| 4702 | * Returns: 0 if there are waiters (wait_for_completion() in progress) | ||
| 4703 | * 1 if there are no waiters. | ||
| 4704 | * | ||
| 4705 | */ | ||
| 4706 | bool completion_done(struct completion *x) | ||
| 4707 | { | ||
| 4708 | int ret = 1; | ||
| 4709 | |||
| 4710 | spin_lock_irq(&x->wait.lock); | ||
| 4711 | if (!x->done) | ||
| 4712 | ret = 0; | ||
| 4713 | spin_unlock_irq(&x->wait.lock); | ||
| 4714 | return ret; | ||
| 4715 | } | ||
| 4716 | EXPORT_SYMBOL(completion_done); | ||
| 4717 | |||
| 4672 | static long __sched | 4718 | static long __sched |
| 4673 | sleep_on_common(wait_queue_head_t *q, int state, long timeout) | 4719 | sleep_on_common(wait_queue_head_t *q, int state, long timeout) |
| 4674 | { | 4720 | { |
| @@ -5740,6 +5786,8 @@ static inline void sched_init_granularity(void) | |||
| 5740 | sysctl_sched_latency = limit; | 5786 | sysctl_sched_latency = limit; |
| 5741 | 5787 | ||
| 5742 | sysctl_sched_wakeup_granularity *= factor; | 5788 | sysctl_sched_wakeup_granularity *= factor; |
| 5789 | |||
| 5790 | sysctl_sched_shares_ratelimit *= factor; | ||
| 5743 | } | 5791 | } |
| 5744 | 5792 | ||
| 5745 | #ifdef CONFIG_SMP | 5793 | #ifdef CONFIG_SMP |
| @@ -8462,8 +8510,8 @@ struct task_group *sched_create_group(struct task_group *parent) | |||
| 8462 | WARN_ON(!parent); /* root should already exist */ | 8510 | WARN_ON(!parent); /* root should already exist */ |
| 8463 | 8511 | ||
| 8464 | tg->parent = parent; | 8512 | tg->parent = parent; |
| 8465 | list_add_rcu(&tg->siblings, &parent->children); | ||
| 8466 | INIT_LIST_HEAD(&tg->children); | 8513 | INIT_LIST_HEAD(&tg->children); |
| 8514 | list_add_rcu(&tg->siblings, &parent->children); | ||
| 8467 | spin_unlock_irqrestore(&task_group_lock, flags); | 8515 | spin_unlock_irqrestore(&task_group_lock, flags); |
| 8468 | 8516 | ||
| 8469 | return tg; | 8517 | return tg; |
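The new try_wait_for_completion() and completion_done() helpers give callers a non-blocking way to consume or inspect a counting completion. A minimal sketch of how a driver-style slot pool might use them; the names are illustrative, not from this patch:

```c
#include <linux/types.h>
#include <linux/completion.h>

static DECLARE_COMPLETION(slot_available);

/* Fast path: consume a slot only if one is already available; never sleep. */
static bool try_get_slot(void)
{
	return try_wait_for_completion(&slot_available);
}

/* Release path: hand the slot back, waking (or crediting) a waiter. */
static void put_slot(void)
{
	complete(&slot_available);
}
```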
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 204991a0bfa7..e8ab096ddfe3 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
| @@ -12,19 +12,17 @@ | |||
| 12 | * | 12 | * |
| 13 | * Create a semi stable clock from a mixture of other events, including: | 13 | * Create a semi stable clock from a mixture of other events, including: |
| 14 | * - gtod | 14 | * - gtod |
| 15 | * - jiffies | ||
| 16 | * - sched_clock() | 15 | * - sched_clock() |
| 17 | * - explicit idle events | 16 | * - explicit idle events |
| 18 | * | 17 | * |
| 19 | * We use gtod as base and the unstable clock deltas. The deltas are filtered, | 18 | * We use gtod as base and the unstable clock deltas. The deltas are filtered, |
| 20 | * making it monotonic and keeping it within an expected window. This window | 19 | * making it monotonic and keeping it within an expected window. |
| 21 | * is set up using jiffies. | ||
| 22 | * | 20 | * |
| 23 | * Furthermore, explicit sleep and wakeup hooks allow us to account for time | 21 | * Furthermore, explicit sleep and wakeup hooks allow us to account for time |
| 24 | * that is otherwise invisible (TSC gets stopped). | 22 | * that is otherwise invisible (TSC gets stopped). |
| 25 | * | 23 | * |
| 26 | * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat | 24 | * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat |
| 27 | * consistent between cpus (never more than 1 jiffies difference). | 25 | * consistent between cpus (never more than 2 jiffies difference). |
| 28 | */ | 26 | */ |
| 29 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
| 30 | #include <linux/percpu.h> | 28 | #include <linux/percpu.h> |
| @@ -54,7 +52,6 @@ struct sched_clock_data { | |||
| 54 | */ | 52 | */ |
| 55 | raw_spinlock_t lock; | 53 | raw_spinlock_t lock; |
| 56 | 54 | ||
| 57 | unsigned long tick_jiffies; | ||
| 58 | u64 tick_raw; | 55 | u64 tick_raw; |
| 59 | u64 tick_gtod; | 56 | u64 tick_gtod; |
| 60 | u64 clock; | 57 | u64 clock; |
| @@ -75,14 +72,12 @@ static inline struct sched_clock_data *cpu_sdc(int cpu) | |||
| 75 | void sched_clock_init(void) | 72 | void sched_clock_init(void) |
| 76 | { | 73 | { |
| 77 | u64 ktime_now = ktime_to_ns(ktime_get()); | 74 | u64 ktime_now = ktime_to_ns(ktime_get()); |
| 78 | unsigned long now_jiffies = jiffies; | ||
| 79 | int cpu; | 75 | int cpu; |
| 80 | 76 | ||
| 81 | for_each_possible_cpu(cpu) { | 77 | for_each_possible_cpu(cpu) { |
| 82 | struct sched_clock_data *scd = cpu_sdc(cpu); | 78 | struct sched_clock_data *scd = cpu_sdc(cpu); |
| 83 | 79 | ||
| 84 | scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 80 | scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; |
| 85 | scd->tick_jiffies = now_jiffies; | ||
| 86 | scd->tick_raw = 0; | 81 | scd->tick_raw = 0; |
| 87 | scd->tick_gtod = ktime_now; | 82 | scd->tick_gtod = ktime_now; |
| 88 | scd->clock = ktime_now; | 83 | scd->clock = ktime_now; |
| @@ -92,46 +87,51 @@ void sched_clock_init(void) | |||
| 92 | } | 87 | } |
| 93 | 88 | ||
| 94 | /* | 89 | /* |
| 90 | * min,max except they take wrapping into account | ||
| 91 | */ | ||
| 92 | |||
| 93 | static inline u64 wrap_min(u64 x, u64 y) | ||
| 94 | { | ||
| 95 | return (s64)(x - y) < 0 ? x : y; | ||
| 96 | } | ||
| 97 | |||
| 98 | static inline u64 wrap_max(u64 x, u64 y) | ||
| 99 | { | ||
| 100 | return (s64)(x - y) > 0 ? x : y; | ||
| 101 | } | ||
| 102 | |||
| 103 | /* | ||
| 95 | * update the percpu scd from the raw @now value | 104 | * update the percpu scd from the raw @now value |
| 96 | * | 105 | * |
| 97 | * - filter out backward motion | 106 | * - filter out backward motion |
| 98 | * - use jiffies to generate a min,max window to clip the raw values | 107 | * - use the GTOD tick value to create a window to filter crazy TSC values |
| 99 | */ | 108 | */ |
| 100 | static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now) | 109 | static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now) |
| 101 | { | 110 | { |
| 102 | unsigned long now_jiffies = jiffies; | ||
| 103 | long delta_jiffies = now_jiffies - scd->tick_jiffies; | ||
| 104 | u64 clock = scd->clock; | ||
| 105 | u64 min_clock, max_clock; | ||
| 106 | s64 delta = now - scd->tick_raw; | 111 | s64 delta = now - scd->tick_raw; |
| 112 | u64 clock, min_clock, max_clock; | ||
| 107 | 113 | ||
| 108 | WARN_ON_ONCE(!irqs_disabled()); | 114 | WARN_ON_ONCE(!irqs_disabled()); |
| 109 | min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC; | ||
| 110 | 115 | ||
| 111 | if (unlikely(delta < 0)) { | 116 | if (unlikely(delta < 0)) |
| 112 | clock++; | 117 | delta = 0; |
| 113 | goto out; | ||
| 114 | } | ||
| 115 | 118 | ||
| 116 | max_clock = min_clock + TICK_NSEC; | 119 | /* |
| 120 | * scd->clock = clamp(scd->tick_gtod + delta, | ||
| 121 | * max(scd->tick_gtod, scd->clock), | ||
| 122 | * scd->tick_gtod + TICK_NSEC); | ||
| 123 | */ | ||
| 117 | 124 | ||
| 118 | if (unlikely(clock + delta > max_clock)) { | 125 | clock = scd->tick_gtod + delta; |
| 119 | if (clock < max_clock) | 126 | min_clock = wrap_max(scd->tick_gtod, scd->clock); |
| 120 | clock = max_clock; | 127 | max_clock = scd->tick_gtod + TICK_NSEC; |
| 121 | else | ||
| 122 | clock++; | ||
| 123 | } else { | ||
| 124 | clock += delta; | ||
| 125 | } | ||
| 126 | 128 | ||
| 127 | out: | 129 | clock = wrap_max(clock, min_clock); |
| 128 | if (unlikely(clock < min_clock)) | 130 | clock = wrap_min(clock, max_clock); |
| 129 | clock = min_clock; | ||
| 130 | 131 | ||
| 131 | scd->tick_jiffies = now_jiffies; | ||
| 132 | scd->clock = clock; | 132 | scd->clock = clock; |
| 133 | 133 | ||
| 134 | return clock; | 134 | return scd->clock; |
| 135 | } | 135 | } |
| 136 | 136 | ||
| 137 | static void lock_double_clock(struct sched_clock_data *data1, | 137 | static void lock_double_clock(struct sched_clock_data *data1, |
| @@ -171,7 +171,7 @@ u64 sched_clock_cpu(int cpu) | |||
| 171 | * larger time as the latest time for both | 171 | * larger time as the latest time for both |
| 172 | * runqueues. (this creates monotonic movement) | 172 | * runqueues. (this creates monotonic movement) |
| 173 | */ | 173 | */ |
| 174 | if (likely(remote_clock < this_clock)) { | 174 | if (likely((s64)(remote_clock - this_clock) < 0)) { |
| 175 | clock = this_clock; | 175 | clock = this_clock; |
| 176 | scd->clock = clock; | 176 | scd->clock = clock; |
| 177 | } else { | 177 | } else { |
| @@ -207,14 +207,9 @@ void sched_clock_tick(void) | |||
| 207 | now = sched_clock(); | 207 | now = sched_clock(); |
| 208 | 208 | ||
| 209 | __raw_spin_lock(&scd->lock); | 209 | __raw_spin_lock(&scd->lock); |
| 210 | __update_sched_clock(scd, now); | ||
| 211 | /* | ||
| 212 | * update tick_gtod after __update_sched_clock() because that will | ||
| 213 | * already observe 1 new jiffy; adding a new tick_gtod to that would | ||
| 214 | * increase the clock 2 jiffies. | ||
| 215 | */ | ||
| 216 | scd->tick_raw = now; | 210 | scd->tick_raw = now; |
| 217 | scd->tick_gtod = now_gtod; | 211 | scd->tick_gtod = now_gtod; |
| 212 | __update_sched_clock(scd, now); | ||
| 218 | __raw_spin_unlock(&scd->lock); | 213 | __raw_spin_unlock(&scd->lock); |
| 219 | } | 214 | } |
| 220 | 215 | ||
| @@ -232,18 +227,7 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event); | |||
| 232 | */ | 227 | */ |
| 233 | void sched_clock_idle_wakeup_event(u64 delta_ns) | 228 | void sched_clock_idle_wakeup_event(u64 delta_ns) |
| 234 | { | 229 | { |
| 235 | struct sched_clock_data *scd = this_scd(); | 230 | sched_clock_tick(); |
| 236 | |||
| 237 | /* | ||
| 238 | * Override the previous timestamp and ignore all | ||
| 239 | * sched_clock() deltas that occured while we idled, | ||
| 240 | * and use the PM-provided delta_ns to advance the | ||
| 241 | * rq clock: | ||
| 242 | */ | ||
| 243 | __raw_spin_lock(&scd->lock); | ||
| 244 | scd->clock += delta_ns; | ||
| 245 | __raw_spin_unlock(&scd->lock); | ||
| 246 | |||
| 247 | touch_softlockup_watchdog(); | 231 | touch_softlockup_watchdog(); |
| 248 | } | 232 | } |
| 249 | EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); | 233 | EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); |
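The sched_clock.c changes above drop the jiffies-based window and instead clamp each per-cpu clock into [max(tick_gtod, previous clock), tick_gtod + TICK_NSEC], using wrap-safe min/max helpers. A standalone sketch of that filter (user-space C; TICK_NSEC is assumed to be 1 ms here purely for illustration, and the real kernel value depends on HZ):

```c
#include <stdio.h>
#include <stdint.h>

/* min/max that stay correct when the u64 values wrap around. */
static inline uint64_t wrap_min(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) < 0 ? x : y;
}

static inline uint64_t wrap_max(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) > 0 ? x : y;
}

#define TICK_NSEC 1000000ULL  /* assumed 1 ms tick, for illustration only */

/* Mirrors __update_sched_clock():
 *   clock = clamp(tick_gtod + delta,
 *                 max(tick_gtod, prev_clock),
 *                 tick_gtod + TICK_NSEC);
 */
static uint64_t filter(uint64_t tick_gtod, uint64_t prev_clock, int64_t delta)
{
	uint64_t clock, min_clock, max_clock;

	if (delta < 0)
		delta = 0;

	clock = tick_gtod + delta;
	min_clock = wrap_max(tick_gtod, prev_clock);
	max_clock = tick_gtod + TICK_NSEC;

	clock = wrap_max(clock, min_clock);  /* never go backwards           */
	clock = wrap_min(clock, max_clock);  /* never run ahead of the tick  */
	return clock;
}

int main(void)
{
	/* A "crazy" 50 ms TSC delta is clipped to one tick past gtod. */
	uint64_t c = filter(1000000000ULL, 999500000ULL, 50000000);
	printf("filtered clock: %llu\n", (unsigned long long)c);
	return 0;
}
```

The (s64)(x - y) comparisons keep the min/max correct even when the u64 counters wrap, which a plain x < y comparison would get wrong; the same idiom is used above for the remote/local clock comparison in sched_clock_cpu().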
diff --git a/kernel/sched_features.h b/kernel/sched_features.h index 862b06bd560a..9353ca78154e 100644 --- a/kernel/sched_features.h +++ b/kernel/sched_features.h | |||
| @@ -8,6 +8,6 @@ SCHED_FEAT(SYNC_WAKEUPS, 1) | |||
| 8 | SCHED_FEAT(HRTICK, 1) | 8 | SCHED_FEAT(HRTICK, 1) |
| 9 | SCHED_FEAT(DOUBLE_TICK, 0) | 9 | SCHED_FEAT(DOUBLE_TICK, 0) |
| 10 | SCHED_FEAT(ASYM_GRAN, 1) | 10 | SCHED_FEAT(ASYM_GRAN, 1) |
| 11 | SCHED_FEAT(LB_BIAS, 0) | 11 | SCHED_FEAT(LB_BIAS, 1) |
| 12 | SCHED_FEAT(LB_WAKEUP_UPDATE, 1) | 12 | SCHED_FEAT(LB_WAKEUP_UPDATE, 1) |
| 13 | SCHED_FEAT(ASYM_EFF_LOAD, 1) | 13 | SCHED_FEAT(ASYM_EFF_LOAD, 1) |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 6163e4cf885b..552310798dad 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
| @@ -199,6 +199,8 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) | |||
| 199 | 199 | ||
| 200 | static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq) | 200 | static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq) |
| 201 | { | 201 | { |
| 202 | if (rt_rq->rt_nr_running) | ||
| 203 | resched_task(rq_of_rt_rq(rt_rq)->curr); | ||
| 202 | } | 204 | } |
| 203 | 205 | ||
| 204 | static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq) | 206 | static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq) |
| @@ -298,7 +300,7 @@ static void __disable_runtime(struct rq *rq) | |||
| 298 | struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); | 300 | struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); |
| 299 | s64 diff; | 301 | s64 diff; |
| 300 | 302 | ||
| 301 | if (iter == rt_rq) | 303 | if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) |
| 302 | continue; | 304 | continue; |
| 303 | 305 | ||
| 304 | spin_lock(&iter->rt_runtime_lock); | 306 | spin_lock(&iter->rt_runtime_lock); |
| @@ -438,9 +440,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) | |||
| 438 | { | 440 | { |
| 439 | u64 runtime = sched_rt_runtime(rt_rq); | 441 | u64 runtime = sched_rt_runtime(rt_rq); |
| 440 | 442 | ||
| 441 | if (runtime == RUNTIME_INF) | ||
| 442 | return 0; | ||
| 443 | |||
| 444 | if (rt_rq->rt_throttled) | 443 | if (rt_rq->rt_throttled) |
| 445 | return rt_rq_throttled(rt_rq); | 444 | return rt_rq_throttled(rt_rq); |
| 446 | 445 | ||
| @@ -491,9 +490,11 @@ static void update_curr_rt(struct rq *rq) | |||
| 491 | rt_rq = rt_rq_of_se(rt_se); | 490 | rt_rq = rt_rq_of_se(rt_se); |
| 492 | 491 | ||
| 493 | spin_lock(&rt_rq->rt_runtime_lock); | 492 | spin_lock(&rt_rq->rt_runtime_lock); |
| 494 | rt_rq->rt_time += delta_exec; | 493 | if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { |
| 495 | if (sched_rt_runtime_exceeded(rt_rq)) | 494 | rt_rq->rt_time += delta_exec; |
| 496 | resched_task(curr); | 495 | if (sched_rt_runtime_exceeded(rt_rq)) |
| 496 | resched_task(curr); | ||
| 497 | } | ||
| 497 | spin_unlock(&rt_rq->rt_runtime_lock); | 498 | spin_unlock(&rt_rq->rt_runtime_lock); |
| 498 | } | 499 | } |
| 499 | } | 500 | } |
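In sched_rt.c the RUNTIME_INF check moves from sched_rt_runtime_exceeded() into update_curr_rt(), so groups with unlimited runtime no longer accumulate rt_time at all. A small sketch of the resulting accounting rule (user-space C; the RUNTIME_INF sentinel and the sample numbers are assumptions for illustration only):

```c
#include <stdint.h>
#include <stdio.h>

#define RUNTIME_INF ((uint64_t)~0ULL)  /* assumed "no limit" sentinel */

struct rt_rq_sim {
	uint64_t rt_runtime;  /* allowed runtime per period, or RUNTIME_INF */
	uint64_t rt_time;     /* runtime consumed in the current period     */
};

/* Mirrors the new update_curr_rt() structure: account and throttle only
 * when the group actually has a finite runtime limit. */
static int account(struct rt_rq_sim *rq, uint64_t delta_exec)
{
	if (rq->rt_runtime == RUNTIME_INF)
		return 0;                         /* unlimited: nothing to do */

	rq->rt_time += delta_exec;
	return rq->rt_time > rq->rt_runtime;  /* 1 => caller should resched */
}

int main(void)
{
	struct rt_rq_sim limited  = { .rt_runtime = 950000,      .rt_time = 0 };
	struct rt_rq_sim infinite = { .rt_runtime = RUNTIME_INF, .rt_time = 0 };

	printf("limited group throttles:   %d\n", account(&limited, 1000000));
	printf("unlimited group throttles: %d\n", account(&infinite, 1000000));
	return 0;
}
```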
diff --git a/kernel/signal.c b/kernel/signal.c index c539f60c6f41..e661b01d340f 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -1338,6 +1338,7 @@ int do_notify_parent(struct task_struct *tsk, int sig) | |||
| 1338 | struct siginfo info; | 1338 | struct siginfo info; |
| 1339 | unsigned long flags; | 1339 | unsigned long flags; |
| 1340 | struct sighand_struct *psig; | 1340 | struct sighand_struct *psig; |
| 1341 | int ret = sig; | ||
| 1341 | 1342 | ||
| 1342 | BUG_ON(sig == -1); | 1343 | BUG_ON(sig == -1); |
| 1343 | 1344 | ||
| @@ -1402,7 +1403,7 @@ int do_notify_parent(struct task_struct *tsk, int sig) | |||
| 1402 | * is implementation-defined: we do (if you don't want | 1403 | * is implementation-defined: we do (if you don't want |
| 1403 | * it, just use SIG_IGN instead). | 1404 | * it, just use SIG_IGN instead). |
| 1404 | */ | 1405 | */ |
| 1405 | tsk->exit_signal = -1; | 1406 | ret = tsk->exit_signal = -1; |
| 1406 | if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) | 1407 | if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) |
| 1407 | sig = -1; | 1408 | sig = -1; |
| 1408 | } | 1409 | } |
| @@ -1411,7 +1412,7 @@ int do_notify_parent(struct task_struct *tsk, int sig) | |||
| 1411 | __wake_up_parent(tsk, tsk->parent); | 1412 | __wake_up_parent(tsk, tsk->parent); |
| 1412 | spin_unlock_irqrestore(&psig->siglock, flags); | 1413 | spin_unlock_irqrestore(&psig->siglock, flags); |
| 1413 | 1414 | ||
| 1414 | return sig; | 1415 | return ret; |
| 1415 | } | 1416 | } |
| 1416 | 1417 | ||
| 1417 | static void do_notify_parent_cldstop(struct task_struct *tsk, int why) | 1418 | static void do_notify_parent_cldstop(struct task_struct *tsk, int why) |
diff --git a/kernel/smp.c b/kernel/smp.c index 782e2b93e465..f362a8553777 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
| @@ -210,8 +210,10 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | |||
| 210 | { | 210 | { |
| 211 | struct call_single_data d; | 211 | struct call_single_data d; |
| 212 | unsigned long flags; | 212 | unsigned long flags; |
| 213 | /* prevent preemption and reschedule on another processor */ | 213 | /* prevent preemption and reschedule on another processor, |
| 214 | as well as CPU removal */ | ||
| 214 | int me = get_cpu(); | 215 | int me = get_cpu(); |
| 216 | int err = 0; | ||
| 215 | 217 | ||
| 216 | /* Can deadlock when called with interrupts disabled */ | 218 | /* Can deadlock when called with interrupts disabled */ |
| 217 | WARN_ON(irqs_disabled()); | 219 | WARN_ON(irqs_disabled()); |
| @@ -220,7 +222,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | |||
| 220 | local_irq_save(flags); | 222 | local_irq_save(flags); |
| 221 | func(info); | 223 | func(info); |
| 222 | local_irq_restore(flags); | 224 | local_irq_restore(flags); |
| 223 | } else { | 225 | } else if ((unsigned)cpu < NR_CPUS && cpu_online(cpu)) { |
| 224 | struct call_single_data *data = NULL; | 226 | struct call_single_data *data = NULL; |
| 225 | 227 | ||
| 226 | if (!wait) { | 228 | if (!wait) { |
| @@ -236,10 +238,12 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | |||
| 236 | data->func = func; | 238 | data->func = func; |
| 237 | data->info = info; | 239 | data->info = info; |
| 238 | generic_exec_single(cpu, data); | 240 | generic_exec_single(cpu, data); |
| 241 | } else { | ||
| 242 | err = -ENXIO; /* CPU not online */ | ||
| 239 | } | 243 | } |
| 240 | 244 | ||
| 241 | put_cpu(); | 245 | put_cpu(); |
| 242 | return 0; | 246 | return err; |
| 243 | } | 247 | } |
| 244 | EXPORT_SYMBOL(smp_call_function_single); | 248 | EXPORT_SYMBOL(smp_call_function_single); |
| 245 | 249 | ||
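With the smp.c change, smp_call_function_single() validates the target CPU and returns -ENXIO instead of queueing work for a CPU that is offline or out of range, so callers now need to check its return value. A user-space analogue of that contract (cpu_online() and call_on_cpu() here are stand-ins for illustration, not kernel APIs):

```c
#include <errno.h>
#include <stdio.h>

#define NR_CPUS 4
static int cpu_online_mask[NR_CPUS] = { 1, 1, 0, 1 };  /* cpu 2 "offline" */

static int cpu_online(unsigned int cpu)
{
	return cpu < NR_CPUS && cpu_online_mask[cpu];
}

/* Mirrors the new contract: run the function "on" an online cpu, or fail
 * with -ENXIO instead of silently returning 0 for a missing cpu. */
static int call_on_cpu(unsigned int cpu, void (*func)(void *), void *info)
{
	if (!cpu_online(cpu))
		return -ENXIO;
	func(info);  /* stand-in for generic_exec_single()/IPI delivery */
	return 0;
}

static void hello(void *info)
{
	printf("ran on cpu %u\n", *(unsigned int *)info);
}

int main(void)
{
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++) {
		int err = call_on_cpu(cpu, hello, &cpu);
		if (err)
			fprintf(stderr, "cpu %u: error %d (offline)\n", cpu, err);
	}
	return 0;
}
```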
diff --git a/kernel/softlockup.c b/kernel/softlockup.c index b75b492fbfcf..cb838ee93a82 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c | |||
| @@ -233,7 +233,8 @@ static void check_hung_uninterruptible_tasks(int this_cpu) | |||
| 233 | do_each_thread(g, t) { | 233 | do_each_thread(g, t) { |
| 234 | if (!--max_count) | 234 | if (!--max_count) |
| 235 | goto unlock; | 235 | goto unlock; |
| 236 | if (t->state & TASK_UNINTERRUPTIBLE) | 236 | /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */ |
| 237 | if (t->state == TASK_UNINTERRUPTIBLE) | ||
| 237 | check_hung_task(t, now); | 238 | check_hung_task(t, now); |
| 238 | } while_each_thread(g, t); | 239 | } while_each_thread(g, t); |
| 239 | unlock: | 240 | unlock: |
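The softlockup.c comparison is tightened because TASK_KILLABLE is built from TASK_WAKEKILL | TASK_UNINTERRUPTIBLE, so a bitwise test against TASK_UNINTERRUPTIBLE also matches killable sleeps (such as NFS waits) and would flag them as hung. A tiny demonstration of the difference (the constant values are assumed from <linux/sched.h> of this era; check the header of the tree you are reading):

```c
#include <stdio.h>

/* Assumed values, matching <linux/sched.h> around 2.6.27. */
#define TASK_UNINTERRUPTIBLE 2
#define TASK_WAKEKILL        128
#define TASK_KILLABLE        (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)

int main(void)
{
	long state = TASK_KILLABLE;  /* e.g. a task sleeping in an NFS wait */

	/* The old bitwise test matches the killable sleeper... */
	printf("state & TASK_UNINTERRUPTIBLE: %d\n",
	       (state & TASK_UNINTERRUPTIBLE) != 0);   /* prints 1 */

	/* ...while the new equality test only matches plain D-state. */
	printf("state == TASK_UNINTERRUPTIBLE: %d\n",
	       state == TASK_UNINTERRUPTIBLE);          /* prints 0 */
	return 0;
}
```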
diff --git a/kernel/spinlock.c b/kernel/spinlock.c index 44baeea94ab9..29ab20749dd3 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c | |||
| @@ -290,7 +290,6 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) | |||
| 290 | spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); | 290 | spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); |
| 291 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | 291 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); |
| 292 | } | 292 | } |
| 293 | |||
| 294 | EXPORT_SYMBOL(_spin_lock_nested); | 293 | EXPORT_SYMBOL(_spin_lock_nested); |
| 295 | 294 | ||
| 296 | unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) | 295 | unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) |
| @@ -312,7 +311,6 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas | |||
| 312 | #endif | 311 | #endif |
| 313 | return flags; | 312 | return flags; |
| 314 | } | 313 | } |
| 315 | |||
| 316 | EXPORT_SYMBOL(_spin_lock_irqsave_nested); | 314 | EXPORT_SYMBOL(_spin_lock_irqsave_nested); |
| 317 | 315 | ||
| 318 | void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, | 316 | void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, |
| @@ -322,7 +320,6 @@ void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, | |||
| 322 | spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); | 320 | spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); |
| 323 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | 321 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); |
| 324 | } | 322 | } |
| 325 | |||
| 326 | EXPORT_SYMBOL(_spin_lock_nest_lock); | 323 | EXPORT_SYMBOL(_spin_lock_nest_lock); |
| 327 | 324 | ||
| 328 | #endif | 325 | #endif |
diff --git a/kernel/sys.c b/kernel/sys.c index c01858090a98..038a7bc0901d 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
| @@ -169,9 +169,9 @@ asmlinkage long sys_setpriority(int which, int who, int niceval) | |||
| 169 | pgrp = find_vpid(who); | 169 | pgrp = find_vpid(who); |
| 170 | else | 170 | else |
| 171 | pgrp = task_pgrp(current); | 171 | pgrp = task_pgrp(current); |
| 172 | do_each_pid_task(pgrp, PIDTYPE_PGID, p) { | 172 | do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { |
| 173 | error = set_one_prio(p, niceval, error); | 173 | error = set_one_prio(p, niceval, error); |
| 174 | } while_each_pid_task(pgrp, PIDTYPE_PGID, p); | 174 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); |
| 175 | break; | 175 | break; |
| 176 | case PRIO_USER: | 176 | case PRIO_USER: |
| 177 | user = current->user; | 177 | user = current->user; |
| @@ -229,11 +229,11 @@ asmlinkage long sys_getpriority(int which, int who) | |||
| 229 | pgrp = find_vpid(who); | 229 | pgrp = find_vpid(who); |
| 230 | else | 230 | else |
| 231 | pgrp = task_pgrp(current); | 231 | pgrp = task_pgrp(current); |
| 232 | do_each_pid_task(pgrp, PIDTYPE_PGID, p) { | 232 | do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { |
| 233 | niceval = 20 - task_nice(p); | 233 | niceval = 20 - task_nice(p); |
| 234 | if (niceval > retval) | 234 | if (niceval > retval) |
| 235 | retval = niceval; | 235 | retval = niceval; |
| 236 | } while_each_pid_task(pgrp, PIDTYPE_PGID, p); | 236 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); |
| 237 | break; | 237 | break; |
| 238 | case PRIO_USER: | 238 | case PRIO_USER: |
| 239 | user = current->user; | 239 | user = current->user; |
| @@ -274,7 +274,7 @@ void emergency_restart(void) | |||
| 274 | } | 274 | } |
| 275 | EXPORT_SYMBOL_GPL(emergency_restart); | 275 | EXPORT_SYMBOL_GPL(emergency_restart); |
| 276 | 276 | ||
| 277 | static void kernel_restart_prepare(char *cmd) | 277 | void kernel_restart_prepare(char *cmd) |
| 278 | { | 278 | { |
| 279 | blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); | 279 | blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); |
| 280 | system_state = SYSTEM_RESTART; | 280 | system_state = SYSTEM_RESTART; |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index f5da526424a9..7a46bde78c66 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
| @@ -643,17 +643,21 @@ void tick_setup_sched_timer(void) | |||
| 643 | ts->nohz_mode = NOHZ_MODE_HIGHRES; | 643 | ts->nohz_mode = NOHZ_MODE_HIGHRES; |
| 644 | #endif | 644 | #endif |
| 645 | } | 645 | } |
| 646 | #endif /* HIGH_RES_TIMERS */ | ||
| 646 | 647 | ||
| 648 | #if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS | ||
| 647 | void tick_cancel_sched_timer(int cpu) | 649 | void tick_cancel_sched_timer(int cpu) |
| 648 | { | 650 | { |
| 649 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | 651 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
| 650 | 652 | ||
| 653 | # ifdef CONFIG_HIGH_RES_TIMERS | ||
| 651 | if (ts->sched_timer.base) | 654 | if (ts->sched_timer.base) |
| 652 | hrtimer_cancel(&ts->sched_timer); | 655 | hrtimer_cancel(&ts->sched_timer); |
| 656 | # endif | ||
| 653 | 657 | ||
| 654 | ts->nohz_mode = NOHZ_MODE_INACTIVE; | 658 | ts->nohz_mode = NOHZ_MODE_INACTIVE; |
| 655 | } | 659 | } |
| 656 | #endif /* HIGH_RES_TIMERS */ | 660 | #endif |
| 657 | 661 | ||
| 658 | /** | 662 | /** |
| 659 | * Async notification about clocksource changes | 663 | * Async notification about clocksource changes |
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index a9ab0596de44..532858fa5b88 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c | |||
| @@ -6,7 +6,6 @@ | |||
| 6 | */ | 6 | */ |
| 7 | 7 | ||
| 8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
| 9 | #include <linux/version.h> | ||
| 10 | #include <linux/nsproxy.h> | 9 | #include <linux/nsproxy.h> |
| 11 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
| 12 | #include <linux/user_namespace.h> | 11 | #include <linux/user_namespace.h> |
diff --git a/kernel/utsname.c b/kernel/utsname.c index 64d398f12444..815237a55af8 100644 --- a/kernel/utsname.c +++ b/kernel/utsname.c | |||
| @@ -12,7 +12,6 @@ | |||
| 12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
| 13 | #include <linux/uts.h> | 13 | #include <linux/uts.h> |
| 14 | #include <linux/utsname.h> | 14 | #include <linux/utsname.h> |
| 15 | #include <linux/version.h> | ||
| 16 | #include <linux/err.h> | 15 | #include <linux/err.h> |
| 17 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
| 18 | 17 | ||
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c index fe3a56c2256d..4ab9659d269e 100644 --- a/kernel/utsname_sysctl.c +++ b/kernel/utsname_sysctl.c | |||
| @@ -12,7 +12,6 @@ | |||
| 12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
| 13 | #include <linux/uts.h> | 13 | #include <linux/uts.h> |
| 14 | #include <linux/utsname.h> | 14 | #include <linux/utsname.h> |
| 15 | #include <linux/version.h> | ||
| 16 | #include <linux/sysctl.h> | 15 | #include <linux/sysctl.h> |
| 17 | 16 | ||
| 18 | static void *get_uts(ctl_table *table, int write) | 17 | static void *get_uts(ctl_table *table, int write) |
