Diffstat (limited to 'kernel')

 kernel/Makefile       |   3
 kernel/compat.c       |  23
 kernel/cpu.c          |  29
 kernel/exit.c         |   8
 kernel/fork.c         |   5
 kernel/futex.c        | 170
 kernel/futex_compat.c | 141
 kernel/module.c       |  20
 kernel/panic.c        |   4
 kernel/profile.c      |  53
 kernel/sched.c        | 146
 kernel/softlockup.c   |   2
 kernel/sys.c          | 327
 kernel/sys_ni.c       |   4
 14 files changed, 733 insertions(+), 202 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index ff1c11dc12cf..58908f9d156a 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -12,6 +12,9 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
 
 obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
 obj-$(CONFIG_FUTEX) += futex.o
+ifeq ($(CONFIG_COMPAT),y)
+obj-$(CONFIG_FUTEX) += futex_compat.o
+endif
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_SMP) += cpu.o spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
diff --git a/kernel/compat.c b/kernel/compat.c
index b9bdd1271f44..c1601a84f8d8 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -17,7 +17,6 @@
 #include <linux/time.h>
 #include <linux/signal.h>
 #include <linux/sched.h>        /* for MAX_SCHEDULE_TIMEOUT */
-#include <linux/futex.h>        /* for FUTEX_WAIT */
 #include <linux/syscalls.h>
 #include <linux/unistd.h>
 #include <linux/security.h>
@@ -239,28 +238,6 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
         return ret;
 }
 
-#ifdef CONFIG_FUTEX
-asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, int val,
-                struct compat_timespec __user *utime, u32 __user *uaddr2,
-                int val3)
-{
-        struct timespec t;
-        unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
-        int val2 = 0;
-
-        if ((op == FUTEX_WAIT) && utime) {
-                if (get_compat_timespec(&t, utime))
-                        return -EFAULT;
-                timeout = timespec_to_jiffies(&t) + 1;
-        }
-        if (op >= FUTEX_REQUEUE)
-                val2 = (int) (unsigned long) utime;
-
-        return do_futex((unsigned long)uaddr, op, val, timeout,
-                        (unsigned long)uaddr2, val2, val3);
-}
-#endif
-
 asmlinkage long compat_sys_setrlimit(unsigned int resource,
                 struct compat_rlimit __user *rlim)
 {
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 8be22bd80933..fe2b8d0bfe4c 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -18,7 +18,7 @@
 /* This protects CPUs going up and down... */
 static DECLARE_MUTEX(cpucontrol);
 
-static struct notifier_block *cpu_chain;
+static BLOCKING_NOTIFIER_HEAD(cpu_chain);
 
 #ifdef CONFIG_HOTPLUG_CPU
 static struct task_struct *lock_cpu_hotplug_owner;
@@ -71,21 +71,13 @@ EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
 /* Need to know about CPUs going up/down? */
 int register_cpu_notifier(struct notifier_block *nb)
 {
-        int ret;
-
-        if ((ret = lock_cpu_hotplug_interruptible()) != 0)
-                return ret;
-        ret = notifier_chain_register(&cpu_chain, nb);
-        unlock_cpu_hotplug();
-        return ret;
+        return blocking_notifier_chain_register(&cpu_chain, nb);
 }
 EXPORT_SYMBOL(register_cpu_notifier);
 
 void unregister_cpu_notifier(struct notifier_block *nb)
 {
-        lock_cpu_hotplug();
-        notifier_chain_unregister(&cpu_chain, nb);
-        unlock_cpu_hotplug();
+        blocking_notifier_chain_unregister(&cpu_chain, nb);
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
@@ -141,7 +133,7 @@ int cpu_down(unsigned int cpu)
                 goto out;
         }
 
-        err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
+        err = blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
                                                 (void *)(long)cpu);
         if (err == NOTIFY_BAD) {
                 printk("%s: attempt to take down CPU %u failed\n",
@@ -159,7 +151,7 @@ int cpu_down(unsigned int cpu)
         p = __stop_machine_run(take_cpu_down, NULL, cpu);
         if (IS_ERR(p)) {
                 /* CPU didn't die: tell everyone.  Can't complain. */
-                if (notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
+                if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
                                 (void *)(long)cpu) == NOTIFY_BAD)
                         BUG();
 
@@ -182,8 +174,8 @@ int cpu_down(unsigned int cpu)
         put_cpu();
 
         /* CPU is completely dead: tell everyone.  Too late to complain. */
-        if (notifier_call_chain(&cpu_chain, CPU_DEAD, (void *)(long)cpu)
-            == NOTIFY_BAD)
+        if (blocking_notifier_call_chain(&cpu_chain, CPU_DEAD,
+                        (void *)(long)cpu) == NOTIFY_BAD)
                 BUG();
 
         check_for_tasks(cpu);
@@ -211,7 +203,7 @@ int __devinit cpu_up(unsigned int cpu)
                 goto out;
         }
 
-        ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
+        ret = blocking_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
         if (ret == NOTIFY_BAD) {
                 printk("%s: attempt to bring up CPU %u failed\n",
                                 __FUNCTION__, cpu);
@@ -226,11 +218,12 @@ int __devinit cpu_up(unsigned int cpu)
         BUG_ON(!cpu_online(cpu));
 
         /* Now call notifier in preparation. */
-        notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
+        blocking_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
 
 out_notify:
         if (ret != 0)
-                notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
+                blocking_notifier_call_chain(&cpu_chain,
+                                CPU_UP_CANCELED, hcpu);
 out:
         unlock_cpu_hotplug();
         return ret;
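Callers of register_cpu_notifier() are unaffected by this conversion: the notifier_block shape and the return-value protocol (NOTIFY_OK, NOTIFY_BAD, ...) stay the same, only the chain's internal locking changes. As a reference point, a minimal sketch of a client of this API; the callback name and message are invented for illustration:

    #include <linux/cpu.h>
    #include <linux/notifier.h>

    /* Hypothetical client: log CPU hotplug transitions. */
    static int my_cpu_callback(struct notifier_block *nb,
                               unsigned long action, void *hcpu)
    {
            unsigned int cpu = (unsigned long)hcpu;

            if (action == CPU_ONLINE)
                    printk(KERN_INFO "cpu %u is now online\n", cpu);
            return NOTIFY_OK;       /* NOTIFY_BAD would veto CPU_UP_PREPARE */
    }

    static struct notifier_block my_cpu_nfb = {
            .notifier_call = my_cpu_callback,
    };

    /* in some initcall: register_cpu_notifier(&my_cpu_nfb); */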
diff --git a/kernel/exit.c b/kernel/exit.c
index 8037405e136e..a8c7efc7a681 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -31,6 +31,8 @@
 #include <linux/signal.h>
 #include <linux/cn_proc.h>
 #include <linux/mutex.h>
+#include <linux/futex.h>
+#include <linux/compat.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -852,6 +854,12 @@ fastcall NORET_TYPE void do_exit(long code)
                 exit_itimers(tsk->signal);
                 acct_process(code);
         }
+        if (unlikely(tsk->robust_list))
+                exit_robust_list(tsk);
+#ifdef CONFIG_COMPAT
+        if (unlikely(tsk->compat_robust_list))
+                compat_exit_robust_list(tsk);
+#endif
         exit_mm(tsk);
 
         exit_sem(tsk);
diff --git a/kernel/fork.c b/kernel/fork.c
index e0a2b449dea6..c49bd193b058 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1061,7 +1061,10 @@ static task_t *copy_process(unsigned long clone_flags,
          * Clear TID on mm_release()?
          */
         p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
-
+        p->robust_list = NULL;
+#ifdef CONFIG_COMPAT
+        p->compat_robust_list = NULL;
+#endif
         /*
          * sigaltstack should be cleared when sharing the same VM
          */
diff --git a/kernel/futex.c b/kernel/futex.c
index 5efa2f978032..9c9b2b6b22dd 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -8,6 +8,10 @@
  *  Removed page pinning, fix privately mapped COW pages and other cleanups
  *  (C) Copyright 2003, 2004 Jamie Lokier
  *
+ *  Robust futex support started by Ingo Molnar
+ *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
+ *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
+ *
  *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
  *  enough at me, Linus for the original (flawed) idea, Matthew
  *  Kirkwood for proof-of-concept implementation.
@@ -829,6 +833,172 @@ error:
         goto out;
 }
 
+/*
+ * Support for robust futexes: the kernel cleans up held futexes at
+ * thread exit time.
+ *
+ * Implementation: user-space maintains a per-thread list of locks it
+ * is holding. Upon do_exit(), the kernel carefully walks this list,
+ * and marks all locks that are owned by this thread with the
+ * FUTEX_OWNER_DEAD bit, and wakes up a waiter (if any). The list is
+ * always manipulated with the lock held, so the list is private and
+ * per-thread. Userspace also maintains a per-thread 'list_op_pending'
+ * field, to allow the kernel to clean up if the thread dies after
+ * acquiring the lock, but just before it could have added itself to
+ * the list. There can only be one such pending lock.
+ */
+
+/**
+ * sys_set_robust_list - set the robust-futex list head of a task
+ * @head: pointer to the list-head
+ * @len: length of the list-head, as userspace expects
+ */
+asmlinkage long
+sys_set_robust_list(struct robust_list_head __user *head,
+                    size_t len)
+{
+        /*
+         * The kernel knows only one size for now:
+         */
+        if (unlikely(len != sizeof(*head)))
+                return -EINVAL;
+
+        current->robust_list = head;
+
+        return 0;
+}
+
+/**
+ * sys_get_robust_list - get the robust-futex list head of a task
+ * @pid: pid of the process [zero for current task]
+ * @head_ptr: pointer to a list-head pointer, the kernel fills it in
+ * @len_ptr: pointer to a length field, the kernel fills in the header size
+ */
+asmlinkage long
+sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr,
+                    size_t __user *len_ptr)
+{
+        struct robust_list_head *head;
+        unsigned long ret;
+
+        if (!pid)
+                head = current->robust_list;
+        else {
+                struct task_struct *p;
+
+                ret = -ESRCH;
+                read_lock(&tasklist_lock);
+                p = find_task_by_pid(pid);
+                if (!p)
+                        goto err_unlock;
+                ret = -EPERM;
+                if ((current->euid != p->euid) && (current->euid != p->uid) &&
+                    !capable(CAP_SYS_PTRACE))
+                        goto err_unlock;
+                head = p->robust_list;
+                read_unlock(&tasklist_lock);
+        }
+
+        if (put_user(sizeof(*head), len_ptr))
+                return -EFAULT;
+        return put_user(head, head_ptr);
+
+err_unlock:
+        read_unlock(&tasklist_lock);
+
+        return ret;
+}
+
+/*
+ * Process a futex-list entry, check whether it's owned by the
+ * dying task, and do notification if so:
+ */
+int handle_futex_death(u32 __user *uaddr, struct task_struct *curr)
+{
+        u32 uval;
+
+retry:
+        if (get_user(uval, uaddr))
+                return -1;
+
+        if ((uval & FUTEX_TID_MASK) == curr->pid) {
+                /*
+                 * Ok, this dying thread is truly holding a futex
+                 * of interest. Set the OWNER_DIED bit atomically
+                 * via cmpxchg, and if the value had FUTEX_WAITERS
+                 * set, wake up a waiter (if any). (We have to do a
+                 * futex_wake() even if OWNER_DIED is already set -
+                 * to handle the rare but possible case of recursive
+                 * thread-death.) The rest of the cleanup is done in
+                 * userspace.
+                 */
+                if (futex_atomic_cmpxchg_inatomic(uaddr, uval,
+                                        uval | FUTEX_OWNER_DIED) != uval)
+                        goto retry;
+
+                if (uval & FUTEX_WAITERS)
+                        futex_wake((unsigned long)uaddr, 1);
+        }
+        return 0;
+}
+
+/*
+ * Walk curr->robust_list (very carefully, it's a userspace list!)
+ * and mark any locks found there dead, and notify any waiters.
+ *
+ * We silently return on any sign of list-walking problem.
+ */
+void exit_robust_list(struct task_struct *curr)
+{
+        struct robust_list_head __user *head = curr->robust_list;
+        struct robust_list __user *entry, *pending;
+        unsigned int limit = ROBUST_LIST_LIMIT;
+        unsigned long futex_offset;
+
+        /*
+         * Fetch the list head (which was registered earlier, via
+         * sys_set_robust_list()):
+         */
+        if (get_user(entry, &head->list.next))
+                return;
+        /*
+         * Fetch the relative futex offset:
+         */
+        if (get_user(futex_offset, &head->futex_offset))
+                return;
+        /*
+         * Fetch any possibly pending lock-add first, and handle it
+         * if it exists:
+         */
+        if (get_user(pending, &head->list_op_pending))
+                return;
+        if (pending)
+                handle_futex_death((void *)pending + futex_offset, curr);
+
+        while (entry != &head->list) {
+                /*
+                 * A pending lock might already be on the list, so
+                 * don't process it twice:
+                 */
+                if (entry != pending)
+                        if (handle_futex_death((void *)entry + futex_offset,
+                                               curr))
+                                return;
+                /*
+                 * Fetch the next entry in the list:
+                 */
+                if (get_user(entry, &entry->next))
+                        return;
+                /*
+                 * Avoid excessively long or circular lists:
+                 */
+                if (!--limit)
+                        break;
+
+                cond_resched();
+        }
+}
+
 long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout,
                 unsigned long uaddr2, int val2, int val3)
 {
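For context on the user-space half of the protocol described in the comment block above: each thread registers one robust_list_head and afterwards links and unlinks lock entries around it without further kernel involvement. A minimal registration sketch follows, with the caveat that the struct layout is copied here by hand to mirror linux/futex.h and that __NR_set_robust_list must already be wired up on the architecture in use (the arch syscall-table patches are not part of this diff):

    #include <sys/syscall.h>
    #include <unistd.h>

    /* Hand-copied mirror of the kernel's robust-list types. */
    struct robust_list {
            struct robust_list *next;
    };
    struct robust_list_head {
            struct robust_list list;             /* circular list of held locks */
            long futex_offset;                   /* entry -> futex word offset */
            struct robust_list *list_op_pending; /* lock currently in flight */
    };

    static __thread struct robust_list_head rhead;

    static void register_robust_list(void)
    {
            rhead.list.next = &rhead.list;       /* empty circular list */
            rhead.futex_offset = 0;              /* futex word sits at the entry */
            rhead.list_op_pending = 0;
            syscall(__NR_set_robust_list, &rhead, sizeof(rhead));
    }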
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
new file mode 100644
index 000000000000..9c077cf9aa84
--- /dev/null
+++ b/kernel/futex_compat.c
@@ -0,0 +1,141 @@
+/*
+ * linux/kernel/futex_compat.c
+ *
+ * Futex compatibility routines.
+ *
+ * Copyright 2006, Red Hat, Inc., Ingo Molnar
+ */
+
+#include <linux/linkage.h>
+#include <linux/compat.h>
+#include <linux/futex.h>
+
+#include <asm/uaccess.h>
+
+/*
+ * Walk curr->robust_list (very carefully, it's a userspace list!)
+ * and mark any locks found there dead, and notify any waiters.
+ *
+ * We silently return on any sign of list-walking problem.
+ */
+void compat_exit_robust_list(struct task_struct *curr)
+{
+        struct compat_robust_list_head __user *head = curr->compat_robust_list;
+        struct robust_list __user *entry, *pending;
+        compat_uptr_t uentry, upending;
+        unsigned int limit = ROBUST_LIST_LIMIT;
+        compat_long_t futex_offset;
+
+        /*
+         * Fetch the list head (which was registered earlier, via
+         * sys_set_robust_list()):
+         */
+        if (get_user(uentry, &head->list.next))
+                return;
+        entry = compat_ptr(uentry);
+        /*
+         * Fetch the relative futex offset:
+         */
+        if (get_user(futex_offset, &head->futex_offset))
+                return;
+        /*
+         * Fetch any possibly pending lock-add first, and handle it
+         * if it exists:
+         */
+        if (get_user(upending, &head->list_op_pending))
+                return;
+        pending = compat_ptr(upending);
+        if (upending)
+                handle_futex_death((void *)pending + futex_offset, curr);
+
+        while (compat_ptr(uentry) != &head->list) {
+                /*
+                 * A pending lock might already be on the list, so
+                 * don't process it twice:
+                 */
+                if (entry != pending)
+                        if (handle_futex_death((void *)entry + futex_offset,
+                                               curr))
+                                return;
+
+                /*
+                 * Fetch the next entry in the list:
+                 */
+                if (get_user(uentry, (compat_uptr_t *)&entry->next))
+                        return;
+                entry = compat_ptr(uentry);
+                /*
+                 * Avoid excessively long or circular lists:
+                 */
+                if (!--limit)
+                        break;
+
+                cond_resched();
+        }
+}
+
+asmlinkage long
+compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
+                           compat_size_t len)
+{
+        if (unlikely(len != sizeof(*head)))
+                return -EINVAL;
+
+        current->compat_robust_list = head;
+
+        return 0;
+}
+
+asmlinkage long
+compat_sys_get_robust_list(int pid, compat_uptr_t *head_ptr,
+                           compat_size_t __user *len_ptr)
+{
+        struct compat_robust_list_head *head;
+        unsigned long ret;
+
+        if (!pid)
+                head = current->compat_robust_list;
+        else {
+                struct task_struct *p;
+
+                ret = -ESRCH;
+                read_lock(&tasklist_lock);
+                p = find_task_by_pid(pid);
+                if (!p)
+                        goto err_unlock;
+                ret = -EPERM;
+                if ((current->euid != p->euid) && (current->euid != p->uid) &&
+                    !capable(CAP_SYS_PTRACE))
+                        goto err_unlock;
+                head = p->compat_robust_list;
+                read_unlock(&tasklist_lock);
+        }
+
+        if (put_user(sizeof(*head), len_ptr))
+                return -EFAULT;
+        return put_user(ptr_to_compat(head), head_ptr);
+
+err_unlock:
+        read_unlock(&tasklist_lock);
+
+        return ret;
+}
+
+asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
+                struct compat_timespec __user *utime, u32 __user *uaddr2,
+                u32 val3)
+{
+        struct timespec t;
+        unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
+        int val2 = 0;
+
+        if ((op == FUTEX_WAIT) && utime) {
+                if (get_compat_timespec(&t, utime))
+                        return -EFAULT;
+                timeout = timespec_to_jiffies(&t) + 1;
+        }
+        if (op >= FUTEX_REQUEUE)
+                val2 = (int) (unsigned long) utime;
+
+        return do_futex((unsigned long)uaddr, op, val, timeout, (unsigned long)uaddr2, val2, val3);
+}
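Note that the compat walk never dereferences a raw 32-bit value: each next pointer is fetched into a compat_uptr_t and only then widened with compat_ptr() before use. On most architectures that helper is simply a zero-extending cast (s390 additionally masks off the top bit); roughly:

    static inline void __user *compat_ptr(compat_uptr_t uptr)
    {
            return (void __user *)(unsigned long)uptr;
    }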
diff --git a/kernel/module.c b/kernel/module.c
index ddfe45ac2fd1..4fafd58038a0 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -64,26 +64,17 @@ static DEFINE_SPINLOCK(modlist_lock);
 static DEFINE_MUTEX(module_mutex);
 static LIST_HEAD(modules);
 
-static DEFINE_MUTEX(notify_mutex);
-static struct notifier_block * module_notify_list;
+static BLOCKING_NOTIFIER_HEAD(module_notify_list);
 
 int register_module_notifier(struct notifier_block * nb)
 {
-        int err;
-        mutex_lock(&notify_mutex);
-        err = notifier_chain_register(&module_notify_list, nb);
-        mutex_unlock(&notify_mutex);
-        return err;
+        return blocking_notifier_chain_register(&module_notify_list, nb);
 }
 EXPORT_SYMBOL(register_module_notifier);
 
 int unregister_module_notifier(struct notifier_block * nb)
 {
-        int err;
-        mutex_lock(&notify_mutex);
-        err = notifier_chain_unregister(&module_notify_list, nb);
-        mutex_unlock(&notify_mutex);
-        return err;
+        return blocking_notifier_chain_unregister(&module_notify_list, nb);
 }
 EXPORT_SYMBOL(unregister_module_notifier);
 
@@ -1816,9 +1807,8 @@ sys_init_module(void __user *umod,
         /* Drop lock so they can recurse */
         mutex_unlock(&module_mutex);
 
-        mutex_lock(&notify_mutex);
-        notifier_call_chain(&module_notify_list, MODULE_STATE_COMING, mod);
-        mutex_unlock(&notify_mutex);
+        blocking_notifier_call_chain(&module_notify_list,
+                        MODULE_STATE_COMING, mod);
 
         /* Start the module */
         if (mod->init != NULL)
diff --git a/kernel/panic.c b/kernel/panic.c
index acd95adddb93..f895c7c01d5b 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -29,7 +29,7 @@ static DEFINE_SPINLOCK(pause_on_oops_lock);
 int panic_timeout;
 EXPORT_SYMBOL(panic_timeout);
 
-struct notifier_block *panic_notifier_list;
+ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
 
 EXPORT_SYMBOL(panic_notifier_list);
 
@@ -97,7 +97,7 @@ NORET_TYPE void panic(const char * fmt, ...)
         smp_send_stop();
 #endif
 
-        notifier_call_chain(&panic_notifier_list, 0, buf);
+        atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
 
         if (!panic_blink)
                 panic_blink = no_blink;
diff --git a/kernel/profile.c b/kernel/profile.c
index ad81f799a9b4..5a730fdb1a2c 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -87,72 +87,52 @@ void __init profile_init(void)
 
 #ifdef CONFIG_PROFILING
 
-static DECLARE_RWSEM(profile_rwsem);
-static DEFINE_RWLOCK(handoff_lock);
-static struct notifier_block * task_exit_notifier;
-static struct notifier_block * task_free_notifier;
-static struct notifier_block * munmap_notifier;
+static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
+static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
+static BLOCKING_NOTIFIER_HEAD(munmap_notifier);
 
 void profile_task_exit(struct task_struct * task)
 {
-        down_read(&profile_rwsem);
-        notifier_call_chain(&task_exit_notifier, 0, task);
-        up_read(&profile_rwsem);
+        blocking_notifier_call_chain(&task_exit_notifier, 0, task);
 }
 
 int profile_handoff_task(struct task_struct * task)
 {
         int ret;
-        read_lock(&handoff_lock);
-        ret = notifier_call_chain(&task_free_notifier, 0, task);
-        read_unlock(&handoff_lock);
+        ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
         return (ret == NOTIFY_OK) ? 1 : 0;
 }
 
 void profile_munmap(unsigned long addr)
 {
-        down_read(&profile_rwsem);
-        notifier_call_chain(&munmap_notifier, 0, (void *)addr);
-        up_read(&profile_rwsem);
+        blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
 }
 
 int task_handoff_register(struct notifier_block * n)
 {
-        int err = -EINVAL;
-
-        write_lock(&handoff_lock);
-        err = notifier_chain_register(&task_free_notifier, n);
-        write_unlock(&handoff_lock);
-        return err;
+        return atomic_notifier_chain_register(&task_free_notifier, n);
 }
 
 int task_handoff_unregister(struct notifier_block * n)
 {
-        int err = -EINVAL;
-
-        write_lock(&handoff_lock);
-        err = notifier_chain_unregister(&task_free_notifier, n);
-        write_unlock(&handoff_lock);
-        return err;
+        return atomic_notifier_chain_unregister(&task_free_notifier, n);
 }
 
 int profile_event_register(enum profile_type type, struct notifier_block * n)
 {
         int err = -EINVAL;
 
-        down_write(&profile_rwsem);
-
         switch (type) {
                 case PROFILE_TASK_EXIT:
-                        err = notifier_chain_register(&task_exit_notifier, n);
+                        err = blocking_notifier_chain_register(
+                                        &task_exit_notifier, n);
                         break;
                 case PROFILE_MUNMAP:
-                        err = notifier_chain_register(&munmap_notifier, n);
+                        err = blocking_notifier_chain_register(
+                                        &munmap_notifier, n);
                         break;
         }
 
-        up_write(&profile_rwsem);
-
         return err;
 }
 
@@ -161,18 +141,17 @@ int profile_event_unregister(enum profile_type type, struct notifier_block * n)
 {
         int err = -EINVAL;
 
-        down_write(&profile_rwsem);
-
         switch (type) {
                 case PROFILE_TASK_EXIT:
-                        err = notifier_chain_unregister(&task_exit_notifier, n);
+                        err = blocking_notifier_chain_unregister(
+                                        &task_exit_notifier, n);
                         break;
                 case PROFILE_MUNMAP:
-                        err = notifier_chain_unregister(&munmap_notifier, n);
+                        err = blocking_notifier_chain_unregister(
+                                        &munmap_notifier, n);
                         break;
         }
 
-        up_write(&profile_rwsem);
         return err;
 }
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 78acdefeccca..7854ee516b92 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -145,7 +145,8 @@
         (v1) * (v2_max) / (v1_max)
 
 #define DELTA(p) \
-        (SCALE(TASK_NICE(p), 40, MAX_BONUS) + INTERACTIVE_DELTA)
+        (SCALE(TASK_NICE(p) + 20, 40, MAX_BONUS) - 20 * MAX_BONUS / 40 + \
+                INTERACTIVE_DELTA)
 
 #define TASK_INTERACTIVE(p) \
         ((p)->prio <= (p)->static_prio - DELTA(p))
@@ -2878,13 +2879,11 @@ asmlinkage void __sched schedule(void)
          * schedule() atomically, we ignore that path for now.
          * Otherwise, whine if we are scheduling when we should not be.
          */
-        if (likely(!current->exit_state)) {
-                if (unlikely(in_atomic())) {
-                        printk(KERN_ERR "BUG: scheduling while atomic: "
-                                "%s/0x%08x/%d\n",
-                                current->comm, preempt_count(), current->pid);
-                        dump_stack();
-                }
+        if (unlikely(in_atomic() && !current->exit_state)) {
+                printk(KERN_ERR "BUG: scheduling while atomic: "
+                        "%s/0x%08x/%d\n",
+                        current->comm, preempt_count(), current->pid);
+                dump_stack();
         }
         profile_hit(SCHED_PROFILING, __builtin_return_address(0));
 
@@ -5575,11 +5574,31 @@ static int cpu_to_cpu_group(int cpu)
 }
 #endif
 
+#ifdef CONFIG_SCHED_MC
+static DEFINE_PER_CPU(struct sched_domain, core_domains);
+static struct sched_group sched_group_core[NR_CPUS];
+#endif
+
+#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
+static int cpu_to_core_group(int cpu)
+{
+        return first_cpu(cpu_sibling_map[cpu]);
+}
+#elif defined(CONFIG_SCHED_MC)
+static int cpu_to_core_group(int cpu)
+{
+        return cpu;
+}
+#endif
+
 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
 static struct sched_group sched_group_phys[NR_CPUS];
 static int cpu_to_phys_group(int cpu)
 {
-#ifdef CONFIG_SCHED_SMT
+#if defined(CONFIG_SCHED_MC)
+        cpumask_t mask = cpu_coregroup_map(cpu);
+        return first_cpu(mask);
+#elif defined(CONFIG_SCHED_SMT)
         return first_cpu(cpu_sibling_map[cpu]);
 #else
         return cpu;
@@ -5602,6 +5621,32 @@ static int cpu_to_allnodes_group(int cpu)
 {
         return cpu_to_node(cpu);
 }
+static void init_numa_sched_groups_power(struct sched_group *group_head)
+{
+        struct sched_group *sg = group_head;
+        int j;
+
+        if (!sg)
+                return;
+next_sg:
+        for_each_cpu_mask(j, sg->cpumask) {
+                struct sched_domain *sd;
+
+                sd = &per_cpu(phys_domains, j);
+                if (j != first_cpu(sd->groups->cpumask)) {
+                        /*
+                         * Only add "power" once for each
+                         * physical package.
+                         */
+                        continue;
+                }
+
+                sg->cpu_power += sd->groups->cpu_power;
+        }
+        sg = sg->next;
+        if (sg != group_head)
+                goto next_sg;
+}
 #endif
 
 /*
@@ -5677,6 +5722,17 @@ void build_sched_domains(const cpumask_t *cpu_map)
                 sd->parent = p;
                 sd->groups = &sched_group_phys[group];
 
+#ifdef CONFIG_SCHED_MC
+                p = sd;
+                sd = &per_cpu(core_domains, i);
+                group = cpu_to_core_group(i);
+                *sd = SD_MC_INIT;
+                sd->span = cpu_coregroup_map(i);
+                cpus_and(sd->span, sd->span, *cpu_map);
+                sd->parent = p;
+                sd->groups = &sched_group_core[group];
+#endif
+
 #ifdef CONFIG_SCHED_SMT
                 p = sd;
                 sd = &per_cpu(cpu_domains, i);
@@ -5702,6 +5758,19 @@ void build_sched_domains(const cpumask_t *cpu_map)
         }
 #endif
 
+#ifdef CONFIG_SCHED_MC
+        /* Set up multi-core groups */
+        for_each_cpu_mask(i, *cpu_map) {
+                cpumask_t this_core_map = cpu_coregroup_map(i);
+                cpus_and(this_core_map, this_core_map, *cpu_map);
+                if (i != first_cpu(this_core_map))
+                        continue;
+                init_sched_build_groups(sched_group_core, this_core_map,
+                                        &cpu_to_core_group);
+        }
+#endif
+
+
         /* Set up physical groups */
         for (i = 0; i < MAX_NUMNODES; i++) {
                 cpumask_t nodemask = node_to_cpumask(i);
@@ -5798,51 +5867,38 @@ void build_sched_domains(const cpumask_t *cpu_map)
                         power = SCHED_LOAD_SCALE;
                 sd->groups->cpu_power = power;
 #endif
+#ifdef CONFIG_SCHED_MC
+                sd = &per_cpu(core_domains, i);
+                power = SCHED_LOAD_SCALE + (cpus_weight(sd->groups->cpumask)-1)
+                                            * SCHED_LOAD_SCALE / 10;
+                sd->groups->cpu_power = power;
 
                 sd = &per_cpu(phys_domains, i);
+
+                /*
+                 * This has to be < 2 * SCHED_LOAD_SCALE
+                 * Lets keep it SCHED_LOAD_SCALE, so that
+                 * while calculating NUMA group's cpu_power
+                 * we can simply do
+                 *  numa_group->cpu_power += phys_group->cpu_power;
+                 *
+                 * See "only add power once for each physical pkg"
+                 * comment below
+                 */
+                sd->groups->cpu_power = SCHED_LOAD_SCALE;
+#else
+                sd = &per_cpu(phys_domains, i);
                 power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
                         (cpus_weight(sd->groups->cpumask)-1) / 10;
                 sd->groups->cpu_power = power;
-
-#ifdef CONFIG_NUMA
-                sd = &per_cpu(allnodes_domains, i);
-                if (sd->groups) {
-                        power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
-                                (cpus_weight(sd->groups->cpumask)-1) / 10;
-                        sd->groups->cpu_power = power;
-                }
 #endif
         }
 
 #ifdef CONFIG_NUMA
-        for (i = 0; i < MAX_NUMNODES; i++) {
-                struct sched_group *sg = sched_group_nodes[i];
-                int j;
-
-                if (sg == NULL)
-                        continue;
-next_sg:
-                for_each_cpu_mask(j, sg->cpumask) {
-                        struct sched_domain *sd;
-                        int power;
-
-                        sd = &per_cpu(phys_domains, j);
-                        if (j != first_cpu(sd->groups->cpumask)) {
-                                /*
-                                 * Only add "power" once for each
-                                 * physical package.
-                                 */
-                                continue;
-                        }
-                        power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
-                                (cpus_weight(sd->groups->cpumask)-1) / 10;
+        for (i = 0; i < MAX_NUMNODES; i++)
+                init_numa_sched_groups_power(sched_group_nodes[i]);
 
-                        sg->cpu_power += power;
-                }
-                sg = sg->next;
-                if (sg != sched_group_nodes[i])
-                        goto next_sg;
-        }
+        init_numa_sched_groups_power(sched_group_allnodes);
 #endif
 
 /* Attach the domains */
@@ -5850,6 +5906,8 @@ next_sg:
                 struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
                 sd = &per_cpu(cpu_domains, i);
+#elif defined(CONFIG_SCHED_MC)
+                sd = &per_cpu(core_domains, i);
 #else
                 sd = &per_cpu(phys_domains, i);
 #endif
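A quick worked check of the DELTA() rewrite in the first sched.c hunk above, assuming this file's usual constants (MAX_BONUS = 10, INTERACTIVE_DELTA = 2): SCALE(v1, v1_max, v2_max) expands to v1 * v2_max / v1_max, and C integer division truncates toward zero. For nice -10 the old form gave -10 * 10 / 40 + 2 = -2 + 2 = 0, the negative numerator rounding up toward zero; the new form shifts nice into the non-negative range first, (-10 + 20) * 10 / 40 - 20 * 10 / 40 + 2 = 2 - 5 + 2 = -1. For nice >= 0 the two forms always agree, so the visible effect is confined to negative nice levels where the division is inexact: they now round the same way as the rest of the range when TASK_INTERACTIVE() is evaluated.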
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index d9b3d5847ed8..ced91e1ff564 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -152,5 +152,5 @@ __init void spawn_softlockup_task(void)
         cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
         register_cpu_notifier(&cpu_nfb);
 
-        notifier_chain_register(&panic_notifier_list, &panic_block);
+        atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
 }
diff --git a/kernel/sys.c b/kernel/sys.c
index 38bc73ede2ba..c93d37f71aef 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -95,99 +95,304 @@ int cad_pid = 1;
  * and the like. 
  */
 
-static struct notifier_block *reboot_notifier_list;
-static DEFINE_RWLOCK(notifier_lock);
+static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
+
+/*
+ * Notifier chain core routines.  The exported routines below
+ * are layered on top of these, with appropriate locking added.
+ */
+
+static int notifier_chain_register(struct notifier_block **nl,
+                struct notifier_block *n)
+{
+        while ((*nl) != NULL) {
+                if (n->priority > (*nl)->priority)
+                        break;
+                nl = &((*nl)->next);
+        }
+        n->next = *nl;
+        rcu_assign_pointer(*nl, n);
+        return 0;
+}
+
+static int notifier_chain_unregister(struct notifier_block **nl,
+                struct notifier_block *n)
+{
+        while ((*nl) != NULL) {
+                if ((*nl) == n) {
+                        rcu_assign_pointer(*nl, n->next);
+                        return 0;
+                }
+                nl = &((*nl)->next);
+        }
+        return -ENOENT;
+}
+
+static int __kprobes notifier_call_chain(struct notifier_block **nl,
+                unsigned long val, void *v)
+{
+        int ret = NOTIFY_DONE;
+        struct notifier_block *nb;
+
+        nb = rcu_dereference(*nl);
+        while (nb) {
+                ret = nb->notifier_call(nb, val, v);
+                if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
+                        break;
+                nb = rcu_dereference(nb->next);
+        }
+        return ret;
+}
+
+/*
+ * Atomic notifier chain routines.  Registration and unregistration
+ * use a mutex, and call_chain is synchronized by RCU (no locks).
+ */
 
 /**
- * notifier_chain_register - Add notifier to a notifier chain
- * @list: Pointer to root list pointer
+ * atomic_notifier_chain_register - Add notifier to an atomic notifier chain
+ * @nh: Pointer to head of the atomic notifier chain
  * @n: New entry in notifier chain
  *
- * Adds a notifier to a notifier chain.
+ * Adds a notifier to an atomic notifier chain.
  *
  * Currently always returns zero.
  */
+
+int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
+                struct notifier_block *n)
+{
+        unsigned long flags;
+        int ret;
+
+        spin_lock_irqsave(&nh->lock, flags);
+        ret = notifier_chain_register(&nh->head, n);
+        spin_unlock_irqrestore(&nh->lock, flags);
+        return ret;
+}
+
+EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
+
+/**
+ * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
+ * @nh: Pointer to head of the atomic notifier chain
+ * @n: Entry to remove from notifier chain
+ *
+ * Removes a notifier from an atomic notifier chain.
+ *
+ * Returns zero on success or %-ENOENT on failure.
+ */
+int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
+                struct notifier_block *n)
+{
+        unsigned long flags;
+        int ret;
+
+        spin_lock_irqsave(&nh->lock, flags);
+        ret = notifier_chain_unregister(&nh->head, n);
+        spin_unlock_irqrestore(&nh->lock, flags);
+        synchronize_rcu();
+        return ret;
+}
+
+EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
+
+/**
+ * atomic_notifier_call_chain - Call functions in an atomic notifier chain
+ * @nh: Pointer to head of the atomic notifier chain
+ * @val: Value passed unmodified to notifier function
+ * @v: Pointer passed unmodified to notifier function
+ *
+ * Calls each function in a notifier chain in turn.  The functions
+ * run in an atomic context, so they must not block.
+ * This routine uses RCU to synchronize with changes to the chain.
+ *
+ * If the return value of the notifier can be and'ed
+ * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain
+ * will return immediately, with the return value of
+ * the notifier function which halted execution.
+ * Otherwise the return value is the return value
+ * of the last notifier function called.
+ */
 
-int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
+int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
+                unsigned long val, void *v)
 {
-        write_lock(&notifier_lock);
-        while(*list)
-        {
-                if(n->priority > (*list)->priority)
-                        break;
-                list= &((*list)->next);
-        }
-        n->next = *list;
-        *list=n;
-        write_unlock(&notifier_lock);
-        return 0;
+        int ret;
+
+        rcu_read_lock();
+        ret = notifier_call_chain(&nh->head, val, v);
+        rcu_read_unlock();
+        return ret;
 }
 
-EXPORT_SYMBOL(notifier_chain_register);
+EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
+
+/*
+ * Blocking notifier chain routines.  All access to the chain is
+ * synchronized by an rwsem.
+ */
 
 /**
- * notifier_chain_unregister - Remove notifier from a notifier chain
- * @nl: Pointer to root list pointer
+ * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
+ * @nh: Pointer to head of the blocking notifier chain
  * @n: New entry in notifier chain
  *
- * Removes a notifier from a notifier chain.
+ * Adds a notifier to a blocking notifier chain.
+ * Must be called in process context.
  *
- * Returns zero on success, or %-ENOENT on failure.
+ * Currently always returns zero.
  */
 
-int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
+int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
+                struct notifier_block *n)
 {
-        write_lock(&notifier_lock);
-        while((*nl)!=NULL)
-        {
-                if((*nl)==n)
-                {
-                        *nl=n->next;
-                        write_unlock(&notifier_lock);
-                        return 0;
-                }
-                nl=&((*nl)->next);
-        }
-        write_unlock(&notifier_lock);
-        return -ENOENT;
+        int ret;
+
+        /*
+         * This code gets used during boot-up, when task switching is
+         * not yet working and interrupts must remain disabled.  At
+         * such times we must not call down_write().
+         */
+        if (unlikely(system_state == SYSTEM_BOOTING))
+                return notifier_chain_register(&nh->head, n);
+
+        down_write(&nh->rwsem);
+        ret = notifier_chain_register(&nh->head, n);
+        up_write(&nh->rwsem);
+        return ret;
 }
 
-EXPORT_SYMBOL(notifier_chain_unregister);
+EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
 
 /**
- * notifier_call_chain - Call functions in a notifier chain
- * @n: Pointer to root pointer of notifier chain
+ * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
+ * @nh: Pointer to head of the blocking notifier chain
+ * @n: Entry to remove from notifier chain
+ *
+ * Removes a notifier from a blocking notifier chain.
+ * Must be called from process context.
+ *
+ * Returns zero on success or %-ENOENT on failure.
+ */
+int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
+                struct notifier_block *n)
+{
+        int ret;
+
+        /*
+         * This code gets used during boot-up, when task switching is
+         * not yet working and interrupts must remain disabled.  At
+         * such times we must not call down_write().
+         */
+        if (unlikely(system_state == SYSTEM_BOOTING))
+                return notifier_chain_unregister(&nh->head, n);
+
+        down_write(&nh->rwsem);
+        ret = notifier_chain_unregister(&nh->head, n);
+        up_write(&nh->rwsem);
+        return ret;
+}
+
+EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
+
+/**
+ * blocking_notifier_call_chain - Call functions in a blocking notifier chain
+ * @nh: Pointer to head of the blocking notifier chain
  * @val: Value passed unmodified to notifier function
  * @v: Pointer passed unmodified to notifier function
  *
- * Calls each function in a notifier chain in turn.
+ * Calls each function in a notifier chain in turn.  The functions
+ * run in a process context, so they are allowed to block.
  *
- * If the return value of the notifier can be and'd
- * with %NOTIFY_STOP_MASK, then notifier_call_chain
+ * If the return value of the notifier can be and'ed
+ * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain
  * will return immediately, with the return value of
  * the notifier function which halted execution.
- * Otherwise, the return value is the return value
+ * Otherwise the return value is the return value
  * of the last notifier function called.
  */
 
-int __kprobes notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
+int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
+                unsigned long val, void *v)
 {
-        int ret=NOTIFY_DONE;
-        struct notifier_block *nb = *n;
+        int ret;
 
-        while(nb)
-        {
-                ret=nb->notifier_call(nb,val,v);
-                if(ret&NOTIFY_STOP_MASK)
-                {
-                        return ret;
-                }
-                nb=nb->next;
-        }
+        down_read(&nh->rwsem);
+        ret = notifier_call_chain(&nh->head, val, v);
+        up_read(&nh->rwsem);
         return ret;
 }
 
-EXPORT_SYMBOL(notifier_call_chain);
+EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
+
+/*
+ * Raw notifier chain routines.  There is no protection;
+ * the caller must provide it.  Use at your own risk!
+ */
+
+/**
+ * raw_notifier_chain_register - Add notifier to a raw notifier chain
+ * @nh: Pointer to head of the raw notifier chain
+ * @n: New entry in notifier chain
+ *
+ * Adds a notifier to a raw notifier chain.
+ * All locking must be provided by the caller.
+ *
+ * Currently always returns zero.
+ */
+
+int raw_notifier_chain_register(struct raw_notifier_head *nh,
+                struct notifier_block *n)
+{
+        return notifier_chain_register(&nh->head, n);
+}
+
+EXPORT_SYMBOL_GPL(raw_notifier_chain_register);
+
+/**
+ * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
+ * @nh: Pointer to head of the raw notifier chain
+ * @n: Entry to remove from notifier chain
+ *
+ * Removes a notifier from a raw notifier chain.
+ * All locking must be provided by the caller.
+ *
+ * Returns zero on success or %-ENOENT on failure.
+ */
+int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
+                struct notifier_block *n)
+{
+        return notifier_chain_unregister(&nh->head, n);
+}
+
+EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
+
+/**
+ * raw_notifier_call_chain - Call functions in a raw notifier chain
+ * @nh: Pointer to head of the raw notifier chain
+ * @val: Value passed unmodified to notifier function
+ * @v: Pointer passed unmodified to notifier function
+ *
+ * Calls each function in a notifier chain in turn.  The functions
+ * run in an undefined context.
+ * All locking must be provided by the caller.
+ *
+ * If the return value of the notifier can be and'ed
+ * with %NOTIFY_STOP_MASK then raw_notifier_call_chain
+ * will return immediately, with the return value of
+ * the notifier function which halted execution.
+ * Otherwise the return value is the return value
+ * of the last notifier function called.
+ */
+
+int raw_notifier_call_chain(struct raw_notifier_head *nh,
+                unsigned long val, void *v)
+{
+        return notifier_call_chain(&nh->head, val, v);
+}
+
+EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
 
 /**
  * register_reboot_notifier - Register function to be called at reboot time
@@ -196,13 +401,13 @@ EXPORT_SYMBOL(notifier_call_chain);
  * Registers a function with the list of functions
  * to be called at reboot time.
  *
- * Currently always returns zero, as notifier_chain_register
+ * Currently always returns zero, as blocking_notifier_chain_register
  * always returns zero.
  */
 
 int register_reboot_notifier(struct notifier_block * nb)
 {
-        return notifier_chain_register(&reboot_notifier_list, nb);
+        return blocking_notifier_chain_register(&reboot_notifier_list, nb);
 }
 
 EXPORT_SYMBOL(register_reboot_notifier);
@@ -219,7 +424,7 @@ EXPORT_SYMBOL(register_reboot_notifier);
 
 int unregister_reboot_notifier(struct notifier_block * nb)
 {
-        return notifier_chain_unregister(&reboot_notifier_list, nb);
+        return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
 }
 
 EXPORT_SYMBOL(unregister_reboot_notifier);
@@ -380,7 +585,7 @@ EXPORT_SYMBOL_GPL(emergency_restart);
 
 void kernel_restart_prepare(char *cmd)
 {
-        notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
+        blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
         system_state = SYSTEM_RESTART;
         device_shutdown();
 }
@@ -430,7 +635,7 @@ EXPORT_SYMBOL_GPL(kernel_kexec);
 
 void kernel_shutdown_prepare(enum system_states state)
 {
-        notifier_call_chain(&reboot_notifier_list,
+        blocking_notifier_call_chain(&reboot_notifier_list,
                 (state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
         system_state = state;
         device_shutdown();
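The split into typed heads is the point of this rework: a chain's context rules are now encoded in its type, and a head declared with one macro can only be used with its own family of calls. A usage sketch with invented names (my_event_chain, my_handler), assuming only the API added above:

    #include <linux/notifier.h>

    static ATOMIC_NOTIFIER_HEAD(my_event_chain); /* safe to call from IRQ context */

    static int my_handler(struct notifier_block *nb,
                          unsigned long event, void *data)
    {
            /* Must not block: the chain is walked under rcu_read_lock(). */
            return NOTIFY_DONE;
    }

    static struct notifier_block my_nb = {
            .notifier_call = my_handler,
    };

    void my_subsys_init(void)
    {
            atomic_notifier_chain_register(&my_event_chain, &my_nb);
    }

    void my_subsys_event(void *data)
    {
            atomic_notifier_call_chain(&my_event_chain, 0, data);
    }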
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 1067090db6b1..d82864c4a617 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -42,6 +42,10 @@ cond_syscall(sys_recvmsg);
 cond_syscall(sys_socketcall);
 cond_syscall(sys_futex);
 cond_syscall(compat_sys_futex);
+cond_syscall(sys_set_robust_list);
+cond_syscall(compat_sys_set_robust_list);
+cond_syscall(sys_get_robust_list);
+cond_syscall(compat_sys_get_robust_list);
 cond_syscall(sys_epoll_create);
 cond_syscall(sys_epoll_ctl);
 cond_syscall(sys_epoll_wait);
