Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c            |   8
-rw-r--r--  kernel/exit.c              |   2
-rw-r--r--  kernel/extable.c           |  16
-rw-r--r--  kernel/futex.c             | 351
-rw-r--r--  kernel/irq/manage.c        |  12
-rw-r--r--  kernel/lockdep.c           |  60
-rw-r--r--  kernel/lockdep_proc.c      |  28
-rw-r--r--  kernel/mutex.c             |  10
-rw-r--r--  kernel/notifier.c          |   8
-rw-r--r--  kernel/panic.c             |  32
-rw-r--r--  kernel/posix-cpu-timers.c  |  10
-rw-r--r--  kernel/posix-timers.c      |   6
-rw-r--r--  kernel/printk.c            |   2
-rw-r--r--  kernel/resource.c          |   9
-rw-r--r--  kernel/sched.c             |   3
-rw-r--r--  kernel/softirq.c           |  14
-rw-r--r--  kernel/softlockup.c        |   2
-rw-r--r--  kernel/stacktrace.c        |  11
-rw-r--r--  kernel/sys.c               |   2
19 files changed, 283 insertions(+), 303 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 8185a0f0959..2606d0fb4e5 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1024,7 +1024,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
 	if (ret == -EBUSY) {
 		mutex_unlock(&cgroup_mutex);
 		mutex_unlock(&inode->i_mutex);
-		goto drop_new_super;
+		goto free_cg_links;
 	}
 
 	/* EBUSY should be the only error here */
@@ -1073,10 +1073,11 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
 
 	return simple_set_mnt(mnt, sb);
 
+ free_cg_links:
+	free_cg_links(&tmp_cg_links);
  drop_new_super:
 	up_write(&sb->s_umount);
 	deactivate_super(sb);
-	free_cg_links(&tmp_cg_links);
 	return ret;
 }
 
@@ -2934,9 +2935,6 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
  again:
 	root = subsys->root;
 	if (root == &rootnode) {
-		printk(KERN_INFO
-		       "Not cloning cgroup for unused subsystem %s\n",
-		       subsys->name);
 		mutex_unlock(&cgroup_mutex);
 		return 0;
 	}
diff --git a/kernel/exit.c b/kernel/exit.c
index 2d8be7ebb0f..30fcdf16737 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1321,10 +1321,10 @@ static int wait_task_zombie(struct task_struct *p, int options,
 	 * group, which consolidates times for all threads in the
 	 * group including the group leader.
 	 */
+	thread_group_cputime(p, &cputime);
 	spin_lock_irq(&p->parent->sighand->siglock);
 	psig = p->parent->signal;
 	sig = p->signal;
-	thread_group_cputime(p, &cputime);
 	psig->cutime =
 		cputime_add(psig->cutime,
 			    cputime_add(cputime.utime,
diff --git a/kernel/extable.c b/kernel/extable.c
index a26cb2e1702..adf0cc9c02d 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -66,3 +66,19 @@ int kernel_text_address(unsigned long addr)
 		return 1;
 	return module_text_address(addr) != NULL;
 }
+
+/*
+ * On some architectures (PPC64, IA64) function pointers
+ * are actually only tokens to some data that then holds the
+ * real function address. As a result, to find if a function
+ * pointer is part of the kernel text, we need to do some
+ * special dereferencing first.
+ */
+int func_ptr_is_kernel_text(void *ptr)
+{
+	unsigned long addr;
+	addr = (unsigned long) dereference_function_descriptor(ptr);
+	if (core_kernel_text(addr))
+		return 1;
+	return module_text_address(addr) != NULL;
+}
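
The helper added above is aimed at callers holding a function pointer of unknown provenance; the CONFIG_DEBUG_NOTIFIERS hunk in kernel/notifier.c further down is its first user. A minimal sketch of the intended calling pattern (the callback and data here are hypothetical, not part of this diff):

	int (*cb)(void *data) = some_callback;	/* some_callback, data: hypothetical */

	/* Refuse to call through a pointer that no longer resolves to
	 * kernel or module text, e.g. after a module was unloaded
	 * without unregistering its callback. */
	if (!func_ptr_is_kernel_text(cb))
		WARN(1, "callback points outside kernel text\n");
	else
		cb(data);
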
diff --git a/kernel/futex.c b/kernel/futex.c
index 8af10027514..b4f87bac91c 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -92,11 +92,12 @@ struct futex_pi_state {
  * A futex_q has a woken state, just like tasks have TASK_RUNNING.
  * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
  * The order of wakup is always to make the first condition true, then
- * wake up q->waiters, then make the second condition true.
+ * wake up q->waiter, then make the second condition true.
  */
 struct futex_q {
 	struct plist_node list;
-	wait_queue_head_t waiters;
+	/* There can only be a single waiter */
+	wait_queue_head_t waiter;
 
 	/* Which hash list lock to use: */
 	spinlock_t *lock_ptr;
@@ -123,24 +124,6 @@ struct futex_hash_bucket {
 static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
 
 /*
- * Take mm->mmap_sem, when futex is shared
- */
-static inline void futex_lock_mm(struct rw_semaphore *fshared)
-{
-	if (fshared)
-		down_read(fshared);
-}
-
-/*
- * Release mm->mmap_sem, when the futex is shared
- */
-static inline void futex_unlock_mm(struct rw_semaphore *fshared)
-{
-	if (fshared)
-		up_read(fshared);
-}
-
-/*
  * We hash on the keys returned from get_futex_key (see below).
  */
 static struct futex_hash_bucket *hash_futex(union futex_key *key)
@@ -161,6 +144,45 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
 		&& key1->both.offset == key2->both.offset);
 }
 
+/*
+ * Take a reference to the resource addressed by a key.
+ * Can be called while holding spinlocks.
+ *
+ */
+static void get_futex_key_refs(union futex_key *key)
+{
+	if (!key->both.ptr)
+		return;
+
+	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
+	case FUT_OFF_INODE:
+		atomic_inc(&key->shared.inode->i_count);
+		break;
+	case FUT_OFF_MMSHARED:
+		atomic_inc(&key->private.mm->mm_count);
+		break;
+	}
+}
+
+/*
+ * Drop a reference to the resource addressed by a key.
+ * The hash bucket spinlock must not be held.
+ */
+static void drop_futex_key_refs(union futex_key *key)
+{
+	if (!key->both.ptr)
+		return;
+
+	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
+	case FUT_OFF_INODE:
+		iput(key->shared.inode);
+		break;
+	case FUT_OFF_MMSHARED:
+		mmdrop(key->private.mm);
+		break;
+	}
+}
+
 /**
  * get_futex_key - Get parameters which are the keys for a futex.
  * @uaddr: virtual address of the futex
@@ -179,12 +201,10 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
  * For other futexes, it points to &current->mm->mmap_sem and
  * caller must have taken the reader lock. but NOT any spinlocks.
  */
-static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
-			 union futex_key *key)
+static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
 {
 	unsigned long address = (unsigned long)uaddr;
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
 	struct page *page;
 	int err;
 
@@ -208,100 +228,50 @@ static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
 			return -EFAULT;
 		key->private.mm = mm;
 		key->private.address = address;
+		get_futex_key_refs(key);
 		return 0;
 	}
-	/*
-	 * The futex is hashed differently depending on whether
-	 * it's in a shared or private mapping. So check vma first.
-	 */
-	vma = find_extend_vma(mm, address);
-	if (unlikely(!vma))
-		return -EFAULT;
 
-	/*
-	 * Permissions.
-	 */
-	if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ))
-		return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES;
+again:
+	err = get_user_pages_fast(address, 1, 0, &page);
+	if (err < 0)
+		return err;
+
+	lock_page(page);
+	if (!page->mapping) {
+		unlock_page(page);
+		put_page(page);
+		goto again;
+	}
 
 	/*
 	 * Private mappings are handled in a simple way.
 	 *
 	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
 	 * it's a read-only handle, it's expected that futexes attach to
-	 * the object not the particular process. Therefore we use
-	 * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
-	 * mappings of _writable_ handles.
+	 * the object not the particular process.
 	 */
-	if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
-		key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */
+	if (PageAnon(page)) {
+		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
 		key->private.mm = mm;
 		key->private.address = address;
-		return 0;
+	} else {
+		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
+		key->shared.inode = page->mapping->host;
+		key->shared.pgoff = page->index;
 	}
 
-	/*
-	 * Linear file mappings are also simple.
-	 */
-	key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
-	key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
-	if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
-		key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
-				     + vma->vm_pgoff);
-		return 0;
-	}
+	get_futex_key_refs(key);
 
-	/*
-	 * We could walk the page table to read the non-linear
-	 * pte, and get the page index without fetching the page
-	 * from swap. But that's a lot of code to duplicate here
-	 * for a rare case, so we simply fetch the page.
-	 */
-	err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
-	if (err >= 0) {
-		key->shared.pgoff =
-			page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-		put_page(page);
-		return 0;
-	}
-	return err;
-}
+	unlock_page(page);
+	put_page(page);
+	return 0;
+}
 
-/*
- * Take a reference to the resource addressed by a key.
- * Can be called while holding spinlocks.
- *
- */
-static void get_futex_key_refs(union futex_key *key)
-{
-	if (key->both.ptr == NULL)
-		return;
-	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
-	case FUT_OFF_INODE:
-		atomic_inc(&key->shared.inode->i_count);
-		break;
-	case FUT_OFF_MMSHARED:
-		atomic_inc(&key->private.mm->mm_count);
-		break;
-	}
-}
-
-/*
- * Drop a reference to the resource addressed by a key.
- * The hash bucket spinlock must not be held.
- */
-static void drop_futex_key_refs(union futex_key *key)
-{
-	if (!key->both.ptr)
-		return;
-	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
-	case FUT_OFF_INODE:
-		iput(key->shared.inode);
-		break;
-	case FUT_OFF_MMSHARED:
-		mmdrop(key->private.mm);
-		break;
-	}
-}
+static inline
+void put_futex_key(int fshared, union futex_key *key)
+{
+	drop_futex_key_refs(key);
+}
 
 static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
@@ -328,10 +298,8 @@ static int get_futex_value_locked(u32 *dest, u32 __user *from)
 
 /*
  * Fault handling.
- * if fshared is non NULL, current->mm->mmap_sem is already held
  */
-static int futex_handle_fault(unsigned long address,
-			      struct rw_semaphore *fshared, int attempt)
+static int futex_handle_fault(unsigned long address, int attempt)
 {
 	struct vm_area_struct * vma;
 	struct mm_struct *mm = current->mm;
@@ -340,8 +308,7 @@ static int futex_handle_fault(unsigned long address,
 	if (attempt > 2)
 		return ret;
 
-	if (!fshared)
-		down_read(&mm->mmap_sem);
+	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
 	if (vma && address >= vma->vm_start &&
 	    (vma->vm_flags & VM_WRITE)) {
@@ -361,8 +328,7 @@ static int futex_handle_fault(unsigned long address,
 			current->min_flt++;
 		}
 	}
-	if (!fshared)
-		up_read(&mm->mmap_sem);
+	up_read(&mm->mmap_sem);
 	return ret;
 }
 
@@ -385,6 +351,7 @@ static int refill_pi_state_cache(void)
 	/* pi_mutex gets initialized later */
 	pi_state->owner = NULL;
 	atomic_set(&pi_state->refcount, 1);
+	pi_state->key = FUTEX_KEY_INIT;
 
 	current->pi_state_cache = pi_state;
 
@@ -462,7 +429,7 @@ void exit_pi_state_list(struct task_struct *curr)
 	struct list_head *next, *head = &curr->pi_state_list;
 	struct futex_pi_state *pi_state;
 	struct futex_hash_bucket *hb;
-	union futex_key key;
+	union futex_key key = FUTEX_KEY_INIT;
 
 	if (!futex_cmpxchg_enabled)
 		return;
@@ -607,7 +574,7 @@ static void wake_futex(struct futex_q *q)
 	 * The lock in wake_up_all() is a crucial memory barrier after the
 	 * plist_del() and also before assigning to q->lock_ptr.
 	 */
-	wake_up_all(&q->waiters);
+	wake_up(&q->waiter);
 	/*
 	 * The waiting task can free the futex_q as soon as this is written,
 	 * without taking any locks. This must come last.
@@ -719,20 +686,17 @@ double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
  * Wake up all waiters hashed on the physical page that is mapped
  * to this virtual address:
  */
-static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
-		      int nr_wake, u32 bitset)
+static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
 {
 	struct futex_hash_bucket *hb;
 	struct futex_q *this, *next;
 	struct plist_head *head;
-	union futex_key key;
+	union futex_key key = FUTEX_KEY_INIT;
 	int ret;
 
 	if (!bitset)
 		return -EINVAL;
 
-	futex_lock_mm(fshared);
-
 	ret = get_futex_key(uaddr, fshared, &key);
 	if (unlikely(ret != 0))
 		goto out;
@@ -760,7 +724,7 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
 
 	spin_unlock(&hb->lock);
 out:
-	futex_unlock_mm(fshared);
+	put_futex_key(fshared, &key);
 	return ret;
 }
 
@@ -769,19 +733,16 @@ out:
  * to this virtual address:
  */
 static int
-futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
-	      u32 __user *uaddr2,
+futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
 	      int nr_wake, int nr_wake2, int op)
 {
-	union futex_key key1, key2;
+	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
 	struct futex_hash_bucket *hb1, *hb2;
 	struct plist_head *head;
 	struct futex_q *this, *next;
 	int ret, op_ret, attempt = 0;
 
 retryfull:
-	futex_lock_mm(fshared);
-
 	ret = get_futex_key(uaddr1, fshared, &key1);
 	if (unlikely(ret != 0))
 		goto out;
@@ -826,18 +787,12 @@ retry:
 	 */
 	if (attempt++) {
 		ret = futex_handle_fault((unsigned long)uaddr2,
-					 fshared, attempt);
+					 attempt);
 		if (ret)
 			goto out;
 		goto retry;
 	}
 
-	/*
-	 * If we would have faulted, release mmap_sem,
-	 * fault it in and start all over again.
-	 */
-	futex_unlock_mm(fshared);
-
 	ret = get_user(dummy, uaddr2);
 	if (ret)
 		return ret;
@@ -873,7 +828,8 @@ retry:
 	if (hb1 != hb2)
 		spin_unlock(&hb2->lock);
 out:
-	futex_unlock_mm(fshared);
+	put_futex_key(fshared, &key2);
+	put_futex_key(fshared, &key1);
 
 	return ret;
 }
@@ -882,19 +838,16 @@ out:
  * Requeue all waiters hashed on one physical page to another
  * physical page.
  */
-static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
-			 u32 __user *uaddr2,
+static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
 			 int nr_wake, int nr_requeue, u32 *cmpval)
 {
-	union futex_key key1, key2;
+	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
 	struct futex_hash_bucket *hb1, *hb2;
 	struct plist_head *head1;
 	struct futex_q *this, *next;
 	int ret, drop_count = 0;
 
  retry:
-	futex_lock_mm(fshared);
-
 	ret = get_futex_key(uaddr1, fshared, &key1);
 	if (unlikely(ret != 0))
 		goto out;
@@ -917,12 +870,6 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
 	if (hb1 != hb2)
 		spin_unlock(&hb2->lock);
 
-	/*
-	 * If we would have faulted, release mmap_sem, fault
-	 * it in and start all over again.
-	 */
-	futex_unlock_mm(fshared);
-
 	ret = get_user(curval, uaddr1);
 
 	if (!ret)
@@ -974,7 +921,8 @@ out_unlock:
 	drop_futex_key_refs(&key1);
 
 out:
-	futex_unlock_mm(fshared);
+	put_futex_key(fshared, &key2);
+	put_futex_key(fshared, &key1);
 	return ret;
 }
 
@@ -983,7 +931,7 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 {
 	struct futex_hash_bucket *hb;
 
-	init_waitqueue_head(&q->waiters);
+	init_waitqueue_head(&q->waiter);
 
 	get_futex_key_refs(&q->key);
 	hb = hash_futex(&q->key);
@@ -1096,8 +1044,7 @@ static void unqueue_me_pi(struct futex_q *q)
 	 * private futexes.
 	 */
 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-				struct task_struct *newowner,
-				struct rw_semaphore *fshared)
+				struct task_struct *newowner, int fshared)
 {
 	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
 	struct futex_pi_state *pi_state = q->pi_state;
@@ -1176,7 +1123,7 @@ retry:
 handle_fault:
 	spin_unlock(q->lock_ptr);
 
-	ret = futex_handle_fault((unsigned long)uaddr, fshared, attempt++);
+	ret = futex_handle_fault((unsigned long)uaddr, attempt++);
 
 	spin_lock(q->lock_ptr);
 
@@ -1196,12 +1143,13 @@ handle_fault:
  * In case we must use restart_block to restart a futex_wait,
  * we encode in the 'flags' shared capability
  */
-#define FLAGS_SHARED		1
+#define FLAGS_SHARED		0x01
+#define FLAGS_CLOCKRT		0x02
 
 static long futex_wait_restart(struct restart_block *restart);
 
-static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
-		      u32 val, ktime_t *abs_time, u32 bitset)
+static int futex_wait(u32 __user *uaddr, int fshared,
+		      u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
 {
 	struct task_struct *curr = current;
 	DECLARE_WAITQUEUE(wait, curr);
@@ -1218,8 +1166,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 	q.pi_state = NULL;
 	q.bitset = bitset;
  retry:
-	futex_lock_mm(fshared);
-
+	q.key = FUTEX_KEY_INIT;
 	ret = get_futex_key(uaddr, fshared, &q.key);
 	if (unlikely(ret != 0))
 		goto out_release_sem;
@@ -1251,12 +1198,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 	if (unlikely(ret)) {
 		queue_unlock(&q, hb);
 
-		/*
-		 * If we would have faulted, release mmap_sem, fault it in and
-		 * start all over again.
-		 */
-		futex_unlock_mm(fshared);
-
 		ret = get_user(uval, uaddr);
 
 		if (!ret)
@@ -1271,12 +1212,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 	queue_me(&q, hb);
 
 	/*
-	 * Now the futex is queued and we have checked the data, we
-	 * don't want to hold mmap_sem while we sleep.
-	 */
-	futex_unlock_mm(fshared);
-
-	/*
 	 * There might have been scheduling since the queue_me(), as we
 	 * cannot hold a spinlock across the get_user() in case it
 	 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
@@ -1287,7 +1222,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 
 	/* add_wait_queue is the barrier after __set_current_state. */
 	__set_current_state(TASK_INTERRUPTIBLE);
-	add_wait_queue(&q.waiters, &wait);
+	add_wait_queue(&q.waiter, &wait);
 	/*
 	 * !plist_node_empty() is safe here without any lock.
 	 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
@@ -1300,8 +1235,10 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 		slack = current->timer_slack_ns;
 		if (rt_task(current))
 			slack = 0;
-		hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC,
-				      HRTIMER_MODE_ABS);
+		hrtimer_init_on_stack(&t.timer,
+				      clockrt ? CLOCK_REALTIME :
+				      CLOCK_MONOTONIC,
+				      HRTIMER_MODE_ABS);
 		hrtimer_init_sleeper(&t, current);
 		hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack);
 
@@ -1356,6 +1293,8 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 
 		if (fshared)
 			restart->futex.flags |= FLAGS_SHARED;
+		if (clockrt)
+			restart->futex.flags |= FLAGS_CLOCKRT;
 		return -ERESTART_RESTARTBLOCK;
 	}
 
@@ -1363,7 +1302,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 	queue_unlock(&q, hb);
 
  out_release_sem:
-	futex_unlock_mm(fshared);
+	put_futex_key(fshared, &q.key);
 	return ret;
 }
 
@@ -1371,15 +1310,16 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 static long futex_wait_restart(struct restart_block *restart)
 {
 	u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
-	struct rw_semaphore *fshared = NULL;
+	int fshared = 0;
 	ktime_t t;
 
 	t.tv64 = restart->futex.time;
 	restart->fn = do_no_restart_syscall;
 	if (restart->futex.flags & FLAGS_SHARED)
-		fshared = &current->mm->mmap_sem;
+		fshared = 1;
 	return (long)futex_wait(uaddr, fshared, restart->futex.val, &t,
-				restart->futex.bitset);
+				restart->futex.bitset,
+				restart->futex.flags & FLAGS_CLOCKRT);
 }
 
 
@@ -1389,7 +1329,7 @@ static long futex_wait_restart(struct restart_block *restart)
  * if there are waiters then it will block, it does PI, etc. (Due to
  * races the kernel might see a 0 value of the futex too.)
  */
-static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
+static int futex_lock_pi(u32 __user *uaddr, int fshared,
 			 int detect, ktime_t *time, int trylock)
 {
 	struct hrtimer_sleeper timeout, *to = NULL;
@@ -1412,8 +1352,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 
 	q.pi_state = NULL;
  retry:
-	futex_lock_mm(fshared);
-
+	q.key = FUTEX_KEY_INIT;
 	ret = get_futex_key(uaddr, fshared, &q.key);
 	if (unlikely(ret != 0))
 		goto out_release_sem;
@@ -1502,7 +1441,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 		 * exit to complete.
 		 */
 		queue_unlock(&q, hb);
-		futex_unlock_mm(fshared);
 		cond_resched();
 		goto retry;
 
@@ -1534,12 +1472,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 	 */
 	queue_me(&q, hb);
 
-	/*
-	 * Now the futex is queued and we have checked the data, we
-	 * don't want to hold mmap_sem while we sleep.
-	 */
-	futex_unlock_mm(fshared);
-
 	WARN_ON(!q.pi_state);
 	/*
 	 * Block on the PI mutex:
@@ -1552,7 +1484,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 		ret = ret ? 0 : -EWOULDBLOCK;
 	}
 
-	futex_lock_mm(fshared);
 	spin_lock(q.lock_ptr);
 
 	if (!ret) {
@@ -1618,7 +1549,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 
 	/* Unqueue and drop the lock */
 	unqueue_me_pi(&q);
-	futex_unlock_mm(fshared);
 
 	if (to)
 		destroy_hrtimer_on_stack(&to->timer);
@@ -1628,34 +1558,30 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 	queue_unlock(&q, hb);
 
  out_release_sem:
-	futex_unlock_mm(fshared);
+	put_futex_key(fshared, &q.key);
 	if (to)
 		destroy_hrtimer_on_stack(&to->timer);
 	return ret;
 
  uaddr_faulted:
 	/*
-	 * We have to r/w *(int __user *)uaddr, but we can't modify it
-	 * non-atomically. Therefore, if get_user below is not
-	 * enough, we need to handle the fault ourselves, while
-	 * still holding the mmap_sem.
-	 *
-	 * ... and hb->lock. :-) --ANK
+	 * We have to r/w *(int __user *)uaddr, and we have to modify it
+	 * atomically. Therefore, if we continue to fault after get_user()
+	 * below, we need to handle the fault ourselves, while still holding
+	 * the mmap_sem. This can occur if the uaddr is under contention as
+	 * we have to drop the mmap_sem in order to call get_user().
 	 */
 	queue_unlock(&q, hb);
 
 	if (attempt++) {
-		ret = futex_handle_fault((unsigned long)uaddr, fshared,
-					 attempt);
+		ret = futex_handle_fault((unsigned long)uaddr, attempt);
 		if (ret)
 			goto out_release_sem;
 		goto retry_unlocked;
 	}
 
-	futex_unlock_mm(fshared);
-
 	ret = get_user(uval, uaddr);
-	if (!ret && (uval != -EFAULT))
+	if (!ret)
 		goto retry;
 
 	if (to)
@@ -1668,13 +1594,13 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
  * This is the in-kernel slowpath: we look up the PI state (if any),
  * and do the rt-mutex unlock.
  */
-static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared)
+static int futex_unlock_pi(u32 __user *uaddr, int fshared)
 {
 	struct futex_hash_bucket *hb;
 	struct futex_q *this, *next;
 	u32 uval;
 	struct plist_head *head;
-	union futex_key key;
+	union futex_key key = FUTEX_KEY_INIT;
 	int ret, attempt = 0;
 
 retry:
@@ -1685,10 +1611,6 @@ retry:
 	 */
 	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
 		return -EPERM;
-	/*
-	 * First take all the futex related locks:
-	 */
-	futex_lock_mm(fshared);
 
 	ret = get_futex_key(uaddr, fshared, &key);
 	if (unlikely(ret != 0))
@@ -1747,34 +1669,30 @@ retry_unlocked:
 out_unlock:
 	spin_unlock(&hb->lock);
 out:
-	futex_unlock_mm(fshared);
+	put_futex_key(fshared, &key);
 
 	return ret;
 
 pi_faulted:
 	/*
-	 * We have to r/w *(int __user *)uaddr, but we can't modify it
-	 * non-atomically. Therefore, if get_user below is not
-	 * enough, we need to handle the fault ourselves, while
-	 * still holding the mmap_sem.
-	 *
-	 * ... and hb->lock. --ANK
+	 * We have to r/w *(int __user *)uaddr, and we have to modify it
+	 * atomically. Therefore, if we continue to fault after get_user()
+	 * below, we need to handle the fault ourselves, while still holding
+	 * the mmap_sem. This can occur if the uaddr is under contention as
+	 * we have to drop the mmap_sem in order to call get_user().
 	 */
 	spin_unlock(&hb->lock);
 
 	if (attempt++) {
-		ret = futex_handle_fault((unsigned long)uaddr, fshared,
-					 attempt);
+		ret = futex_handle_fault((unsigned long)uaddr, attempt);
 		if (ret)
 			goto out;
 		uval = 0;
 		goto retry_unlocked;
 	}
 
-	futex_unlock_mm(fshared);
-
 	ret = get_user(uval, uaddr);
-	if (!ret && (uval != -EFAULT))
+	if (!ret)
 		goto retry;
 
 	return ret;
@@ -1898,8 +1816,7 @@ retry:
 	 * PI futexes happens in exit_pi_state():
 	 */
 	if (!pi && (uval & FUTEX_WAITERS))
-		futex_wake(uaddr, &curr->mm->mmap_sem, 1,
-			   FUTEX_BITSET_MATCH_ANY);
+		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
 	}
 	return 0;
 }
@@ -1993,18 +1910,22 @@ void exit_robust_list(struct task_struct *curr)
 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
 		u32 __user *uaddr2, u32 val2, u32 val3)
 {
-	int ret = -ENOSYS;
+	int clockrt, ret = -ENOSYS;
 	int cmd = op & FUTEX_CMD_MASK;
-	struct rw_semaphore *fshared = NULL;
+	int fshared = 0;
 
 	if (!(op & FUTEX_PRIVATE_FLAG))
-		fshared = &current->mm->mmap_sem;
+		fshared = 1;
+
+	clockrt = op & FUTEX_CLOCK_REALTIME;
+	if (clockrt && cmd != FUTEX_WAIT_BITSET)
+		return -ENOSYS;
 
 	switch (cmd) {
 	case FUTEX_WAIT:
 		val3 = FUTEX_BITSET_MATCH_ANY;
 	case FUTEX_WAIT_BITSET:
-		ret = futex_wait(uaddr, fshared, val, timeout, val3);
+		ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
 		break;
 	case FUTEX_WAKE:
 		val3 = FUTEX_BITSET_MATCH_ANY;
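
The FUTEX_CLOCK_REALTIME handling above is deliberately restricted to FUTEX_WAIT_BITSET. A minimal userspace sketch of exercising the new flag (the wrapper is illustrative; the constants come from a <linux/futex.h> that includes this change):

	#include <linux/futex.h>
	#include <sys/syscall.h>
	#include <time.h>
	#include <unistd.h>

	/* Wait while *uaddr still holds val, with an absolute
	 * CLOCK_REALTIME deadline rather than the default
	 * CLOCK_MONOTONIC one. */
	static int futex_wait_abs_realtime(int *uaddr, int val,
					   const struct timespec *deadline)
	{
		return syscall(SYS_futex, uaddr,
			       FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME,
			       val, deadline, NULL, FUTEX_BITSET_MATCH_ANY);
	}
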
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 801addda3c4..e9d1c8205a3 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -673,6 +673,18 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	struct irq_desc *desc;
 	int retval;
 
+	/*
+	 * handle_IRQ_event() always ignores IRQF_DISABLED except for
+	 * the _first_ irqaction (sigh). That can cause oopsing, but
+	 * the behavior is classified as "will not fix" so we need to
+	 * start nudging drivers away from using that idiom.
+	 */
+	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
+			== (IRQF_SHARED|IRQF_DISABLED))
+		pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
+				"guaranteed on shared IRQs\n",
+				irq, devname);
+
 #ifdef CONFIG_LOCKDEP
 	/*
 	 * Lockdep wants atomic interrupt handlers:
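
The idiom the new warning nudges drivers away from looks like this (a sketch; the handler, name, and device pointer are placeholders):

	/* This combination now triggers the pr_warning() above:
	 * IRQF_DISABLED is only honored for the first irqaction on a
	 * shared line, so it cannot be guaranteed here. */
	ret = request_irq(irq, my_handler,
			  IRQF_SHARED | IRQF_DISABLED, "mydev", dev);
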
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 46a404173db..4fa6eeb4e8a 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -136,16 +136,16 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 
-static int lock_contention_point(struct lock_class *class, unsigned long ip)
+static int lock_point(unsigned long points[], unsigned long ip)
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
-		if (class->contention_point[i] == 0) {
-			class->contention_point[i] = ip;
+	for (i = 0; i < LOCKSTAT_POINTS; i++) {
+		if (points[i] == 0) {
+			points[i] = ip;
 			break;
 		}
-		if (class->contention_point[i] == ip)
+		if (points[i] == ip)
 			break;
 	}
 
@@ -185,6 +185,9 @@ struct lock_class_stats lock_stats(struct lock_class *class)
 	for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
 		stats.contention_point[i] += pcs->contention_point[i];
 
+	for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
+		stats.contending_point[i] += pcs->contending_point[i];
+
 	lock_time_add(&pcs->read_waittime, &stats.read_waittime);
 	lock_time_add(&pcs->write_waittime, &stats.write_waittime);
 
@@ -209,6 +212,7 @@ void clear_lock_stats(struct lock_class *class)
 		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
 	}
 	memset(class->contention_point, 0, sizeof(class->contention_point));
+	memset(class->contending_point, 0, sizeof(class->contending_point));
 }
 
 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
@@ -287,14 +291,12 @@ void lockdep_off(void)
 {
 	current->lockdep_recursion++;
 }
-
 EXPORT_SYMBOL(lockdep_off);
 
 void lockdep_on(void)
 {
 	current->lockdep_recursion--;
 }
-
 EXPORT_SYMBOL(lockdep_on);
 
 /*
@@ -576,7 +578,8 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 /*
  * printk all lock dependencies starting at <entry>:
  */
-static void print_lock_dependencies(struct lock_class *class, int depth)
+static void __used
+print_lock_dependencies(struct lock_class *class, int depth)
 {
 	struct lock_list *entry;
 
@@ -2508,7 +2511,6 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	if (subclass)
 		register_lock_class(lock, subclass, 1);
 }
-
 EXPORT_SYMBOL_GPL(lockdep_init_map);
 
 /*
@@ -2689,8 +2691,9 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
 }
 
 static int
-__lock_set_subclass(struct lockdep_map *lock,
-		    unsigned int subclass, unsigned long ip)
+__lock_set_class(struct lockdep_map *lock, const char *name,
+		 struct lock_class_key *key, unsigned int subclass,
+		 unsigned long ip)
 {
 	struct task_struct *curr = current;
 	struct held_lock *hlock, *prev_hlock;
@@ -2717,6 +2720,7 @@ __lock_set_subclass(struct lockdep_map *lock,
 		return print_unlock_inbalance_bug(curr, lock, ip);
 
 found_it:
+	lockdep_init_map(lock, name, key, 0);
 	class = register_lock_class(lock, subclass, 0);
 	hlock->class_idx = class - lock_classes + 1;
 
@@ -2901,9 +2905,9 @@ static void check_flags(unsigned long flags)
 #endif
 }
 
-void
-lock_set_subclass(struct lockdep_map *lock,
-		  unsigned int subclass, unsigned long ip)
+void lock_set_class(struct lockdep_map *lock, const char *name,
+		    struct lock_class_key *key, unsigned int subclass,
+		    unsigned long ip)
 {
 	unsigned long flags;
 
@@ -2913,13 +2917,12 @@ lock_set_subclass(struct lockdep_map *lock,
 	raw_local_irq_save(flags);
 	current->lockdep_recursion = 1;
 	check_flags(flags);
-	if (__lock_set_subclass(lock, subclass, ip))
+	if (__lock_set_class(lock, name, key, subclass, ip))
 		check_chain_key(current);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
-
-EXPORT_SYMBOL_GPL(lock_set_subclass);
+EXPORT_SYMBOL_GPL(lock_set_class);
 
 /*
  * We are not always called with irqs disabled - do that here,
@@ -2943,7 +2946,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL_GPL(lock_acquire);
 
 void lock_release(struct lockdep_map *lock, int nested,
@@ -2961,7 +2963,6 @@ void lock_release(struct lockdep_map *lock, int nested,
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL_GPL(lock_release);
 
 #ifdef CONFIG_LOCK_STAT
@@ -2999,7 +3000,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 	struct held_lock *hlock, *prev_hlock;
 	struct lock_class_stats *stats;
 	unsigned int depth;
-	int i, point;
+	int i, contention_point, contending_point;
 
 	depth = curr->lockdep_depth;
 	if (DEBUG_LOCKS_WARN_ON(!depth))
@@ -3023,18 +3024,22 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 found_it:
 	hlock->waittime_stamp = sched_clock();
 
-	point = lock_contention_point(hlock_class(hlock), ip);
+	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
+	contending_point = lock_point(hlock_class(hlock)->contending_point,
+				      lock->ip);
 
 	stats = get_lock_stats(hlock_class(hlock));
-	if (point < ARRAY_SIZE(stats->contention_point))
-		stats->contention_point[point]++;
+	if (contention_point < LOCKSTAT_POINTS)
+		stats->contention_point[contention_point]++;
+	if (contending_point < LOCKSTAT_POINTS)
+		stats->contending_point[contending_point]++;
 	if (lock->cpu != smp_processor_id())
 		stats->bounces[bounce_contended + !!hlock->read]++;
 	put_lock_stats(stats);
 }
 
 static void
-__lock_acquired(struct lockdep_map *lock)
+__lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
 	struct task_struct *curr = current;
 	struct held_lock *hlock, *prev_hlock;
@@ -3083,6 +3088,7 @@ found_it:
 	put_lock_stats(stats);
 
 	lock->cpu = cpu;
+	lock->ip = ip;
 }
 
 void lock_contended(struct lockdep_map *lock, unsigned long ip)
@@ -3104,7 +3110,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 }
 EXPORT_SYMBOL_GPL(lock_contended);
 
-void lock_acquired(struct lockdep_map *lock)
+void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
@@ -3117,7 +3123,7 @@ void lock_acquired(struct lockdep_map *lock)
 	raw_local_irq_save(flags);
 	check_flags(flags);
 	current->lockdep_recursion = 1;
-	__lock_acquired(lock);
+	__lock_acquired(lock, ip);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
@@ -3441,7 +3447,6 @@ retry:
 	if (unlock)
 		read_unlock(&tasklist_lock);
 }
-
 EXPORT_SYMBOL_GPL(debug_show_all_locks);
 
 /*
@@ -3462,7 +3467,6 @@ void debug_show_held_locks(struct task_struct *task)
 {
 	__debug_show_held_locks(task);
 }
-
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
 void lockdep_sys_exit(void)
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 20dbcbf9c7d..13716b81389 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -470,11 +470,12 @@ static void seq_line(struct seq_file *m, char c, int offset, int length)
 
 static void snprint_time(char *buf, size_t bufsiz, s64 nr)
 {
-	unsigned long rem;
+	s64 div;
+	s32 rem;
 
 	nr += 5; /* for display rounding */
-	rem = do_div(nr, 1000); /* XXX: do_div_signed */
-	snprintf(buf, bufsiz, "%lld.%02d", (long long)nr, (int)rem/10);
+	div = div_s64_rem(nr, 1000, &rem);
+	snprintf(buf, bufsiz, "%lld.%02d", (long long)div, (int)rem/10);
 }
 
 static void seq_time(struct seq_file *m, s64 time)
@@ -556,7 +557,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
 	if (stats->read_holdtime.nr)
 		namelen += 2;
 
-	for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
+	for (i = 0; i < LOCKSTAT_POINTS; i++) {
 		char sym[KSYM_SYMBOL_LEN];
 		char ip[32];
 
@@ -573,6 +574,23 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
 			stats->contention_point[i],
 			ip, sym);
 	}
+	for (i = 0; i < LOCKSTAT_POINTS; i++) {
+		char sym[KSYM_SYMBOL_LEN];
+		char ip[32];
+
+		if (class->contending_point[i] == 0)
+			break;
+
+		if (!i)
+			seq_line(m, '-', 40-namelen, namelen);
+
+		sprint_symbol(sym, class->contending_point[i]);
+		snprintf(ip, sizeof(ip), "[<%p>]",
+				(void *)class->contending_point[i]);
+		seq_printf(m, "%40s %14lu %29s %s\n", name,
+				stats->contending_point[i],
+				ip, sym);
+	}
 	if (i) {
 		seq_puts(m, "\n");
 		seq_line(m, '.', 0, 40 + 1 + 10 * (14 + 1));
@@ -582,7 +600,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
 
 static void seq_header(struct seq_file *m)
 {
-	seq_printf(m, "lock_stat version 0.2\n");
+	seq_printf(m, "lock_stat version 0.3\n");
 	seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
 	seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
 		   "%14s %14s\n",
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 12c779dc65d..4f45d4b658e 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
  * We also put the fastpath first in the kernel image, to make sure the
  * branch is predicted by the CPU as default-untaken.
  */
-static void noinline __sched
+static __used noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count);
 
 /***
@@ -96,7 +96,7 @@ void inline __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 #endif
 
-static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
+static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_unlock - release the mutex
@@ -184,7 +184,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	}
 
 done:
-	lock_acquired(&lock->dep_map);
+	lock_acquired(&lock->dep_map, ip);
 	/* got the lock - rejoice! */
 	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
 	debug_mutex_set_owner(lock, task_thread_info(task));
@@ -268,7 +268,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 /*
  * Release the lock, slowpath:
  */
-static noinline void
+static __used noinline void
 __mutex_unlock_slowpath(atomic_t *lock_count)
 {
 	__mutex_unlock_common_slowpath(lock_count, 1);
@@ -313,7 +313,7 @@ int __sched mutex_lock_killable(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
-static noinline void __sched
+static __used noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
diff --git a/kernel/notifier.c b/kernel/notifier.c
index 4282c0a40a5..61d5aa5eced 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -82,6 +82,14 @@ static int __kprobes notifier_call_chain(struct notifier_block **nl,
 
 	while (nb && nr_to_call) {
 		next_nb = rcu_dereference(nb->next);
+
+#ifdef CONFIG_DEBUG_NOTIFIERS
+		if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) {
+			WARN(1, "Invalid notifier called!");
+			nb = next_nb;
+			continue;
+		}
+#endif
 		ret = nb->notifier_call(nb, val, v);
 
 		if (nr_calls)
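
The check catches notifier callbacks that no longer point at live text; the classic offender is a module that forgets to unregister on unload. A hypothetical sketch (not from this diff):

	static int my_event(struct notifier_block *nb, unsigned long val, void *v)
	{
		return NOTIFY_OK;
	}
	static struct notifier_block my_nb = { .notifier_call = my_event };

	static int __init my_init(void)
	{
		return register_reboot_notifier(&my_nb);
	}
	static void __exit my_exit(void)
	{
		/* BUG: unregister_reboot_notifier(&my_nb) is missing, so
		 * after rmmod the chain still points at freed module text
		 * and the CONFIG_DEBUG_NOTIFIERS check above fires. */
	}
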
diff --git a/kernel/panic.c b/kernel/panic.c
index 4d5088355bf..13f06349a78 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -21,6 +21,7 @@
 #include <linux/debug_locks.h>
 #include <linux/random.h>
 #include <linux/kallsyms.h>
+#include <linux/dmi.h>
 
 int panic_on_oops;
 static unsigned long tainted_mask;
@@ -321,36 +322,27 @@ void oops_exit(void)
 }
 
 #ifdef WANT_WARN_ON_SLOWPATH
-void warn_on_slowpath(const char *file, int line)
-{
-	char function[KSYM_SYMBOL_LEN];
-	unsigned long caller = (unsigned long) __builtin_return_address(0);
-	sprint_symbol(function, caller);
-
-	printk(KERN_WARNING "------------[ cut here ]------------\n");
-	printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
-		line, function);
-	print_modules();
-	dump_stack();
-	print_oops_end_marker();
-	add_taint(TAINT_WARN);
-}
-EXPORT_SYMBOL(warn_on_slowpath);
-
-
 void warn_slowpath(const char *file, int line, const char *fmt, ...)
 {
 	va_list args;
 	char function[KSYM_SYMBOL_LEN];
 	unsigned long caller = (unsigned long)__builtin_return_address(0);
+	const char *board;
+
 	sprint_symbol(function, caller);
 
 	printk(KERN_WARNING "------------[ cut here ]------------\n");
 	printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
 		line, function);
-	va_start(args, fmt);
-	vprintk(fmt, args);
-	va_end(args);
+	board = dmi_get_system_info(DMI_PRODUCT_NAME);
+	if (board)
+		printk(KERN_WARNING "Hardware name: %s\n", board);
+
+	if (fmt) {
+		va_start(args, fmt);
+		vprintk(fmt, args);
+		va_end(args);
+	}
 
 	print_modules();
 	dump_stack();
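
With the DMI hook in place, a WARN renders roughly as follows (file, symbol, and board name are illustrative; the line layout follows the printk formats above):

	------------[ cut here ]------------
	WARNING: at drivers/foo/bar.c:123 bar_probe+0x42/0x90()
	Hardware name: ExampleBoard 1.0
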
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 4e5288a831d..157de3a4783 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -58,21 +58,21 @@ void thread_group_cputime(
58 struct task_struct *tsk, 58 struct task_struct *tsk,
59 struct task_cputime *times) 59 struct task_cputime *times)
60{ 60{
61 struct signal_struct *sig; 61 struct task_cputime *totals, *tot;
62 int i; 62 int i;
63 struct task_cputime *tot;
64 63
65 sig = tsk->signal; 64 totals = tsk->signal->cputime.totals;
66 if (unlikely(!sig) || !sig->cputime.totals) { 65 if (!totals) {
67 times->utime = tsk->utime; 66 times->utime = tsk->utime;
68 times->stime = tsk->stime; 67 times->stime = tsk->stime;
69 times->sum_exec_runtime = tsk->se.sum_exec_runtime; 68 times->sum_exec_runtime = tsk->se.sum_exec_runtime;
70 return; 69 return;
71 } 70 }
71
72 times->stime = times->utime = cputime_zero; 72 times->stime = times->utime = cputime_zero;
73 times->sum_exec_runtime = 0; 73 times->sum_exec_runtime = 0;
74 for_each_possible_cpu(i) { 74 for_each_possible_cpu(i) {
75 tot = per_cpu_ptr(tsk->signal->cputime.totals, i); 75 tot = per_cpu_ptr(totals, i);
76 times->utime = cputime_add(times->utime, tot->utime); 76 times->utime = cputime_add(times->utime, tot->utime);
77 times->stime = cputime_add(times->stime, tot->stime); 77 times->stime = cputime_add(times->stime, tot->stime);
78 times->sum_exec_runtime += tot->sum_exec_runtime; 78 times->sum_exec_runtime += tot->sum_exec_runtime;
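
Caching tsk->signal->cputime.totals up front drops both the unlikely(!sig) test and the repeated pointer chase inside the loop; the loop itself is the standard per-CPU read side, summing every possible CPU's slot. A sketch with a flat array standing in for alloc_percpu() storage and per_cpu_ptr():

#include <stdio.h>

#define NR_CPUS_SKETCH 4

struct cputime_sketch {
        unsigned long long utime, stime, sum_exec_runtime;
};

/* One accumulator per CPU; in the kernel this is per-CPU storage. */
static struct cputime_sketch totals[NR_CPUS_SKETCH];

static void sum_cputime(struct cputime_sketch *out)
{
        int i;

        out->utime = out->stime = out->sum_exec_runtime = 0;
        for (i = 0; i < NR_CPUS_SKETCH; i++) {  /* for_each_possible_cpu() analogue */
                out->utime += totals[i].utime;
                out->stime += totals[i].stime;
                out->sum_exec_runtime += totals[i].sum_exec_runtime;
        }
}

int main(void)
{
        struct cputime_sketch t;

        totals[0].utime = 10;
        totals[2].utime = 5;
        sum_cputime(&t);
        printf("utime=%llu\n", t.utime);        /* prints utime=15 */
        return 0;
}
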
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 5e79c662294..a140e44eebb 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -197,6 +197,11 @@ static int common_timer_create(struct k_itimer *new_timer)
197 return 0; 197 return 0;
198} 198}
199 199
200static int no_timer_create(struct k_itimer *new_timer)
201{
202 return -EOPNOTSUPP;
203}
204
200/* 205/*
201 * Return nonzero if we know a priori this clockid_t value is bogus. 206 * Return nonzero if we know a priori this clockid_t value is bogus.
202 */ 207 */
@@ -248,6 +253,7 @@ static __init int init_posix_timers(void)
248 .clock_getres = hrtimer_get_res, 253 .clock_getres = hrtimer_get_res,
249 .clock_get = posix_get_monotonic_raw, 254 .clock_get = posix_get_monotonic_raw,
250 .clock_set = do_posix_clock_nosettime, 255 .clock_set = do_posix_clock_nosettime,
256 .timer_create = no_timer_create,
251 }; 257 };
252 258
253 register_posix_clock(CLOCK_REALTIME, &clock_realtime); 259 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
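
CLOCK_MONOTONIC_RAW gains an explicit .timer_create stub so timer creation on that clock fails cleanly with -EOPNOTSUPP instead of falling through to the common implementation. Rejecting an unsupported operation with a dedicated stub in an ops table is a common kernel idiom; a user-space sketch (struct and names are illustrative, EOPNOTSUPP comes from errno.h on Linux):

#include <errno.h>
#include <stdio.h>

struct clock_ops_sketch {
        int (*timer_create)(void);      /* tiny subset of the real k_clock table */
};

static int no_timer_create_sketch(void)
{
        return -EOPNOTSUPP;             /* the clock exists, timers on it do not */
}

static const struct clock_ops_sketch monotonic_raw_sketch = {
        .timer_create = no_timer_create_sketch,
};

int main(void)
{
        int ret = monotonic_raw_sketch.timer_create();

        if (ret < 0)
                printf("timer_create failed: %d\n", ret);
        return 0;
}
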
diff --git a/kernel/printk.c b/kernel/printk.c
index f492f1583d7..e651ab05655 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -662,7 +662,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
662 if (recursion_bug) { 662 if (recursion_bug) {
663 recursion_bug = 0; 663 recursion_bug = 0;
664 strcpy(printk_buf, recursion_bug_msg); 664 strcpy(printk_buf, recursion_bug_msg);
665 printed_len = sizeof(recursion_bug_msg); 665 printed_len = strlen(recursion_bug_msg);
666 } 666 }
667 /* Emit the output into the temporary buffer */ 667 /* Emit the output into the temporary buffer */
668 printed_len += vscnprintf(printk_buf + printed_len, 668 printed_len += vscnprintf(printk_buf + printed_len,
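
The printk fix is the classic sizeof-versus-strlen confusion: for a char array holding a string, sizeof counts the terminating NUL, so printed_len overstated the prefix by one byte and the subsequent vscnprintf() output landed one position too far. A two-line illustration (the message text here is illustrative):

#include <stdio.h>
#include <string.h>

static const char msg[] = "printk recursion detected\n";

int main(void)
{
        /* sizeof includes the trailing '\0'; strlen does not. */
        printf("sizeof=%zu strlen=%zu\n", sizeof(msg), strlen(msg));
        return 0;
}
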
diff --git a/kernel/resource.c b/kernel/resource.c
index 4337063663e..e633106b12f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -853,6 +853,15 @@ int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
853 if (PFN_DOWN(p->start) <= PFN_DOWN(addr) && 853 if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
854 PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1)) 854 PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
855 continue; 855 continue;
856 /*
857 * if a resource is "BUSY", it's not a hardware resource
858 * but a driver mapping of such a resource; we don't want
859 * to warn for those; some drivers legitimately map only
860 * partial hardware resources. (example: vesafb)
861 */
862 if (p->flags & IORESOURCE_BUSY)
863 continue;
864
856 printk(KERN_WARNING "resource map sanity check conflict: " 865 printk(KERN_WARNING "resource map sanity check conflict: "
857 "0x%llx 0x%llx 0x%llx 0x%llx %s\n", 866 "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
858 (unsigned long long)addr, 867 (unsigned long long)addr,
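
As the new comment explains, BUSY entries describe a driver's claim on a region rather than the hardware region itself, and partial claims (vesafb being the cited example) are legitimate, so the sanity check now skips them. The fix is a plain flag filter while walking the entries; sketched below with an illustrative flag value rather than the real IORESOURCE_BUSY bit:

#include <stdio.h>

#define BUSY_SKETCH 0x80000000u         /* illustrative flag value */

struct res_sketch {
        const char *name;
        unsigned int flags;
};

int main(void)
{
        struct res_sketch res[] = {
                { "Video RAM area", 0 },
                { "vesafb",         BUSY_SKETCH },  /* driver mapping: skip */
        };
        size_t i;

        for (i = 0; i < sizeof(res) / sizeof(res[0]); i++) {
                if (res[i].flags & BUSY_SKETCH)
                        continue;       /* don't warn about driver claims */
                printf("would sanity-check %s\n", res[i].name);
        }
        return 0;
}
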
diff --git a/kernel/sched.c b/kernel/sched.c
index e4bb1dd7b30..3e70963120a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4203,7 +4203,6 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
4203 4203
4204 if (p == rq->idle) { 4204 if (p == rq->idle) {
4205 p->stime = cputime_add(p->stime, steal); 4205 p->stime = cputime_add(p->stime, steal);
4206 account_group_system_time(p, steal);
4207 if (atomic_read(&rq->nr_iowait) > 0) 4206 if (atomic_read(&rq->nr_iowait) > 0)
4208 cpustat->iowait = cputime64_add(cpustat->iowait, tmp); 4207 cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
4209 else 4208 else
@@ -4339,7 +4338,7 @@ void __kprobes sub_preempt_count(int val)
4339 /* 4338 /*
4340 * Underflow? 4339 * Underflow?
4341 */ 4340 */
4342 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 4341 if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
4343 return; 4342 return;
4344 /* 4343 /*
4345 * Is the spinlock portion underflowing? 4344 * Is the spinlock portion underflowing?
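
The two sched.c hunks stop group-accounting steal time against the idle task and tighten the preempt-count underflow check while the kernel lock is held; the (!!kernel_locked()) form is the usual idiom for normalizing a truth value to exactly 0 or 1 before using it in arithmetic. A sketch of that idiom:

#include <stdio.h>

/* The check subtracts one allowed unit while the lock is held;
 * "!!x" collapses any nonzero x to exactly 1. */
static int underflow_sketch(int val, int count, int locked)
{
        return val > count - !!locked;
}

int main(void)
{
        printf("%d\n", underflow_sketch(2, 2, 0));  /* 0: fine when unlocked */
        printf("%d\n", underflow_sketch(2, 2, 1));  /* 1: flagged when locked */
        return 0;
}
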
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 80d323e6f61..466e75ce271 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -102,20 +102,6 @@ void local_bh_disable(void)
102 102
103EXPORT_SYMBOL(local_bh_disable); 103EXPORT_SYMBOL(local_bh_disable);
104 104
105void __local_bh_enable(void)
106{
107 WARN_ON_ONCE(in_irq());
108
109 /*
110 * softirqs should never be enabled by __local_bh_enable(),
111 * it always nests inside local_bh_enable() sections:
112 */
113 WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);
114
115 sub_preempt_count(SOFTIRQ_OFFSET);
116}
117EXPORT_SYMBOL_GPL(__local_bh_enable);
118
119/* 105/*
120 * Special-case - softirqs can safely be enabled in 106 * Special-case - softirqs can safely be enabled in
121 * cond_resched_softirq(), or by __do_softirq(), 107 * cond_resched_softirq(), or by __do_softirq(),
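
The deleted __local_bh_enable() did its work by subtracting SOFTIRQ_OFFSET from preempt_count(), where the softirq-disable depth lives as a bit-field, after asserting it never popped the last nesting level itself. A toy model of that counted-disable accounting (the constants follow the classic layout but treat them as illustrative):

#include <stdio.h>

#define SOFTIRQ_SHIFT_SKETCH  8
#define SOFTIRQ_OFFSET_SKETCH (1u << SOFTIRQ_SHIFT_SKETCH)
#define SOFTIRQ_MASK_SKETCH   (0xffu << SOFTIRQ_SHIFT_SKETCH)

static unsigned int preempt_count_sketch;

static void bh_disable(void) { preempt_count_sketch += SOFTIRQ_OFFSET_SKETCH; }
static void bh_enable(void)  { preempt_count_sketch -= SOFTIRQ_OFFSET_SKETCH; }

static unsigned int softirq_count_sketch(void)
{
        return preempt_count_sketch & SOFTIRQ_MASK_SKETCH;
}

int main(void)
{
        bh_disable();
        bh_disable();
        /* nesting depth is the field value shifted back down: prints 2 */
        printf("depth=%u\n", softirq_count_sketch() >> SOFTIRQ_SHIFT_SKETCH);
        bh_enable();
        bh_enable();
        return 0;
}
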
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index dc0b3be6b7d..1ab790c67b1 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -164,7 +164,7 @@ unsigned long __read_mostly sysctl_hung_task_check_count = 1024;
164/* 164/*
165 * Zero means infinite timeout - no checking done: 165 * Zero means infinite timeout - no checking done:
166 */ 166 */
167unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120; 167unsigned long __read_mostly sysctl_hung_task_timeout_secs = 480;
168 168
169unsigned long __read_mostly sysctl_hung_task_warnings = 10; 169unsigned long __read_mostly sysctl_hung_task_warnings = 10;
170 170
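
The default hung-task timeout grows from 120 to 480 seconds, i.e. four times more patience before a blocked task is reported; per the comment above it, zero still disables the check entirely. A sketch of how such a knob is typically consumed (the guard shape is illustrative, not the actual softlockup.c code):

#include <stdio.h>

static unsigned long hung_task_timeout_secs = 480;     /* 0 = no checking */

static void check_task(unsigned long blocked_secs)
{
        if (!hung_task_timeout_secs)    /* zero means infinite timeout */
                return;
        if (blocked_secs >= hung_task_timeout_secs)
                printf("task blocked for more than %lu seconds\n",
                       hung_task_timeout_secs);
}

int main(void)
{
        check_task(500);        /* warns */
        check_task(100);        /* quiet */
        return 0;
}
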
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index 94b527ef1d1..eb212f8f8bc 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -6,6 +6,7 @@
6 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 6 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7 */ 7 */
8#include <linux/sched.h> 8#include <linux/sched.h>
9#include <linux/kernel.h>
9#include <linux/module.h> 10#include <linux/module.h>
10#include <linux/kallsyms.h> 11#include <linux/kallsyms.h>
11#include <linux/stacktrace.h> 12#include <linux/stacktrace.h>
@@ -24,3 +25,13 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
24} 25}
25EXPORT_SYMBOL_GPL(print_stack_trace); 26EXPORT_SYMBOL_GPL(print_stack_trace);
26 27
28/*
29 * Architectures that do not implement save_stack_trace_tsk get this
30 * weak alias and a once-per-bootup warning (whenever this facility
31 * is utilized - for example by procfs):
32 */
33__weak void
34save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
35{
36 WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n");
37}
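
The new stub leans on two generic facilities: __weak, so any architecture that does implement save_stack_trace_tsk() overrides it at link time, and WARN_ONCE(), which nags only on first use. A freestanding GCC/Clang-on-ELF sketch of that pattern:

#include <stdio.h>

/* Weak default: any strong definition elsewhere in the link wins. */
__attribute__((weak)) void save_stack_trace_sketch(void)
{
        static int warned;

        if (!warned) {          /* WARN_ONCE()-style once-per-run nag */
                warned = 1;
                fprintf(stderr, "save_stack_trace_sketch() not implemented\n");
        }
}

int main(void)
{
        save_stack_trace_sketch();      /* warns on the first call ... */
        save_stack_trace_sketch();      /* ... and is silent afterwards */
        return 0;
}
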
diff --git a/kernel/sys.c b/kernel/sys.c
index 31deba8f7d1..5fc3a0cfb99 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -858,8 +858,8 @@ void do_sys_times(struct tms *tms)
858 struct task_cputime cputime; 858 struct task_cputime cputime;
859 cputime_t cutime, cstime; 859 cputime_t cutime, cstime;
860 860
861 spin_lock_irq(&current->sighand->siglock);
862 thread_group_cputime(current, &cputime); 861 thread_group_cputime(current, &cputime);
862 spin_lock_irq(&current->sighand->siglock);
863 cutime = current->signal->cutime; 863 cutime = current->signal->cutime;
864 cstime = current->signal->cstime; 864 cstime = current->signal->cstime;
865 spin_unlock_irq(&current->sighand->siglock); 865 spin_unlock_irq(&current->sighand->siglock);
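
The do_sys_times() hunk hoists the thread_group_cputime() summation out from under siglock, shrinking the critical section to the few reads that actually need it and keeping the per-CPU walk out of a spinlocked, IRQs-off region. The general shape, sketched with pthreads (compile with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static long shared_total;

static long expensive_sum(void)
{
        long sum = 0;
        int i;

        for (i = 0; i < 1000000; i++)   /* stand-in for the per-CPU walk */
                sum += i;
        return sum;
}

int main(void)
{
        /* Do the expensive work first, without the lock ... */
        long sum = expensive_sum();

        /* ... then hold the lock only for the shared-state access. */
        pthread_mutex_lock(&lock);
        shared_total += sum;
        pthread_mutex_unlock(&lock);

        printf("total=%ld\n", shared_total);
        return 0;
}
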